Submission Template
This notebook provides a suggested starter template for completing the CMR model assignment.
You should submit your assignment by uploading your completed notebook to Canvas. Please ensure that your notebook runs without errors in Google Colaboratory.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import requests
import os
from scipy.io import loadmat
from tempfile import NamedTemporaryFile
Downloads the stimuli and behavioral data, returning a dictionary with the following fields:
‘words’: a list of 1297 strings (one per word in the stimulus pool)
‘sem_mat’: a 1297 x 1297 NumPy array of semantic similarities (range: -1 to 1) between every pair of words in the stimulus pool
‘presented_items’: a number-of-trials by list-length array of items for each presented list (represented using 0-indexed indices in the word pool)
‘recalled_items’: a number-of-trials by max-number-of-recalled-items array of recalled items from each list (represented using 0-indexed indices in the word pool). -1s correspond to extra-list intrusions. Trials are right-padded with nans as needed.
‘task’: a number-of-trials by list-length array of task labels for each presented item (0: size task; 1: animacy task)
‘session’: session labels for each trial (a NumPy array of length number-of-trials)
‘subject’: subject labels for each trial (a NumPy array of length number-of-trials)
‘list_type’: list type labels for each trial (a NumPy array of length number-of-trials; 0: all items studied using the size task; 1: all items studied using the animacy task; 2: task-shift list)
‘list_length’: a scalar value containing the list length (an integer)
def _fetch_mat(url):
    """Download a .mat file from `url` and parse it with scipy.io.loadmat.

    The payload is written to a named temporary file because loadmat wants a
    path. The file is closed before loadmat reads it (reopening a still-open
    NamedTemporaryFile by name fails on Windows) and removed afterwards even
    if parsing raises.
    """
    response = requests.get(url)
    response.raise_for_status()  # fail loudly on a bad download
    with NamedTemporaryFile(delete=False) as temp_file:
        temp_file.write(response.content)
    try:
        return loadmat(temp_file.name)
    finally:
        os.remove(temp_file.name)


def load_data():
    """Download the PolyEtal09 stimuli and behavioral data.

    Returns a dict with fields:
        words: list of strings, one per word in the stimulus pool
        sem_mat: square NumPy array of pairwise semantic similarities
        presented_items: trials x list-length array of presented items
            (0-indexed into the word pool)
        recalled_items: trials x max-recalls array of recalled items
            (0-indexed; -1 marks extra-list intrusions per the dataset docs)
        task: trials x list-length array of task labels
        session: per-trial session labels (1-D NumPy array)
        subject: per-trial subject labels (1-D NumPy array)
        list_type: per-trial list-type labels (1-D NumPy array)
        list_length: the list length as an int

    Raises:
        requests.HTTPError: if either file fails to download.
    """
    base_url = "https://raw.githubusercontent.com/ContextLab/memory-models-course/refs/heads/main/content/assignments/Assignment_3%3AContext_Maintenance_and_Retrieval_Model/PolyEtal09%20data/"

    # Stimulus pool: word list and semantic similarity matrix.
    stimuli_data = _fetch_mat(base_url + "stimuli.mat")
    words = [str(x[0][0]) for x in stimuli_data['tfr_wp']]
    sem_mat = stimuli_data['sem_mat']

    # Behavioral data: unwrap the MATLAB struct nesting once instead of
    # repeating the [0][0][0][0][0] chain for every field.
    behavioral_data = _fetch_mat(base_url + "behavior.mat")
    behavior = behavioral_data['data'][0][0][0][0][0]

    return {'words': words,
            'sem_mat': sem_mat,
            # item numbers are 1-indexed in the .mat file; shift to 0-indexed
            'presented_items': behavior['pres_itemnos'] - 1,
            'recalled_items': behavior['rec_itemnos'] - 1,
            'task': behavior['pres_task'],
            'session': behavior['session'].flatten(),
            'subject': behavior['subject'].flatten(),
            'list_type': behavior['listType'].flatten(),
            'list_length': int(behavior['listLength'].flatten()[0])}
# Download the stimuli and behavioral data once at module level
# (performs network requests; requires an internet connection).
data = load_data()