diff --git a/allensdk/brain_observatory/behavior/data_files/stimulus_file.py b/allensdk/brain_observatory/behavior/data_files/stimulus_file.py index 3feeb0378..21363496c 100644 --- a/allensdk/brain_observatory/behavior/data_files/stimulus_file.py +++ b/allensdk/brain_observatory/behavior/data_files/stimulus_file.py @@ -27,6 +27,21 @@ """ +STIMULUS_FILE_SESSION_QUERY_TEMPLATE = """ + SELECT + wkf.storage_directory || wkf.filename AS stim_file + FROM + well_known_files wkf + WHERE + wkf.attachable_id = {ophys_session_id} + AND wkf.attachable_type = 'OphysSession' + AND wkf.well_known_file_type_id IN ( + SELECT id + FROM well_known_file_types + WHERE name = 'StimulusPickle'); +""" + + def from_json_cache_key(cls, dict_repr: dict): return hashkey(json.dumps(dict_repr)) @@ -34,6 +49,9 @@ def from_json_cache_key(cls, dict_repr: dict): def from_lims_cache_key(cls, db, behavior_session_id: int): return hashkey(behavior_session_id) +def from_lims_cache_key_ophys(cls, db, ophys_session_id: int): + return hashkey(ophys_session_id) + class StimulusFile(DataFile): """A DataFile which contains methods for accessing and loading visual @@ -68,6 +86,18 @@ def from_lims( filepath = db.fetchone(query, strict=True) return cls(filepath=filepath) + @classmethod + @cached(cache=LRUCache(maxsize=10), key=from_lims_cache_key_ophys) + def from_lims_for_ophys_session( + cls, db: PostgresQueryMixin, + ophys_session_id: Union[int, str] + ) -> "StimulusFile": + query = STIMULUS_FILE_SESSION_QUERY_TEMPLATE.format( + ophys_session_id=ophys_session_id + ) + filepath = db.fetchone(query, strict=True) + return cls(filepath=filepath) + @staticmethod def load_data(filepath: Union[str, Path]) -> dict: filepath = safe_system_path(file_name=filepath) diff --git a/allensdk/brain_observatory/behavior/data_files/sync_file.py b/allensdk/brain_observatory/behavior/data_files/sync_file.py index dab04f9f7..1d449b8d9 100644 --- a/allensdk/brain_observatory/behavior/data_files/sync_file.py +++ 
b/allensdk/brain_observatory/behavior/data_files/sync_file.py @@ -24,6 +24,19 @@ AND oe.id = {ophys_experiment_id}; """ +SYNC_FILE_SESSION_QUERY_TEMPLATE = """ + SELECT wkf.storage_directory || wkf.filename AS sync_file + FROM ophys_experiments oe + JOIN ophys_sessions os ON oe.ophys_session_id = os.id + JOIN well_known_files wkf ON wkf.attachable_id = os.id + JOIN well_known_file_types wkft + ON wkft.id = wkf.well_known_file_type_id + WHERE wkf.attachable_type = 'OphysSession' + AND wkft.name = 'OphysRigSync' + AND os.id = {ophys_session_id}; +""" + + def from_json_cache_key(cls, dict_repr: dict): return hashkey(json.dumps(dict_repr)) @@ -33,6 +46,10 @@ def from_lims_cache_key(cls, db, ophys_experiment_id: int): return hashkey(ophys_experiment_id) +def from_lims_cache_key_session(cls, db, ophys_session_id: int): + return hashkey(ophys_session_id) + + class SyncFile(DataFile): """A DataFile which contains methods for accessing and loading visual behavior stimulus *.pkl files. @@ -65,6 +82,19 @@ def from_lims( filepath = db.fetchone(query, strict=True) return cls(filepath=filepath) + + @classmethod + @cached(cache=LRUCache(maxsize=10), key=from_lims_cache_key_session) + def from_lims_for_ophys_session( + cls, db: PostgresQueryMixin, + ophys_session_id: Union[int, str] + ) -> "SyncFile": + query = SYNC_FILE_SESSION_QUERY_TEMPLATE.format( + ophys_session_id=ophys_session_id + ) + filepath = db.fetchall(query, strict=True)[0] + return cls(filepath=filepath) + @staticmethod def load_data(filepath: Union[str, Path]) -> dict: filepath = safe_system_path(file_name=filepath) diff --git a/allensdk/brain_observatory/behavior/data_objects/__init__.py b/allensdk/brain_observatory/behavior/data_objects/__init__.py index 9ea4e7d0c..764ee56d4 100644 --- a/allensdk/brain_observatory/behavior/data_objects/__init__.py +++ b/allensdk/brain_observatory/behavior/data_objects/__init__.py @@ -1,6 +1,8 @@ from allensdk.brain_observatory.behavior.data_objects.base._data_object_abc import 
DataObject # noqa: E501, F401 from allensdk.brain_observatory.behavior.data_objects.metadata\ .behavior_metadata.behavior_session_id import BehaviorSessionId # noqa: E501, F401 +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .behavior_metadata.visualcoding_session_id import VisualCodingSessionId from allensdk.brain_observatory.behavior.data_objects.timestamps\ .stimulus_timestamps.stimulus_timestamps import StimulusTimestamps # noqa: E501, F401 from allensdk.brain_observatory.behavior.data_objects.running_speed.running_speed import RunningSpeed # noqa: E501, F401 diff --git a/allensdk/brain_observatory/behavior/data_objects/cell_specimens/cell_specimens.py b/allensdk/brain_observatory/behavior/data_objects/cell_specimens/cell_specimens.py index a626083a9..7d262ca6a 100644 --- a/allensdk/brain_observatory/behavior/data_objects/cell_specimens/cell_specimens.py +++ b/allensdk/brain_observatory/behavior/data_objects/cell_specimens/cell_specimens.py @@ -181,9 +181,10 @@ def __init__(self, # there seem to be cases where cell_specimen_table contains rois not in # events # See ie https://app.zenhub.com/workspaces/allensdk-10-5c17f74db59cfb36f158db8c/issues/alleninstitute/allensdk/2139 # noqa - events.filter_and_reorder( - roi_ids=cell_specimen_table['cell_roi_id'].values, - raise_if_rois_missing=False) + if events is not None: + events.filter_and_reorder( + roi_ids=cell_specimen_table['cell_roi_id'].values, + raise_if_rois_missing=False) self._meta = meta self._cell_specimen_table = cell_specimen_table @@ -235,7 +236,8 @@ def from_lims(cls, ophys_experiment_id: int, ophys_timestamps: OphysTimestamps, segmentation_mask_image_spacing: Tuple, exclude_invalid_rois=True, - events_params: Optional[EventsParams] = None) \ + events_params: Optional[EventsParams] = None, + include_events: bool = True) \ -> "CellSpecimens": def _get_ophys_cell_segmentation_run_id() -> int: """Get the ophys cell segmentation run id associated with an @@ -297,7 +299,10 @@ def 
_get_events(): ophys_timestamps=ophys_timestamps) dff_traces = _get_dff_traces() corrected_fluorescence_traces = _get_corrected_fluorescence_traces() - events = _get_events() + if include_events: + events = _get_events() + else: + events = None return CellSpecimens( cell_specimen_table=cell_specimen_table, meta=meta, diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/behavior_metadata.py b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/behavior_metadata.py index d7a77c923..dbc3c2392 100644 --- a/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/behavior_metadata.py +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/behavior_metadata.py @@ -4,7 +4,7 @@ import numpy as np from pynwb import NWBFile -from allensdk.brain_observatory.behavior.data_files import StimulusFile +from allensdk.brain_observatory.behavior.data_files import StimulusFile, SyncFile from allensdk.brain_observatory.behavior.data_objects import DataObject, \ BehaviorSessionId from allensdk.brain_observatory.behavior.data_objects.base \ @@ -211,8 +211,11 @@ def from_lims( stimulus_file = StimulusFile.from_lims( db=lims_db, behavior_session_id=behavior_session_id.value) - stimulus_frame_rate = StimulusFrameRate.from_stimulus_file( - stimulus_file=stimulus_file) + # stimulus_frame_rate = StimulusFrameRate.from_stimulus_file( + # stimulus_file=stimulus_file) + sync_file = SyncFile.from_lims_for_session(db=lims_db, behavior_session_id=behavior_session_id.value) + stimulus_frame_rate = StimulusFrameRate.from_sync_file( + sync_file=sync_file) session_type = SessionType.from_stimulus_file( stimulus_file=stimulus_file) diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/date_of_acquisition.py b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/date_of_acquisition.py index 7f2789c9a..7f0f21eae 100644 --- 
a/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/date_of_acquisition.py +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/date_of_acquisition.py @@ -45,6 +45,22 @@ def from_lims( datetime=experiment_date) return cls(date_of_acquisition=experiment_date) + @classmethod + def from_lims_for_ophys_session( + cls, ophys_session_id: int, + lims_db: PostgresQueryMixin) -> "DateOfAcquisition": + query = """ + SELECT os.date_of_acquisition + FROM ophys_sessions os + WHERE os.id = {}; + """.format(ophys_session_id) + + experiment_date = lims_db.fetchone(query, strict=True) + experiment_date = cls._postprocess_lims_datetime( + datetime=experiment_date) + return cls(date_of_acquisition=experiment_date) + + @classmethod def from_nwb(cls, nwbfile: NWBFile) -> "DateOfAcquisition": return cls(date_of_acquisition=nwbfile.session_start_time) diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/equipment.py b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/equipment.py index 4b8020fc6..95f81ecaa 100644 --- a/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/equipment.py +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/equipment.py @@ -43,6 +43,18 @@ def from_lims(cls, behavior_session_id: int, equipment_name = lims_db.fetchone(query, strict=True) return cls(equipment_name=equipment_name) + @classmethod + def from_lims_for_ophys_session(cls, ophys_session_id: int, + lims_db: PostgresQueryMixin) -> "Equipment": + query = f""" + SELECT e.name AS device_name + FROM ophys_sessions os + JOIN equipment e ON e.id = os.equipment_id + WHERE os.id = {ophys_session_id}; + """ + equipment_name = lims_db.fetchone(query, strict=True) + return cls(equipment_name=equipment_name) + @classmethod def from_nwb(cls, nwbfile: NWBFile) -> "Equipment": metadata = nwbfile.lab_meta_data['metadata'] diff --git 
a/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/foraging_id.py b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/foraging_id.py index 3b2bae420..d53daddd9 100644 --- a/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/foraging_id.py +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/foraging_id.py @@ -30,3 +30,18 @@ def from_lims(cls, behavior_session_id: int, foraging_id = lims_db.fetchone(query, strict=True) foraging_id = uuid.UUID(foraging_id) return cls(foraging_id=foraging_id) + + @classmethod + def from_lims_for_ophys_session(cls, ophys_session_id: int, + lims_db: PostgresQueryMixin) -> "ForagingId": + query = f""" + SELECT + foraging_id + FROM + ophys_sessions + WHERE + ophys_sessions.id = {ophys_session_id}; + """ + foraging_id = lims_db.fetchone(query, strict=True) + foraging_id = uuid.UUID(foraging_id) + return cls(foraging_id=foraging_id) \ No newline at end of file diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/stimulus_frame_rate.py b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/stimulus_frame_rate.py index a9e75b514..582f994a9 100644 --- a/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/stimulus_frame_rate.py +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/stimulus_frame_rate.py @@ -1,6 +1,6 @@ from pynwb import NWBFile -from allensdk.brain_observatory.behavior.data_files import StimulusFile +from allensdk.brain_observatory.behavior.data_files import StimulusFile, SyncFile from allensdk.brain_observatory.behavior.data_objects import DataObject from allensdk.brain_observatory.behavior.data_objects.base \ .readable_interfaces import \ @@ -27,6 +27,15 @@ def from_stimulus_file( frame_rate = calc_frame_rate(timestamps=stimulus_timestamps.value) return cls(stimulus_frame_rate=frame_rate) + @classmethod + def 
from_sync_file( + cls, + sync_file: SyncFile) -> "StimulusFrameRate": + stimulus_timestamps = StimulusTimestamps.from_sync_file( + sync_file=sync_file) + frame_rate = calc_frame_rate(timestamps=stimulus_timestamps.value) + return cls(stimulus_frame_rate=frame_rate) + @classmethod def from_nwb(cls, nwbfile: NWBFile) -> "StimulusFrameRate": metadata = nwbfile.lab_meta_data['metadata'] diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/visualcoding_metadata.py b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/visualcoding_metadata.py new file mode 100644 index 000000000..3f678ce6c --- /dev/null +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/visualcoding_metadata.py @@ -0,0 +1,324 @@ +import uuid +from typing import Dict, Optional +import re +import numpy as np +from pynwb import NWBFile + +from allensdk.brain_observatory.behavior.data_files import StimulusFile, SyncFile +from allensdk.brain_observatory.behavior.data_objects import DataObject, \ + BehaviorSessionId, VisualCodingSessionId +from allensdk.brain_observatory.behavior.data_objects.base \ + .readable_interfaces import \ + JsonReadableInterface, NwbReadableInterface, \ + LimsReadableInterface +from allensdk.brain_observatory.behavior.data_objects.base \ + .writable_interfaces import \ + JsonWritableInterface, NwbWritableInterface +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .behavior_metadata.behavior_session_uuid import \ + BehaviorSessionUUID +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .behavior_metadata.equipment import \ + Equipment +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .behavior_metadata.foraging_id import \ + ForagingId +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .behavior_metadata.visualcoding_session_type import \ + VisualCodingSessionType +from allensdk.brain_observatory.behavior.data_objects.metadata\ + 
.behavior_metadata.stimulus_frame_rate import \ + StimulusFrameRate +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .subject_metadata.subject_metadata import \ + SubjectMetadata +from allensdk.brain_observatory.behavior.schemas import BehaviorMetadataSchema +from allensdk.brain_observatory.nwb import load_pynwb_extension +from allensdk.internal.api import PostgresQueryMixin + +description_dict = { + # key is a regex and value is returned on match + r"\AOPHYS_0_images": "A behavior training session performed on the 2-photon calcium imaging setup but without recording neural activity, with the goal of habituating the mouse to the experimental setup before commencing imaging of neural activity. Habituation sessions are change detection with the same image set on which the mouse was trained. The session is 75 minutes long, with 5 minutes of gray screen before and after 60 minutes of behavior, followed by 10 repeats of a 30 second natural movie stimulus at the end of the session.", # noqa: E501 + r"\AOPHYS_[1|3]_images": "2-photon calcium imaging in the visual cortex of the mouse brain as the mouse performs a visual change detection task with a set of natural images upon which it has been previously trained. Image stimuli are displayed for 250 ms with a 500 ms intervening gray period. 5% of non-change image presentations are randomly omitted. The session is 75 minutes long, with 5 minutes of gray screen before and after 60 minutes of behavior, followed by 10 repeats of a 30 second natural movie stimulus at the end of the session.", # noqa: E501 + r"\AOPHYS_2_images": "2-photon calcium imaging in the visual cortex of the mouse brain as the mouse is passively shown changes in natural scene images upon which it was previously trained as the change detection task is played in open loop mode, with the lick-response sensory withdrawn and the mouse is unable to respond to changes or receive reward feedback. 
Image stimuli are displayed for 250 ms with a 500 ms intervening gray period. 5% of non-change image presentations are randomly omitted. The session is 75 minutes long, with 5 minutes of gray screen before and after 60 minutes of behavior, followed by 10 repeats of a 30 second natural movie stimulus at the end of the session.", # noqa: E501 + r"\AOPHYS_[4|6]_images": "2-photon calcium imaging in the visual cortex of the mouse brain as the mouse performs a visual change detection task with natural scene images that are unique from those on which the mouse was trained prior to the imaging phase of the experiment. Image stimuli are displayed for 250 ms with a 500 ms intervening gray period. 5% of non-change image presentations are randomly omitted. The session is 75 minutes long, with 5 minutes of gray screen before and after 60 minutes of behavior, followed by 10 repeats of a 30 second natural movie stimulus at the end of the session.", # noqa: E501 + r"\AOPHYS_5_images": "2-photon calcium imaging in the visual cortex of the mouse brain as the mouse is passively shown changes in natural scene images that are unique from those on which the mouse was trained prior to the imaging phase of the experiment. In this session, the change detection task is played in open loop mode, with the lick-response sensory withdrawn and the mouse is unable to respond to changes or receive reward feedback. Image stimuli are displayed for 250 ms with a 500 ms intervening gray period. 5% of non-change image presentations are randomly omitted. The session is 75 minutes long, with 5 minutes of gray screen before and after 60 minutes of behavior, followed by 10 repeats of a 30 second natural movie stimulus at the end of the session.", # noqa: E501 + r"\ATRAINING_0_gratings": "An associative training session where a mouse is automatically rewarded when a grating stimulus changes orientation. 
Grating stimuli are full-field, square-wave static gratings with a spatial frequency of 0.04 cycles per degree, with orientation changes between 0 and 90 degrees, at two spatial phases. Delivered rewards are 5ul in volume, and the session lasts for 15 minutes.", # noqa: E501 + r"\ATRAINING_1_gratings": "An operant behavior training session where a mouse must lick following a change in stimulus identity to earn rewards. Stimuli consist of full-field, square-wave static gratings with a spatial frequency of 0.04 cycles per degree. Orientation changes between 0 and 90 degrees occur with no intervening gray period. Delivered rewards are 10ul in volume, and the session lasts 60 minutes", # noqa: E501 + r"\ATRAINING_2_gratings": "An operant behavior training session where a mouse must lick following a change in stimulus identity to earn rewards. Stimuli consist of full-field, square-wave static gratings with a spatial frequency of 0.04 cycles per degree. Gratings of 0 or 90 degrees are presented for 250 ms with a 500 ms intervening gray period. Delivered rewards are 10ul in volume, and the session lasts 60 minutes.", # noqa: E501 + r"\ATRAINING_3_images": "An operant behavior training session where a mouse must lick following a change in stimulus identity to earn rewards. Stimuli consist of 8 natural scene images, for a total of 64 possible pairwise transitions. Images are shown for 250 ms with a 500 ms intervening gray period. Delivered rewards are 10ul in volume, and the session lasts for 60 minutes", # noqa: E501 + r"\ATRAINING_4_images": "An operant behavior training session where a mouse must lick a spout following a change in stimulus identity to earn rewards. Stimuli consist of 8 natural scene images, for a total of 64 possible pairwise transitions. Images are shown for 250 ms with a 500 ms intervening gray period. 
Delivered rewards are 7ul in volume, and the session lasts for 60 minutes", # noqa: E501 + r"\ATRAINING_5_images": "An operant behavior training session where a mouse must lick a spout following a change in stimulus identity to earn rewards. Stimuli consist of 8 natural scene images, for a total of 64 possible pairwise transitions. Images are shown for 250 ms with a 500 ms intervening gray period. Delivered rewards are 7ul in volume. The session is 75 minutes long, with 5 minutes of gray screen before and after 60 minutes of behavior, followed by 10 repeats of a 30 second natural movie stimulus at the end of the session." # noqa: E501 + } + + +def get_expt_description(session_type: str) -> str: + """Determine a behavior ophys session's experiment description based on + session type. Matches the regex patterns defined as the keys in + description_dict + + Parameters + ---------- + session_type : str + A session description string (e.g. OPHYS_1_images_B ) + + Returns + ------- + str + A description of the experiment based on the session_type. + + Raises + ------ + RuntimeError + Behavior ophys sessions should only have 6 different session types. + Unknown session types (or malformed session_type strings) will raise + an error. + """ + match = dict() + for k, v in description_dict.items(): + if re.match(k, session_type) is not None: + match.update({k: v}) + + if len(match) != 1: + emsg = (f"session type should match one and only one possible pattern " + f"template. '{session_type}' matched {len(match)} pattern " + "templates.") + if len(match) > 1: + emsg += f"{list(match.keys())}" + emsg += f"the regex pattern templates are {list(description_dict)}" + raise RuntimeError(emsg) + + return match.popitem()[1] + + +def get_task_parameters(data: Dict) -> Dict: + """ + Read task_parameters metadata from the behavior stimulus pickle file. + + Parameters + ---------- + data: dict + The nested dict read in from the behavior stimulus pickle file. 
+ All of the data expected by this method lives under + data['items']['behavior'] + + Returns + ------- + dict + A dict containing the task_parameters associated with this session. + """ + behavior = data["items"]["behavior"] + stimuli = behavior['stimuli'] + config = behavior["config"] + doc = config["DoC"] + + task_parameters = {} + + task_parameters['blank_duration_sec'] = \ + [float(x) for x in doc['blank_duration_range']] + + if 'images' in stimuli: + stim_key = 'images' + elif 'grating' in stimuli: + stim_key = 'grating' + else: + msg = "Cannot get stimulus_duration_sec\n" + msg += "'images' and/or 'grating' not a valid " + msg += "key in pickle file under " + msg += "['items']['behavior']['stimuli']\n" + msg += f"keys: {list(stimuli.keys())}" + raise RuntimeError(msg) + + stim_duration = stimuli[stim_key]['flash_interval_sec'] + + # from discussion in + # https://github.com/AllenInstitute/AllenSDK/issues/1572 + # + # 'flash_interval' contains (stimulus_duration, gray_screen_duration) + # (as @matchings said above). That second value is redundant with + # 'blank_duration_range'. I'm not sure what would happen if they were + # set to be conflicting values in the params. But it looks like + # they're always consistent. It should always be (0.25, 0.5), + # except for TRAINING_0 and TRAINING_1, which have statically + # displayed stimuli (no flashes). 
+ + if stim_duration is None: + stim_duration = np.NaN + else: + stim_duration = stim_duration[0] + + task_parameters['stimulus_duration_sec'] = stim_duration + + task_parameters['omitted_flash_fraction'] = \ + behavior['params'].get('flash_omit_probability', float('nan')) + task_parameters['response_window_sec'] = \ + [float(x) for x in doc["response_window"]] + task_parameters['reward_volume'] = config["reward"]["reward_volume"] + task_parameters['auto_reward_volume'] = doc['auto_reward_volume'] + task_parameters['session_type'] = behavior["params"]["stage"] + task_parameters['stimulus'] = next(iter(behavior["stimuli"])) + task_parameters['stimulus_distribution'] = doc["change_time_dist"] + + task_id = config['behavior']['task_id'] + if 'DoC' in task_id: + task_parameters['task'] = 'change detection' + else: + msg = "metadata.get_task_parameters does not " + msg += f"know how to parse 'task_id' = {task_id}" + raise RuntimeError(msg) + + n_stimulus_frames = 0 + for stim_type, stim_table in behavior["stimuli"].items(): + n_stimulus_frames += sum(stim_table.get("draw_log", [])) + task_parameters['n_stimulus_frames'] = n_stimulus_frames + + return task_parameters + + +class VisualCodingMetadata(DataObject, LimsReadableInterface, + JsonReadableInterface, + NwbReadableInterface, + JsonWritableInterface, + NwbWritableInterface): + """Container class for visual coding metadata""" + def __init__(self, + subject_metadata: SubjectMetadata, + ophys_session_id: VisualCodingSessionId, + equipment: Equipment, + stimulus_frame_rate: StimulusFrameRate, + session_type: VisualCodingSessionType, + behavior_session_uuid: BehaviorSessionUUID): + super().__init__(name='visualcoding_metadata', value=self) + self._subject_metadata = subject_metadata + self._ophys_session_id = ophys_session_id + self._equipment = equipment + self._stimulus_frame_rate = stimulus_frame_rate + self._session_type = session_type + self._behavior_session_uuid = behavior_session_uuid + + 
self._exclude_from_equals = set() + + @classmethod + def from_lims( + cls, + ophys_session_id: VisualCodingSessionId, + lims_db: PostgresQueryMixin + ) -> "VisualCodingMetadata": + subject_metadata = SubjectMetadata.from_lims_for_ophys_session( + ophys_session_id=ophys_session_id, lims_db=lims_db) + equipment = Equipment.from_lims_for_ophys_session( + ophys_session_id=ophys_session_id.value, lims_db=lims_db) + + stimulus_file = StimulusFile.from_lims_for_ophys_session( + db=lims_db, ophys_session_id=ophys_session_id.value) + # stimulus_frame_rate = StimulusFrameRate.from_stimulus_file( + # stimulus_file=stimulus_file) + sync_file = SyncFile.from_lims_for_ophys_session(db=lims_db, ophys_session_id=ophys_session_id.value) + stimulus_frame_rate = StimulusFrameRate.from_sync_file( + sync_file=sync_file) + session_type = VisualCodingSessionType.from_stimulus_file( + stimulus_file=stimulus_file) + + # foraging_id = ForagingId.from_lims( + # behavior_session_id=ophys_session_id.value, lims_db=lims_db) + # behavior_session_uuid = BehaviorSessionUUID.from_stimulus_file( + # stimulus_file=stimulus_file)\ + # .validate(behavior_session_id=behavior_session_id.value, + # foraging_id=foraging_id.value, + # stimulus_file=stimulus_file) + behavior_session_uuid=None + + return cls( + subject_metadata=subject_metadata, + ophys_session_id=ophys_session_id, + equipment=equipment, + stimulus_frame_rate=stimulus_frame_rate, + session_type=session_type, + behavior_session_uuid=behavior_session_uuid, + ) + + @classmethod + def from_json(cls, dict_repr: dict) -> "VisualCodingMetadata": + subject_metadata = SubjectMetadata.from_json(dict_repr=dict_repr) + behavior_session_id = BehaviorSessionId.from_json(dict_repr=dict_repr) + equipment = Equipment.from_json(dict_repr=dict_repr) + + stimulus_file = StimulusFile.from_json(dict_repr=dict_repr) + stimulus_frame_rate = StimulusFrameRate.from_stimulus_file( + stimulus_file=stimulus_file) + session_type = VisualCodingSessionType.from_stimulus_file( + 
stimulus_file=stimulus_file) + session_uuid = BehaviorSessionUUID.from_stimulus_file( + stimulus_file=stimulus_file) + + return cls( + subject_metadata=subject_metadata, + ophys_session_id=behavior_session_id, + equipment=equipment, + stimulus_frame_rate=stimulus_frame_rate, + session_type=session_type, + behavior_session_uuid=session_uuid, + ) + + @classmethod + def from_nwb(cls, nwbfile: NWBFile) -> "VisualCodingMetadata": + subject_metadata = SubjectMetadata.from_nwb(nwbfile=nwbfile) + + ophys_session_id = VisualCodingSessionId.from_nwb(nwbfile=nwbfile) + equipment = Equipment.from_nwb(nwbfile=nwbfile) + stimulus_frame_rate = StimulusFrameRate.from_nwb(nwbfile=nwbfile) + session_type = VisualCodingSessionType.from_nwb(nwbfile=nwbfile) + session_uuid = BehaviorSessionUUID.from_nwb(nwbfile=nwbfile) + + return cls( + subject_metadata=subject_metadata, + ophys_session_id=ophys_session_id, + equipment=equipment, + stimulus_frame_rate=stimulus_frame_rate, + session_type=session_type, + behavior_session_uuid=session_uuid + ) + + @property + def equipment(self) -> Equipment: + return self._equipment + + @property + def stimulus_frame_rate(self) -> float: + return self._stimulus_frame_rate.value + + @property + def session_type(self) -> str: + return self._session_type.value + + @property + def behavior_session_uuid(self) -> Optional[uuid.UUID]: + return None # self._behavior_session_uuid.value + + @property + def ophys_session_id(self) -> int: + return self._ophys_session_id.value + + @property + def subject_metadata(self): + return self._subject_metadata + + def to_json(self) -> dict: + pass + + def to_nwb(self, nwbfile: NWBFile) -> NWBFile: + self._subject_metadata.to_nwb(nwbfile=nwbfile) + self._equipment.to_nwb(nwbfile=nwbfile) + extension = load_pynwb_extension(BehaviorMetadataSchema, + 'ndx-aibs-behavior-ophys') + nwb_metadata = extension( + name='metadata', + behavior_session_id=self.ophys_session_id, + behavior_session_uuid=str(self.behavior_session_uuid), + 
stimulus_frame_rate=self.stimulus_frame_rate, + session_type=self.session_type, + equipment_name=self.equipment.value + ) + nwbfile.add_lab_meta_data(nwb_metadata) + + return nwbfile diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/visualcoding_session_id.py b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/visualcoding_session_id.py new file mode 100644 index 000000000..acb58104f --- /dev/null +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/visualcoding_session_id.py @@ -0,0 +1,52 @@ +from pynwb import NWBFile + +from cachetools import cached, LRUCache +from cachetools.keys import hashkey + +from allensdk.brain_observatory.behavior.data_objects.base \ + .readable_interfaces import \ + JsonReadableInterface, LimsReadableInterface, NwbReadableInterface +from allensdk.brain_observatory.behavior.data_objects.base\ + .writable_interfaces import \ + JsonWritableInterface +from allensdk.internal.api import PostgresQueryMixin +from allensdk.brain_observatory.behavior.data_objects import DataObject + + +def from_lims_cache_key(cls, db, ophys_experiment_id: int): + return hashkey(ophys_experiment_id) + + +class VisualCodingSessionId(DataObject, LimsReadableInterface, + JsonReadableInterface, + NwbReadableInterface, + JsonWritableInterface): + def __init__(self, ophys_session_id: int): + super().__init__(name="ophys_session_id", value=ophys_session_id) + + @classmethod + def from_json(cls, dict_repr: dict) -> "VisualCodingSessionId": + return cls(ophys_session_id=dict_repr["ophys_session_id"]) + + def to_json(self) -> dict: + return {"ophys_session_id": self.value} + + @classmethod + @cached(cache=LRUCache(maxsize=10), key=from_lims_cache_key) + def from_lims( + cls, db: PostgresQueryMixin, + ophys_experiment_id: int + ) -> "VisualCodingSessionId": + query = f""" + SELECT oe.ophys_session_id + FROM ophys_experiments oe + -- every ophys_experiment should have an ophys_session 
+ WHERE oe.id = {ophys_experiment_id}; + """ + ophys_session_id = db.fetchone(query, strict=True) + return cls(ophys_session_id=ophys_session_id) + + @classmethod + def from_nwb(cls, nwbfile: NWBFile) -> "VisualCodingSessionId": + metadata = nwbfile.lab_meta_data['metadata'] + return cls(ophys_session_id=metadata.ophys_session_id) diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/visualcoding_session_type.py b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/visualcoding_session_type.py new file mode 100644 index 000000000..8848beb93 --- /dev/null +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/behavior_metadata/visualcoding_session_type.py @@ -0,0 +1,36 @@ +from pynwb import NWBFile + +from allensdk.brain_observatory.behavior.data_files import StimulusFile +from allensdk.brain_observatory.behavior.data_objects import DataObject +from allensdk.brain_observatory.behavior.data_objects.base \ + .readable_interfaces import \ + NwbReadableInterface, StimulusFileReadableInterface + + +class VisualCodingSessionType(DataObject, StimulusFileReadableInterface, + NwbReadableInterface): + """the stimulus set used""" + def __init__(self, session_type: str): + super().__init__(name="session_type", value=session_type) + + @classmethod + def from_stimulus_file( + cls, + stimulus_file: StimulusFile) -> "SessionType": + try: + stimulus_name = \ + stimulus_file.data["stage"] + except KeyError: + raise RuntimeError( + f"Could not obtain stimulus_name/stage information from " + f"the *.pkl file ({stimulus_file.filepath}) " + f"for the behavior session to save as NWB! 
The " + f"following series of nested keys did not work: " + f"['stage']" + ) + return cls(session_type=stimulus_name) + + @classmethod + def from_nwb(cls, nwbfile: NWBFile) -> "SessionType": + metadata = nwbfile.lab_meta_data['metadata'] + return cls(session_type=metadata.session_type) diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/ophys_experiment_metadata/experiment_container_id.py b/allensdk/brain_observatory/behavior/data_objects/metadata/ophys_experiment_metadata/experiment_container_id.py index dc358de3e..d16cff672 100644 --- a/allensdk/brain_observatory/behavior/data_objects/metadata/ophys_experiment_metadata/experiment_container_id.py +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/ophys_experiment_metadata/experiment_container_id.py @@ -5,6 +5,7 @@ .readable_interfaces import \ JsonReadableInterface, LimsReadableInterface, NwbReadableInterface from allensdk.internal.api import PostgresQueryMixin +from allensdk import OneResultExpectedError class ExperimentContainerId(DataObject, LimsReadableInterface, @@ -22,7 +23,12 @@ def from_lims(cls, ophys_experiment_id: int, FROM ophys_experiments_visual_behavior_experiment_containers WHERE ophys_experiment_id = {}; """.format(ophys_experiment_id) - container_id = lims_db.fetchone(query, strict=False) + try: + container_id = lims_db.fetchone(query, strict=False) + except OneResultExpectedError: + print("No single container id found for {}!".format(ophys_experiment_id)) + container_id = lims_db.fetchall(query, strict=False) + print("Container id query returned {}".format(container_id)) return cls(experiment_container_id=container_id) @classmethod diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/ophys_experiment_metadata/imaging_plane.py b/allensdk/brain_observatory/behavior/data_objects/metadata/ophys_experiment_metadata/imaging_plane.py index 7c48b6df3..7a29ec4b9 100644 --- 
a/allensdk/brain_observatory/behavior/data_objects/metadata/ophys_experiment_metadata/imaging_plane.py +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/ophys_experiment_metadata/imaging_plane.py @@ -3,7 +3,7 @@ from pynwb import NWBFile from allensdk.brain_observatory.behavior.data_objects import DataObject, \ - BehaviorSessionId + BehaviorSessionId, VisualCodingSessionId from allensdk.brain_observatory.behavior.data_objects.base \ .readable_interfaces import \ JsonReadableInterface, NwbReadableInterface, \ @@ -35,13 +35,16 @@ def from_lims(cls, ophys_experiment_id: int, lims_db: PostgresQueryMixin, ophys_timestamps: OphysTimestamps, excitation_lambda=910.0) -> "ImagingPlane": - behavior_session_id = BehaviorSessionId.from_lims( - db=lims_db, ophys_experiment_id=ophys_experiment_id) + # behavior_session_id = BehaviorSessionId.from_lims( + # db=lims_db, ophys_experiment_id=ophys_experiment_id) + ophys_session_id = VisualCodingSessionId.from_lims(db=lims_db, ophys_experiment_id=ophys_experiment_id) ophys_frame_rate = calc_frame_rate(timestamps=ophys_timestamps.value) targeted_structure = cls._get_targeted_structure_from_lims( ophys_experiment_id=ophys_experiment_id, lims_db=lims_db) - reporter_line = ReporterLine.from_lims( - behavior_session_id=behavior_session_id.value, lims_db=lims_db) + # reporter_line = ReporterLine.from_lims( + # behavior_session_id=behavior_session_id.value, lims_db=lims_db) + reporter_line = ReporterLine.from_lims_for_ophys_session( + ophys_session_id=ophys_session_id.value, lims_db=lims_db) indicator = reporter_line.parse_indicator(warn=True) return cls(ophys_frame_rate=ophys_frame_rate, targeted_structure=targeted_structure, diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/age.py b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/age.py index 111ac436f..dd91d2bb5 100644 --- a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/age.py +++ 
b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/age.py @@ -37,6 +37,21 @@ def from_lims(cls, behavior_session_id: int, age = cls._age_code_to_days(age=age) return cls(age=age) + @classmethod + def from_lims_for_ophys_session(cls, ophys_session_id: int, + lims_db: PostgresQueryMixin) -> "Age": + query = f""" + SELECT a.name AS age + FROM ophys_sessions os + JOIN specimens s ON os.specimen_id = s.id + JOIN donors d ON s.donor_id = d.id + JOIN ages a ON a.id = d.age_id + WHERE os.id = {ophys_session_id}; + """ + age = lims_db.fetchone(query, strict=True) + age = cls._age_code_to_days(age=age) + return cls(age=age) + @classmethod def from_nwb(cls, nwbfile: NWBFile) -> "Age": age = cls._age_code_to_days(age=nwbfile.subject.age) diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/driver_line.py b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/driver_line.py index 1d0a8a34c..ccbe6b001 100644 --- a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/driver_line.py +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/driver_line.py @@ -41,6 +41,28 @@ def from_lims(cls, behavior_session_id: int, driver_line = sorted(result) return cls(driver_line=driver_line) + @classmethod + def from_lims_for_ophys_session(cls, ophys_session_id: int, + lims_db: PostgresQueryMixin) -> "DriverLine": + query = f""" + SELECT g.name AS driver_line + FROM ophys_sessions os + JOIN specimens s ON os.specimen_id = s.id + JOIN donors d ON s.donor_id = d.id + JOIN donors_genotypes dg ON dg.donor_id=d.id + JOIN genotypes g ON g.id=dg.genotype_id + JOIN genotype_types gt + ON gt.id=g.genotype_type_id AND gt.name = 'driver' + WHERE os.id={ophys_session_id}; + """ + result = lims_db.fetchall(query) + if result is None or len(result) < 1: + raise OneOrMoreResultExpectedError( + f"Expected one or more, but received: '{result}' " + f"from query:\n'{query}'") + 
driver_line = sorted(result) + return cls(driver_line=driver_line) + @classmethod def from_nwb(cls, nwbfile: NWBFile) -> "DriverLine": driver_line = sorted(list(nwbfile.subject.driver_line)) diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/full_genotype.py b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/full_genotype.py index be9977fc2..05fa3f8bb 100644 --- a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/full_genotype.py +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/full_genotype.py @@ -32,6 +32,19 @@ def from_lims(cls, behavior_session_id: int, genotype = lims_db.fetchone(query, strict=True) return cls(full_genotype=genotype) + @classmethod + def from_lims_for_ophys_session(cls, ophys_session_id: int, + lims_db: PostgresQueryMixin) -> "FullGenotype": + query = f""" + SELECT d.full_genotype + FROM ophys_sessions os + JOIN specimens s ON os.specimen_id = s.id + JOIN donors d ON s.donor_id = d.id + WHERE os.id= {ophys_session_id}; + """ + genotype = lims_db.fetchone(query, strict=True) + return cls(full_genotype=genotype) + @classmethod def from_nwb(cls, nwbfile: NWBFile) -> "FullGenotype": return cls(full_genotype=nwbfile.subject.genotype) diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/mouse_id.py b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/mouse_id.py index d29b9069d..531b387ac 100644 --- a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/mouse_id.py +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/mouse_id.py @@ -37,6 +37,25 @@ def from_lims(cls, behavior_session_id: int, mouse_id = int(lims_db.fetchone(query, strict=True)) return cls(mouse_id=mouse_id) + @classmethod + def from_lims_for_ophys_session(cls, ophys_session_id: int, + lims_db: PostgresQueryMixin) -> "MouseId": + # TODO: Should this even be 
included? + # Found sometimes there were entries with NONE which is + # why they are filtered out; also many entries in the table + # match the donor_id, which is why used DISTINCT + query = f""" + SELECT DISTINCT(sp.external_specimen_name) + FROM ophys_sessions os + JOIN specimens s ON os.specimen_id = s.id + JOIN donors d ON s.donor_id = d.id + JOIN specimens sp ON sp.donor_id=d.id + WHERE os.id={ophys_session_id} + AND sp.external_specimen_name IS NOT NULL; + """ + mouse_id = int(lims_db.fetchone(query, strict=True)) + return cls(mouse_id=mouse_id) + @classmethod + def from_nwb(cls, nwbfile: NWBFile) -> "MouseId": + return cls(mouse_id=int(nwbfile.subject.subject_id)) diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/reporter_line.py b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/reporter_line.py index 56fcfb838..a36368c85 100644 --- a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/reporter_line.py +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/reporter_line.py @@ -44,6 +44,52 @@ def from_lims(cls, behavior_session_id: int, reporter_line = cls.parse(reporter_line=result, warn=True) return cls(reporter_line=reporter_line) + @classmethod + def from_lims_for_ophys_session(cls, ophys_session_id: int, + lims_db: PostgresQueryMixin) -> "ReporterLine": + query = f""" + SELECT g.name AS reporter_line + FROM ophys_sessions os + JOIN specimens s ON os.specimen_id = s.id + JOIN donors d ON s.donor_id=d.id + JOIN donors_genotypes dg ON dg.donor_id=d.id + JOIN genotypes g ON g.id=dg.genotype_id + JOIN genotype_types gt + ON gt.id=g.genotype_type_id AND gt.name = 'reporter' + WHERE os.id={ophys_session_id}; + """ + result = lims_db.fetchall(query) + if result is None or len(result) < 1: + raise OneOrMoreResultExpectedError( + f"Expected one or more, but received: '{result}' " + f"from query:\n'{query}'") + reporter_line = 
cls.parse(reporter_line=result, warn=True) + return cls(reporter_line=reporter_line) + + + @classmethod + def from_lims_for_ophys_session(cls, ophys_session_id: int, + lims_db: PostgresQueryMixin) -> "ReporterLine": + query = f""" + SELECT g.name AS reporter_line + FROM ophys_sessions os + JOIN specimens s ON os.specimen_id = s.id + JOIN donors d ON s.donor_id = d.id + JOIN donors_genotypes dg ON dg.donor_id=d.id + JOIN genotypes g ON g.id=dg.genotype_id + JOIN genotype_types gt + ON gt.id=g.genotype_type_id AND gt.name = 'reporter' + WHERE os.id={ophys_session_id}; + """ + result = lims_db.fetchall(query) + if result is None or len(result) < 1: + raise OneOrMoreResultExpectedError( + f"Expected one or more, but received: '{result}' " + f"from query:\n'{query}'") + reporter_line = cls.parse(reporter_line=result, warn=True) + return cls(reporter_line=reporter_line) + + @classmethod def from_nwb(cls, nwbfile: NWBFile) -> "ReporterLine": return cls(reporter_line=nwbfile.subject.reporter_line) diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/sex.py b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/sex.py index aa81242fb..f674aba85 100644 --- a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/sex.py +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/sex.py @@ -36,6 +36,21 @@ def from_lims(cls, behavior_session_id: int, sex = lims_db.fetchone(query, strict=True) return cls(sex=sex) + + @classmethod + def from_lims_for_ophys_session(cls, ophys_session_id: int, + lims_db: PostgresQueryMixin) -> "Sex": + query = f""" + SELECT g.name AS sex + FROM ophys_sessions os + JOIN specimens s ON os.specimen_id = s.id + JOIN donors d ON s.donor_id = d.id + JOIN genders g ON g.id = d.gender_id + WHERE os.id = {ophys_session_id}; + """ + sex = lims_db.fetchone(query, strict=True) + return cls(sex=sex) + @classmethod def from_nwb(cls, nwbfile: NWBFile) -> "Sex": 
return cls(sex=nwbfile.subject.sex) diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/subject_metadata.py b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/subject_metadata.py index 710a07ed4..59fa835aa 100644 --- a/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/subject_metadata.py +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/subject_metadata/subject_metadata.py @@ -79,6 +79,32 @@ def from_lims(cls, reporter_line=reporter_line ) + @classmethod + def from_lims_for_ophys_session(cls, + ophys_session_id: BehaviorSessionId, + lims_db: PostgresQueryMixin) -> "SubjectMetadata": + sex = Sex.from_lims_for_ophys_session(ophys_session_id=ophys_session_id.value, + lims_db=lims_db) + age = Age.from_lims_for_ophys_session(ophys_session_id=ophys_session_id.value, + lims_db=lims_db) + reporter_line = ReporterLine.from_lims_for_ophys_session( + ophys_session_id=ophys_session_id.value, lims_db=lims_db) + full_genotype = FullGenotype.from_lims_for_ophys_session( + ophys_session_id=ophys_session_id.value, lims_db=lims_db) + driver_line = DriverLine.from_lims_for_ophys_session( + ophys_session_id=ophys_session_id.value, lims_db=lims_db) + mouse_id = MouseId.from_lims_for_ophys_session( + ophys_session_id=ophys_session_id.value, + lims_db=lims_db) + return cls( + sex=sex, + age=age, + full_genotype=full_genotype, + driver_line=driver_line, + mouse_id=mouse_id, + reporter_line=reporter_line + ) + @classmethod def from_json(cls, dict_repr: dict) -> "SubjectMetadata": sex = Sex.from_json(dict_repr=dict_repr) diff --git a/allensdk/brain_observatory/behavior/data_objects/metadata/visualcoding_ophys_metadata.py b/allensdk/brain_observatory/behavior/data_objects/metadata/visualcoding_ophys_metadata.py new file mode 100644 index 000000000..ef80cdf13 --- /dev/null +++ b/allensdk/brain_observatory/behavior/data_objects/metadata/visualcoding_ophys_metadata.py @@ -0,0 +1,171 @@ +from 
typing import Union + +from pynwb import NWBFile + +from allensdk.brain_observatory.behavior.data_objects import DataObject, \ + BehaviorSessionId, VisualCodingSessionId +from allensdk.brain_observatory.behavior.data_objects.base \ + .readable_interfaces import \ + JsonReadableInterface, NwbReadableInterface, \ + LimsReadableInterface +from allensdk.brain_observatory.behavior.data_objects.base\ + .writable_interfaces import \ + NwbWritableInterface +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .behavior_metadata.behavior_metadata import \ + BehaviorMetadata +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .behavior_metadata.visualcoding_metadata import \ + VisualCodingMetadata +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .ophys_experiment_metadata.multi_plane_metadata\ + .multi_plane_metadata import \ + MultiplaneMetadata +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .ophys_experiment_metadata.ophys_experiment_metadata import \ + OphysExperimentMetadata +from allensdk.brain_observatory.behavior.schemas import \ + OphysBehaviorMetadataSchema +from allensdk.brain_observatory.nwb import load_pynwb_extension +from allensdk.internal.api import PostgresQueryMixin + + +class VisualCodingOphysMetadata(DataObject, LimsReadableInterface, + JsonReadableInterface, NwbReadableInterface, + NwbWritableInterface): + def __init__(self, visualcoding_metadata: VisualCodingMetadata, + ophys_metadata: Union[OphysExperimentMetadata, + MultiplaneMetadata]): + super().__init__(name='visualcoding_ophys_metadata', value=self) + + self._visualcoding_metadata = visualcoding_metadata + self._ophys_metadata = ophys_metadata + + @property + def visualcoding_metadata(self) -> VisualCodingMetadata: + return self._visualcoding_metadata + + @property + def ophys_metadata(self) -> Union["OphysExperimentMetadata", + "MultiplaneMetadata"]: + return self._ophys_metadata + + @classmethod + def from_lims(cls, 
ophys_experiment_id: int, + lims_db: PostgresQueryMixin, + is_multiplane=False) -> "VisualCodingOphysMetadata": + """ + + Parameters + ---------- + ophys_experiment_id + lims_db + is_multiplane + Whether to fetch metadata for an experiment that is part of a + container containing multiple imaging planes + """ + # should be an ophys_session_id + ophys_session_id = VisualCodingSessionId.from_lims( + ophys_experiment_id=ophys_experiment_id, db=lims_db) + + visualcoding_metadata = VisualCodingMetadata.from_lims( + ophys_session_id=ophys_session_id, lims_db=lims_db) + + if is_multiplane: + ophys_metadata = MultiplaneMetadata.from_lims( + ophys_experiment_id=ophys_experiment_id, lims_db=lims_db) + else: + ophys_metadata = OphysExperimentMetadata.from_lims( + ophys_experiment_id=ophys_experiment_id, lims_db=lims_db) + + return cls(visualcoding_metadata=visualcoding_metadata, + ophys_metadata=ophys_metadata) + + @classmethod + def from_json(cls, dict_repr: dict, + is_multiplane=False) -> "VisualCodingOphysMetadata": + """ + + Parameters + ---------- + dict_repr + is_multiplane + Whether to fetch metadata for an experiment that is part of a + container containing multiple imaging planes + + Returns + ------- + + """ + behavior_metadata = BehaviorMetadata.from_json(dict_repr=dict_repr) + + if is_multiplane: + ophys_metadata = MultiplaneMetadata.from_json( + dict_repr=dict_repr) + else: + ophys_metadata = OphysExperimentMetadata.from_json( + dict_repr=dict_repr) + + return cls(visualcoding_metadata=behavior_metadata, + ophys_metadata=ophys_metadata) + + @classmethod + def from_nwb(cls, nwbfile: NWBFile, + is_multiplane=False) -> "VisualCodingOphysMetadata": + """ + + Parameters + ---------- + nwbfile + is_multiplane + Whether to fetch metadata for an experiment that is part of a + container containing multiple imaging planes + """ + behavior_metadata = BehaviorMetadata.from_nwb(nwbfile=nwbfile) + + if is_multiplane: + ophys_metadata = MultiplaneMetadata.from_nwb( + nwbfile=nwbfile) + else: + 
ophys_metadata = OphysExperimentMetadata.from_nwb( + nwbfile=nwbfile) + + return cls(visualcoding_metadata=behavior_metadata, + ophys_metadata=ophys_metadata) + + def to_nwb(self, nwbfile: NWBFile) -> NWBFile: + self._visualcoding_metadata.subject_metadata.to_nwb(nwbfile=nwbfile) + self._visualcoding_metadata.equipment.to_nwb(nwbfile=nwbfile) + + nwb_extension = load_pynwb_extension( + OphysBehaviorMetadataSchema, 'ndx-aibs-behavior-ophys') + + behavior_meta = self._visualcoding_metadata + ophys_meta = self._ophys_metadata + + if isinstance(ophys_meta, MultiplaneMetadata): + imaging_plane_group = ophys_meta.imaging_plane_group + imaging_plane_group_count = ophys_meta.imaging_plane_group_count + else: + imaging_plane_group_count = 0 + imaging_plane_group = -1 + + nwb_metadata = nwb_extension( + name='metadata', + ophys_session_id=ophys_meta.ophys_session_id, + field_of_view_width=ophys_meta.field_of_view_shape.width, + field_of_view_height=ophys_meta.field_of_view_shape.height, + imaging_plane_group=imaging_plane_group, + imaging_plane_group_count=imaging_plane_group_count, + stimulus_frame_rate=behavior_meta.stimulus_frame_rate, + experiment_container_id=ophys_meta.experiment_container_id, + ophys_experiment_id=ophys_meta.ophys_experiment_id, + session_type=behavior_meta.session_type, + equipment_name=behavior_meta.equipment.value, + imaging_depth=ophys_meta.imaging_depth, + behavior_session_uuid=str(behavior_meta.behavior_session_uuid), + behavior_session_id=behavior_meta.behavior_session_id + ) + nwbfile.add_lab_meta_data(nwb_metadata) + + return nwbfile diff --git a/allensdk/brain_observatory/behavior/data_objects/running_speed/running_acquisition.py b/allensdk/brain_observatory/behavior/data_objects/running_speed/running_acquisition.py index 2f640e158..908b6e70c 100644 --- a/allensdk/brain_observatory/behavior/data_objects/running_speed/running_acquisition.py +++ b/allensdk/brain_observatory/behavior/data_objects/running_speed/running_acquisition.py @@ -21,7 +21,7 @@ 
DataObject, StimulusTimestamps ) from allensdk.brain_observatory.behavior.data_files import ( - StimulusFile + StimulusFile, SyncFile ) from allensdk.brain_observatory.behavior.data_objects.running_speed.running_processing import ( # noqa: E501 get_running_df @@ -139,6 +139,31 @@ def from_lims( stimulus_timestamps=stimulus_timestamps, ) + @classmethod + @cached(cache=LRUCache(maxsize=10), key=from_lims_cache_key) + def from_lims_for_ophys_session( + cls, + db: PostgresQueryMixin, + ophys_session_id: int, + ophys_experiment_id: Optional[int] = None, + ) -> "RunningAcquisition": + + stimulus_file = StimulusFile.from_lims_for_ophys_session(db, ophys_session_id) + sync_file = SyncFile.from_lims_for_ophys_session(db=db, ophys_session_id=ophys_session_id) + stimulus_timestamps = StimulusTimestamps.from_sync_file( + sync_file=sync_file) + + running_acq_df = get_running_df( + data=stimulus_file.data, time=stimulus_timestamps.value, + ) + running_acq_df.drop("speed", axis=1, inplace=True) + + return cls( + running_acquisition=running_acq_df, + stimulus_file=stimulus_file, + stimulus_timestamps=stimulus_timestamps, + ) + @classmethod def from_nwb( cls, diff --git a/allensdk/brain_observatory/behavior/data_objects/running_speed/running_processing.py b/allensdk/brain_observatory/behavior/data_objects/running_speed/running_processing.py index 7222cb7d1..456717fcd 100644 --- a/allensdk/brain_observatory/behavior/data_objects/running_speed/running_processing.py +++ b/allensdk/brain_observatory/behavior/data_objects/running_speed/running_processing.py @@ -351,8 +351,13 @@ def get_running_df( their own corrections and compute running speed from the raw source. 
""" - v_sig = data["items"]["behavior"]["encoders"][0]["vsig"] - v_in = data["items"]["behavior"]["encoders"][0]["vin"] + try: + v_sig = data["items"]["behavior"]["encoders"][0]["vsig"] + v_in = data["items"]["behavior"]["encoders"][0]["vin"] + except KeyError: + v_sig = data["items"]["foraging"]["encoders"][0]["vsig"] + v_in = data["items"]["foraging"]["encoders"][0]["vin"] + if len(v_in) > len(time) + 1: error_string = ("length of v_in ({}) cannot be longer than length of " @@ -372,7 +377,11 @@ def get_running_df( # dx = 'd_theta' = angular change # There are some issues with angular change in the raw data so we # recompute this value - dx_raw = data["items"]["behavior"]["encoders"][0]["dx"] + try: + dx_raw = data["items"]["behavior"]["encoders"][0]["dx"] + except KeyError: + dx_raw = data["items"]["foraging"]["encoders"][0]["dx"] + # Identify "wraps" in the voltage signal that need to be unwrapped # This is where the encoder switches from 0V to 5V or vice versa pos_wraps, neg_wraps = _identify_wraps( diff --git a/allensdk/brain_observatory/behavior/data_objects/running_speed/running_speed.py b/allensdk/brain_observatory/behavior/data_objects/running_speed/running_speed.py index 694d5f49f..a9a372241 100644 --- a/allensdk/brain_observatory/behavior/data_objects/running_speed/running_speed.py +++ b/allensdk/brain_observatory/behavior/data_objects/running_speed/running_speed.py @@ -17,7 +17,7 @@ DataObject, StimulusTimestamps ) from allensdk.brain_observatory.behavior.data_files import ( - StimulusFile + StimulusFile, SyncFile ) from allensdk.brain_observatory.behavior.data_objects.running_speed.running_processing import ( # noqa: E501 get_running_df @@ -127,6 +127,32 @@ def from_lims( filtered=filtered ) + @classmethod + def from_lims_for_ophys_session( + cls, + db: PostgresQueryMixin, + ophys_session_id: int, + filtered: bool = True, + zscore_threshold: float = 10.0, + stimulus_timestamps: Optional[StimulusTimestamps] = None + ) -> "RunningSpeed": + stimulus_file 
= StimulusFile.from_lims_for_ophys_session(db, ophys_session_id) + if stimulus_timestamps is None: + sync_file = SyncFile.from_lims_for_ophys_session(db=db, ophys_session_id=ophys_session_id) + stimulus_timestamps = StimulusTimestamps.from_sync_file( + sync_file=sync_file) + + running_speed = cls._get_running_speed_df( + stimulus_file, stimulus_timestamps, filtered, zscore_threshold + ) + return cls( + running_speed=running_speed, + stimulus_file=stimulus_file, + stimulus_timestamps=stimulus_timestamps, + filtered=filtered + ) + + @classmethod + def from_nwb( + cls, diff --git a/allensdk/brain_observatory/behavior/data_objects/stimuli/densemovie_presentations.py b/allensdk/brain_observatory/behavior/data_objects/stimuli/densemovie_presentations.py new file mode 100644 index 000000000..e9e1169be --- /dev/null +++ b/allensdk/brain_observatory/behavior/data_objects/stimuli/densemovie_presentations.py @@ -0,0 +1,134 @@ +from typing import Optional, List + +import pandas as pd +import numpy as np +from pynwb import NWBFile + +from allensdk.brain_observatory.behavior.data_files import StimulusFile +from allensdk.brain_observatory.behavior.data_objects import DataObject, \ + StimulusTimestamps +from allensdk.brain_observatory.behavior.data_objects.base \ + .readable_interfaces import \ + StimulusFileReadableInterface, NwbReadableInterface +from allensdk.brain_observatory.behavior.data_objects.base \ + .writable_interfaces import \ + NwbWritableInterface +from allensdk.brain_observatory.behavior.stimulus_processing import \ + get_stimulus_presentations, get_stimulus_metadata, is_change_event +from allensdk.brain_observatory.nwb import \ + create_stimulus_presentation_time_interval, get_column_name +from allensdk.brain_observatory.nwb.nwb_api import NwbApi + + +def stim_name_parse(stim_name): + + stim_name = stim_name[:-4] + components = stim_name.split('_') + + session_number = int(components[-2]) + segment_number = int(components[-1]) + + if components[-3]=='test': + 
test_or_train = 'test' + else: + test_or_train = 'train' + + return session_number, segment_number, test_or_train + +def get_original_stim_name(stage_number, segment_number, test_or_train): + + if test_or_train=='test': + original_stim_name = 'Session_test_'+str(stage_number)+'_'+str(segment_number)+'.npy' + if test_or_train=='train': + original_stim_name = 'Session_'+str(stage_number)+'_'+str(segment_number)+'.npy' + + return original_stim_name + +def shorten_stimulus_presentation(msn_stim_table): + + min_table=msn_stim_table.groupby(['stimulus_template', 'trial_number'],as_index=False).min() + max_table=msn_stim_table.groupby(['stimulus_template', 'trial_number'],as_index=False).max() + + min_table['end_frame'] = max_table['end_frame'] + min_table['end_time'] = max_table['end_time'] + min_table['duration'] = min_table['end_time']-min_table['start_time'] + min_table['stimulus_start_index'] = min_table['stimulus_index'] + min_table['stimulus_length'] = max_table['stimulus_index']+1 + + short_table = min_table + short_table = short_table.drop(columns=['stimulus_index']) + + return short_table.sort_values(by='start_frame').reset_index(drop=True) + + +class DenseMoviePresentations(DataObject, StimulusFileReadableInterface, + NwbReadableInterface, NwbWritableInterface): + """Stimulus presentations""" + def __init__(self, presentations: pd.DataFrame): + super().__init__(name='presentations', value=presentations) + + def to_nwb(self, nwbfile: NWBFile) -> NWBFile: + raise NotImplementedError + return None + + @classmethod + def from_nwb(cls, nwbfile: NWBFile) -> "DenseMoviePresentations": + raise NotImplementedError + + @classmethod + def from_stimulus_file( + cls, stimulus_file: StimulusFile, + stimulus_timestamps: StimulusTimestamps) -> "DenseMoviePresentations": + """Get stimulus presentation data. + + :param stimulus_file + :param stimulus_timestamps + + + :returns: pd.DataFrame -- + Table whose rows are stimulus presentations + (i.e. 
a given image, for a given duration) + and whose columns are presentation characteristics. + """ + timestamps = stimulus_timestamps.value + pkl_data = stimulus_file.data + + stimulus_presentation_table = pd.DataFrame() + + pre_blank = int(pkl_data['pre_blank_sec']*pkl_data['fps']) + + for stim in pkl_data['stimuli']: + + warped_stim_name = str(stim['movie_path']).split('\\')[-1] + + stage_number, segment_number, test_or_train = stim_name_parse(warped_stim_name) + original_stim_name = get_original_stim_name(stage_number, segment_number, test_or_train) + + frame_list = np.array(stim['frame_list']) + + frame_index = frame_list[frame_list!=-1][::2] + indices = np.where(frame_list!=-1)[0] + start_frames = indices[::2] + pre_blank + end_frames = start_frames + 2 + start_times = timestamps[start_frames] + end_times = timestamps[end_frames] + duration = end_times - start_times + trial_number = np.arange(frame_index.shape[0])//(np.max(frame_index)+1) + + data = np.vstack([frame_index, start_frames, end_frames, start_times, end_times, duration, trial_number]).T + temp_df = pd.DataFrame(data, columns=('stimulus_index', 'start_frame', 'end_frame', 'start_time', 'end_time', 'duration', 'trial_number')) + + temp_df['warped_stimulus'] = warped_stim_name + temp_df['stimulus_template'] = original_stim_name + temp_df['stage'] = stage_number + temp_df['segment'] = segment_number + temp_df['test_or_train'] = test_or_train + + stimulus_presentation_table = stimulus_presentation_table.append(temp_df, ignore_index=True) + + stimulus_presentation_table = stimulus_presentation_table.sort_values(by='start_frame').reset_index(drop=True) + + return DenseMoviePresentations(presentations=stimulus_presentation_table) + + + diff --git a/allensdk/brain_observatory/behavior/data_objects/stimuli/densemovie_stimuli.py b/allensdk/brain_observatory/behavior/data_objects/stimuli/densemovie_stimuli.py new file mode 100644 index 000000000..969b5df17 --- /dev/null +++ 
b/allensdk/brain_observatory/behavior/data_objects/stimuli/densemovie_stimuli.py @@ -0,0 +1,59 @@ +from typing import Optional, List + +from pynwb import NWBFile + +from allensdk.brain_observatory.behavior.data_files import StimulusFile +from allensdk.brain_observatory.behavior.data_objects import DataObject, \ + StimulusTimestamps +from allensdk.brain_observatory.behavior.data_objects.base \ + .readable_interfaces import \ + StimulusFileReadableInterface, NwbReadableInterface +from allensdk.brain_observatory.behavior.data_objects.base\ + .writable_interfaces import \ + NwbWritableInterface +from allensdk.brain_observatory.behavior.data_objects.stimuli.densemovie_presentations \ + import \ + DenseMoviePresentations +from allensdk.brain_observatory.behavior.data_objects.stimuli.densemovie_templates \ + import \ + DenseMovieTemplates + + +class DenseMovieStimuli(DataObject, StimulusFileReadableInterface, + NwbReadableInterface, NwbWritableInterface): + def __init__(self, presentations: DenseMoviePresentations, + templates: DenseMovieTemplates): + super().__init__(name='stimuli', value=self) + self._presentations = presentations + self._templates = templates + + @property + def presentations(self) -> DenseMoviePresentations: + return self._presentations + + @property + def templates(self) -> DenseMovieTemplates: + return self._templates + + @classmethod + def from_nwb(cls, nwbfile: NWBFile) -> "DenseMovieStimuli": + p = DenseMoviePresentations.from_nwb(nwbfile=nwbfile) + t = DenseMovieTemplates.from_nwb(nwbfile=nwbfile) + return DenseMovieStimuli(presentations=p, templates=t) + + @classmethod + def from_stimulus_file( + cls, stimulus_file: StimulusFile, + stimulus_timestamps: StimulusTimestamps) -> "DenseMovieStimuli": + p = DenseMoviePresentations.from_stimulus_file( + stimulus_file=stimulus_file, + stimulus_timestamps=stimulus_timestamps) + t = DenseMovieTemplates.from_stimulus_file(stimulus_file=stimulus_file) + return DenseMovieStimuli(presentations=p, 
templates=t) + + def to_nwb(self, nwbfile: NWBFile) -> NWBFile: + nwbfile = self._templates.to_nwb( + nwbfile=nwbfile, stimulus_presentations=self._presentations) + nwbfile = self._presentations.to_nwb(nwbfile=nwbfile) + + return nwbfile diff --git a/allensdk/brain_observatory/behavior/data_objects/stimuli/densemovie_templates.py b/allensdk/brain_observatory/behavior/data_objects/stimuli/densemovie_templates.py new file mode 100644 index 000000000..507cfbc3a --- /dev/null +++ b/allensdk/brain_observatory/behavior/data_objects/stimuli/densemovie_templates.py @@ -0,0 +1,74 @@ +import os +import numpy as np +from typing import Optional, List + +import imageio +from pynwb import NWBFile + +from allensdk.brain_observatory import nwb +from allensdk.brain_observatory.behavior.data_files import StimulusFile +from allensdk.brain_observatory.behavior.data_objects import DataObject +from allensdk.brain_observatory.behavior.data_objects.base \ + .readable_interfaces import \ + StimulusFileReadableInterface, NwbReadableInterface +from allensdk.brain_observatory.behavior.data_objects.base\ + .writable_interfaces import \ + NwbWritableInterface +from allensdk.brain_observatory.behavior.data_objects.stimuli.presentations \ + import \ + Presentations +from allensdk.brain_observatory.behavior.stimulus_processing import \ + get_stimulus_templates +from allensdk.brain_observatory.behavior.data_objects.stimuli \ + .stimulus_templates import \ + StimulusTemplate, StimulusTemplateFactory +from allensdk.brain_observatory.behavior.write_nwb.extensions\ + .stimulus_template.ndx_stimulus_template import \ + StimulusTemplateExtension +from allensdk.internal.core.lims_utilities import safe_system_path + +from allensdk.brain_observatory.behavior.data_objects.stimuli.densemovie_presentations import get_original_stim_name, stim_name_parse +from pathlib import Path + + +class DenseMovieTemplates(DataObject, StimulusFileReadableInterface, + NwbReadableInterface, NwbWritableInterface): + def 
__init__(self, templates: dict): + super().__init__(name='stimulus_templates', value=templates) + + @classmethod + def from_stimulus_file( + cls, stimulus_file: StimulusFile) -> "DenseMovieTemplates": + """Get stimulus templates (movies, scenes) for behavior session. + + FOR NOW: This returns a dict of dicts for warped and unwarped stimuli. Keys are stim names from the presentation table and values are paths to the npy array.""" + + warped_path = Path('/allen/programs/braintv/workgroups/nc-ophys/ImageData/Dan/ten_session_movies') + original_path = Path('/allen/programs/braintv/workgroups/cortexmodels/michaelbu/Stimuli/SignalNoise/arrays') + unwarped_path = Path('/allen/programs/mindscope/workgroups/task-trained/michaelbu/signal_noise/stimuli/') + + stim_dict = {'warped': {}, 'unwarped': {}, 'original': {}} + + pkl_data = stimulus_file.data + + for stim in pkl_data['stimuli']: + + warped_stim_name = str(stim['movie_path']).split('\\')[-1] + unwarped_stim_name = warped_stim_name.replace('warped', 'unwarped') + + stage_number, segment_number, test_or_train = stim_name_parse(warped_stim_name) + original_stim_name = get_original_stim_name(stage_number, segment_number, test_or_train) + + stim_dict['warped'][original_stim_name] = warped_path / warped_stim_name + stim_dict['original'][original_stim_name] = original_path / original_stim_name + stim_dict['unwarped'][original_stim_name] = unwarped_path / unwarped_stim_name + + return DenseMovieTemplates(templates=stim_dict) + + @classmethod + def from_nwb(cls, nwbfile: NWBFile) -> "DenseMovieTemplates": + raise NotImplementedError + + def to_nwb(self, nwbfile: NWBFile, + stimulus_presentations: Presentations) -> NWBFile: + raise NotImplementedError diff --git a/allensdk/brain_observatory/behavior/data_objects/stimuli/util.py b/allensdk/brain_observatory/behavior/data_objects/stimuli/util.py index b3f38b7eb..aee8b1d85 100644 --- a/allensdk/brain_observatory/behavior/data_objects/stimuli/util.py +++ 
b/allensdk/brain_observatory/behavior/data_objects/stimuli/util.py @@ -50,7 +50,8 @@ def calculate_monitor_delay(sync_file: SyncFile, 'CAM2P.3': 0.021390, 'CAM2P.4': 0.021102, 'CAM2P.5': 0.021192, - 'MESO.1': 0.03613} + 'MESO.1': 0.03613, + 'MESO.2': 0.03613} if equipment_name not in delay_lookup: msg = warning_msg diff --git a/allensdk/brain_observatory/behavior/data_objects/timestamps/stimulus_timestamps/timestamps_processing.py b/allensdk/brain_observatory/behavior/data_objects/timestamps/stimulus_timestamps/timestamps_processing.py index b34f9b0e1..65da35c9d 100644 --- a/allensdk/brain_observatory/behavior/data_objects/timestamps/stimulus_timestamps/timestamps_processing.py +++ b/allensdk/brain_observatory/behavior/data_objects/timestamps/stimulus_timestamps/timestamps_processing.py @@ -23,7 +23,10 @@ def get_behavior_stimulus_timestamps(stimulus_pkl: dict) -> np.ndarray: np.ndarray Timestamps (in seconds) for presented stimulus frames during a session. """ - vsyncs = stimulus_pkl["items"]["behavior"]["intervalsms"] + try: + vsyncs = stimulus_pkl["items"]["behavior"]["intervalsms"] + except KeyError: + vsyncs = stimulus_pkl["items"]["foraging"]["intervalsms"] stimulus_timestamps = np.hstack((0, vsyncs)).cumsum() / 1000.0 return stimulus_timestamps diff --git a/allensdk/brain_observatory/behavior/eye_tracking_processing.py b/allensdk/brain_observatory/behavior/eye_tracking_processing.py index 733348779..9100dbdc8 100644 --- a/allensdk/brain_observatory/behavior/eye_tracking_processing.py +++ b/allensdk/brain_observatory/behavior/eye_tracking_processing.py @@ -3,6 +3,8 @@ import numpy as np import pandas as pd +import logging + from scipy import ndimage, stats @@ -200,6 +202,13 @@ def process_eye_tracking_data(eye_data: pd.DataFrame, frame_times = frame_times[:n_eye_frames] n_sync = len(frame_times) + if n_eye_frames > n_sync: + logging.warning("The number of eye tracking frames is greater than the number of sync file frame times. 
\ + Truncating extra eye tracking frames.") + eye_data = eye_data.head(n_sync) + n_eye_frames = len(eye_data.index) + + if n_sync != n_eye_frames: raise RuntimeError(f"Error! The number of sync file frame times " f"({len(frame_times)}) does not match the " diff --git a/allensdk/brain_observatory/behavior/sync/__init__.py b/allensdk/brain_observatory/behavior/sync/__init__.py index 50610532a..f88033ec6 100644 --- a/allensdk/brain_observatory/behavior/sync/__init__.py +++ b/allensdk/brain_observatory/behavior/sync/__init__.py @@ -32,7 +32,7 @@ def get_raw_stimulus_frames( """ try: - return dataset.get_edges("falling",'stim_vsync', "seconds") + return dataset.get_edges("falling",['stim_vsync','vsync_stim'], "seconds") except KeyError: if not permissive: raise @@ -63,11 +63,15 @@ def get_ophys_frames( """ try: - return dataset.get_edges("rising", '2p_vsync', "seconds") + return dataset.get_edges("rising", ['2p_vsync','vsync_2p'], "seconds") except KeyError: + # try: + # return dataset.get_edges("rising", 'vsync_2p', "seconds") + # except KeyError: if not permissive: raise return + def get_lick_times( @@ -142,9 +146,11 @@ def get_trigger( correspond to acquired ophys frames. 
""" - return dataset.get_edges( - "rising", ["2p_trigger", "acq_trigger"], "seconds", permissive) - + try: + return dataset.get_edges( + "rising", ["2p_trigger", "acq_trigger", "2p_acq_trigger","stim_running"], "seconds", permissive) + except KeyError: + return None def get_eye_tracking( dataset: SyncDataset, @@ -166,7 +172,7 @@ def get_eye_tracking( """ return dataset.get_edges( - "rising", ["cam2_exposure", "eye_tracking"], "seconds", permissive) + "rising", ["cam2_exposure", "eye_tracking", "eye_frame_received"], "seconds", permissive) def get_behavior_monitoring( @@ -190,7 +196,7 @@ def get_behavior_monitoring( """ return dataset.get_edges( - "rising", ["cam1_exposure", "behavior_monitoring"], "seconds", + "rising", ["cam1_exposure", "behavior_monitoring", "beh_frame_received"], "seconds", permissive) diff --git a/allensdk/brain_observatory/behavior/visualcoding_ophys_experiment.py b/allensdk/brain_observatory/behavior/visualcoding_ophys_experiment.py new file mode 100644 index 000000000..447e5bc60 --- /dev/null +++ b/allensdk/brain_observatory/behavior/visualcoding_ophys_experiment.py @@ -0,0 +1,792 @@ +from typing import Optional + +import numpy as np +import pandas as pd +from allensdk.brain_observatory.behavior.data_objects.metadata.behavior_metadata.visualcoding_session_id import VisualCodingSessionId +from allensdk.brain_observatory.behavior.visualcoding_session import VisualCodingSession +from pynwb import NWBFile + +from allensdk.brain_observatory.behavior.behavior_session import ( + BehaviorSession) +from allensdk.brain_observatory.behavior.data_files import SyncFile +from allensdk.brain_observatory.behavior.data_files.eye_tracking_file import \ + EyeTrackingFile +from allensdk.brain_observatory.behavior.data_files\ + .rigid_motion_transform_file import \ + RigidMotionTransformFile +from allensdk.brain_observatory.behavior.data_objects import \ + BehaviorSessionId, StimulusTimestamps +from allensdk.brain_observatory.behavior.data_objects.cell_specimens 
\ + .cell_specimens import \ + CellSpecimens, EventsParams +from allensdk.brain_observatory.behavior.data_objects.eye_tracking\ + .eye_tracking_table import \ + EyeTrackingTable +from allensdk.brain_observatory.behavior.data_objects.eye_tracking\ + .rig_geometry import \ + RigGeometry as EyeTrackingRigGeometry +from allensdk.brain_observatory.behavior.data_objects.metadata \ + .behavior_metadata.date_of_acquisition import \ + DateOfAcquisitionOphys, DateOfAcquisition +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .behavior_ophys_metadata import \ + BehaviorOphysMetadata +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .visualcoding_ophys_metadata import \ + VisualCodingOphysMetadata +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .ophys_experiment_metadata.multi_plane_metadata.imaging_plane_group \ + import \ + ImagingPlaneGroup +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .ophys_experiment_metadata.multi_plane_metadata.multi_plane_metadata \ + import \ + MultiplaneMetadata +from allensdk.brain_observatory.behavior.data_objects.motion_correction \ + import \ + MotionCorrection +from allensdk.brain_observatory.behavior.data_objects.projections import \ + Projections +from allensdk.brain_observatory.behavior.data_objects.stimuli.util import \ + calculate_monitor_delay +from allensdk.brain_observatory.behavior.data_objects.timestamps \ + .ophys_timestamps import \ + OphysTimestamps, OphysTimestampsMultiplane +from allensdk.core.auth_config import LIMS_DB_CREDENTIAL_MAP +from allensdk.deprecated import legacy +from allensdk.brain_observatory.behavior.image_api import Image +from allensdk.internal.api import db_connection_creator +import os + +class VisualCodingOphysExperiment(VisualCodingSession): + """Represents data from a single Visual Behavior Ophys imaging session. + Initialize by using class methods `from_lims` or `from_nwb_path`. 
+ """ + + def __init__(self, + visualcoding_session: VisualCodingSession, + projections: Projections, + ophys_timestamps: OphysTimestamps, + cell_specimens: CellSpecimens, + metadata: VisualCodingOphysMetadata, + motion_correction: MotionCorrection, + eye_tracking_table: Optional[EyeTrackingTable], + eye_tracking_rig_geometry: Optional[EyeTrackingRigGeometry], + date_of_acquisition: DateOfAcquisition): + super().__init__( + ophys_session_id=visualcoding_session._ophys_session_id, + licks=visualcoding_session._licks, + metadata=visualcoding_session._metadata, + raw_running_speed=visualcoding_session._raw_running_speed, + rewards=visualcoding_session._rewards, + running_speed=visualcoding_session._running_speed, + running_acquisition=visualcoding_session._running_acquisition, + stimuli=visualcoding_session._stimuli, + stimulus_timestamps=visualcoding_session._stimulus_timestamps, + task_parameters=visualcoding_session._task_parameters, + trials=visualcoding_session._trials, + date_of_acquisition=date_of_acquisition + ) + + self._metadata = metadata + self._projections = projections + self._ophys_timestamps = ophys_timestamps + self._cell_specimens = cell_specimens + self._motion_correction = motion_correction + self._eye_tracking = eye_tracking_table + self._eye_tracking_rig_geometry = eye_tracking_rig_geometry + + self._event_table = None + + def to_nwb(self) -> NWBFile: + nwbfile = super().to_nwb(add_metadata=False) + + self._metadata.to_nwb(nwbfile=nwbfile) + self._projections.to_nwb(nwbfile=nwbfile) + self._cell_specimens.to_nwb(nwbfile=nwbfile, + ophys_timestamps=self._ophys_timestamps) + self._motion_correction.to_nwb(nwbfile=nwbfile) + self._eye_tracking.to_nwb(nwbfile=nwbfile) + self._eye_tracking_rig_geometry.to_nwb(nwbfile=nwbfile) + + return nwbfile + # ==================== class and utility methods ====================== + + @classmethod + def from_lims(cls, + ophys_experiment_id: int, + eye_tracking_z_threshold: float = 3.0, + 
eye_tracking_dilation_frames: int = 2, + events_filter_scale: float = 2.0, + events_filter_n_time_steps: int = 20, + exclude_invalid_rois=True, + skip_eye_tracking=False) -> \ + "VisualCodingOphysExperiment": + """ + Parameters + ---------- + ophys_experiment_id + eye_tracking_z_threshold + See `BehaviorOphysExperiment.from_nwb` + eye_tracking_dilation_frames + See `BehaviorOphysExperiment.from_nwb` + events_filter_scale + See `BehaviorOphysExperiment.from_nwb` + events_filter_n_time_steps + See `BehaviorOphysExperiment.from_nwb` + exclude_invalid_rois + Whether to exclude invalid rois + skip_eye_tracking + Used to skip returning eye tracking data + """ + def _is_multi_plane_session(): + imaging_plane_group_meta = ImagingPlaneGroup.from_lims( + ophys_experiment_id=ophys_experiment_id, lims_db=lims_db) + return cls._is_multi_plane_session( + imaging_plane_group_meta=imaging_plane_group_meta) + + def _get_motion_correction(): + rigid_motion_transform_file = RigidMotionTransformFile.from_lims( + ophys_experiment_id=ophys_experiment_id, db=lims_db + ) + return MotionCorrection.from_data_file( + rigid_motion_transform_file=rigid_motion_transform_file) + + def _get_eye_tracking_table(sync_file: SyncFile): + eye_tracking_file = EyeTrackingFile.from_lims( + db=lims_db, ophys_experiment_id=ophys_experiment_id) + eye_tracking_table = EyeTrackingTable.from_data_file( + data_file=eye_tracking_file, + sync_file=sync_file, + z_threshold=eye_tracking_z_threshold, + dilation_frames=eye_tracking_dilation_frames + ) + return eye_tracking_table + + lims_db = db_connection_creator( + fallback_credentials=LIMS_DB_CREDENTIAL_MAP + ) + sync_file = SyncFile.from_lims(db=lims_db, + ophys_experiment_id=ophys_experiment_id) + stimulus_timestamps = StimulusTimestamps.from_sync_file( + sync_file=sync_file) + ophys_session_id = VisualCodingSessionId.from_lims(db=lims_db, ophys_experiment_id=ophys_experiment_id) + is_multiplane_session = _is_multi_plane_session() + meta = 
VisualCodingOphysMetadata.from_lims( + ophys_experiment_id=ophys_experiment_id, lims_db=lims_db, + is_multiplane=is_multiplane_session + ) + monitor_delay = calculate_monitor_delay( + sync_file=sync_file, equipment=meta.visualcoding_metadata.equipment) + date_of_acquisition = DateOfAcquisitionOphys.from_lims( + ophys_experiment_id=ophys_experiment_id, lims_db=lims_db) + visualcoding_session = VisualCodingSession.from_lims( + lims_db=lims_db, + ophys_session_id=ophys_session_id.value, + stimulus_timestamps=stimulus_timestamps, + monitor_delay=monitor_delay, + date_of_acquisition=date_of_acquisition + ) + if is_multiplane_session: + ophys_timestamps = OphysTimestampsMultiplane.from_sync_file( + sync_file=sync_file, + group_count=meta.ophys_metadata.imaging_plane_group_count, + plane_group=meta.ophys_metadata.imaging_plane_group + ) + else: + ophys_timestamps = OphysTimestamps.from_sync_file( + sync_file=sync_file) + + projections = Projections.from_lims( + ophys_experiment_id=ophys_experiment_id, lims_db=lims_db) + # cell_specimens = CellSpecimens.from_lims( + # ophys_experiment_id=ophys_experiment_id, lims_db=lims_db, + # ophys_timestamps=ophys_timestamps, + # segmentation_mask_image_spacing=projections.max_projection.spacing, + # events_params=EventsParams( + # filter_scale=events_filter_scale, + # filter_n_time_steps=events_filter_n_time_steps), + # exclude_invalid_rois=exclude_invalid_rois + # ) + cell_specimens = CellSpecimens.from_lims( + ophys_experiment_id=ophys_experiment_id, lims_db=lims_db, + ophys_timestamps=ophys_timestamps, + segmentation_mask_image_spacing=projections.max_projection.spacing, + events_params=EventsParams( + filter_scale=events_filter_scale, + filter_n_time_steps=events_filter_n_time_steps), + exclude_invalid_rois=exclude_invalid_rois, + include_events=False + ) + motion_correction = _get_motion_correction() + if skip_eye_tracking: + eye_tracking_table = None + eye_tracking_rig_geometry = None + else: + eye_tracking_table = 
_get_eye_tracking_table(sync_file=sync_file) + eye_tracking_rig_geometry = EyeTrackingRigGeometry.from_lims( + ophys_experiment_id=ophys_experiment_id, lims_db=lims_db) + + return VisualCodingOphysExperiment( + visualcoding_session=visualcoding_session, + cell_specimens=cell_specimens, + ophys_timestamps=ophys_timestamps, + metadata=meta, + projections=projections, + motion_correction=motion_correction, + eye_tracking_table=eye_tracking_table, + eye_tracking_rig_geometry=eye_tracking_rig_geometry, + date_of_acquisition=date_of_acquisition + ) + + @classmethod + def from_nwb(cls, nwbfile: NWBFile, + eye_tracking_z_threshold: float = 3.0, + eye_tracking_dilation_frames: int = 2, + events_filter_scale: float = 2.0, + events_filter_n_time_steps: int = 20, + exclude_invalid_rois=True + ) -> "BehaviorOphysExperiment": + """ + + Parameters + ---------- + nwbfile + eye_tracking_z_threshold : float, optional + The z-threshold when determining which frames likely contain + outliers for eye or pupil areas. Influences which frames + are considered 'likely blinks'. 
By default 3.0 + eye_tracking_dilation_frames : int, optional + Determines the number of adjacent frames that will be marked + as 'likely_blink' when performing blink detection for + `eye_tracking` data, by default 2 + events_filter_scale : float, optional + Stdev of halfnorm distribution used to convolve ophys events with + a 1d causal half-gaussian filter to smooth it for visualization, + by default 2.0 + events_filter_n_time_steps : int, optional + Number of time steps to use for convolution of ophys events + exclude_invalid_rois + Whether to exclude invalid rois + """ + def _is_multi_plane_session(): + imaging_plane_group_meta = ImagingPlaneGroup.from_nwb( + nwbfile=nwbfile) + return cls._is_multi_plane_session( + imaging_plane_group_meta=imaging_plane_group_meta) + + behavior_session = BehaviorSession.from_nwb(nwbfile=nwbfile) + projections = Projections.from_nwb(nwbfile=nwbfile) + cell_specimens = CellSpecimens.from_nwb( + nwbfile=nwbfile, + segmentation_mask_image_spacing=projections.max_projection.spacing, + events_params=EventsParams( + filter_scale=events_filter_scale, + filter_n_time_steps=events_filter_n_time_steps + ), + exclude_invalid_rois=exclude_invalid_rois + ) + eye_tracking_rig_geometry = EyeTrackingRigGeometry.from_nwb( + nwbfile=nwbfile) + eye_tracking_table = EyeTrackingTable.from_nwb( + nwbfile=nwbfile, z_threshold=eye_tracking_z_threshold, + dilation_frames=eye_tracking_dilation_frames) + motion_correction = MotionCorrection.from_nwb(nwbfile=nwbfile) + is_multiplane_session = _is_multi_plane_session() + metadata = BehaviorOphysMetadata.from_nwb( + nwbfile=nwbfile, is_multiplane=is_multiplane_session) + if is_multiplane_session: + ophys_timestamps = OphysTimestampsMultiplane.from_nwb( + nwbfile=nwbfile) + else: + ophys_timestamps = OphysTimestamps.from_nwb(nwbfile=nwbfile) + date_of_acquisition = DateOfAcquisitionOphys.from_nwb(nwbfile=nwbfile) + + return BehaviorOphysExperiment( + behavior_session=behavior_session, + 
cell_specimens=cell_specimens, + eye_tracking_rig_geometry=eye_tracking_rig_geometry, + eye_tracking_table=eye_tracking_table, + motion_correction=motion_correction, + metadata=metadata, + ophys_timestamps=ophys_timestamps, + projections=projections, + date_of_acquisition=date_of_acquisition + ) + + @classmethod + def from_json(cls, + session_data: dict, + eye_tracking_z_threshold: float = 3.0, + eye_tracking_dilation_frames: int = 2, + events_filter_scale: float = 2.0, + events_filter_n_time_steps: int = 20, + exclude_invalid_rois=True, + skip_eye_tracking=False) -> \ + "BehaviorOphysExperiment": + """ + + Parameters + ---------- + session_data + eye_tracking_z_threshold + See `BehaviorOphysExperiment.from_nwb` + eye_tracking_dilation_frames + See `BehaviorOphysExperiment.from_nwb` + events_filter_scale + See `BehaviorOphysExperiment.from_nwb` + events_filter_n_time_steps + See `BehaviorOphysExperiment.from_nwb` + exclude_invalid_rois + Whether to exclude invalid rois + skip_eye_tracking + Used to skip returning eye tracking data + + """ + def _is_multi_plane_session(): + imaging_plane_group_meta = ImagingPlaneGroup.from_json( + dict_repr=session_data) + return cls._is_multi_plane_session( + imaging_plane_group_meta=imaging_plane_group_meta) + + def _get_motion_correction(): + rigid_motion_transform_file = RigidMotionTransformFile.from_json( + dict_repr=session_data) + return MotionCorrection.from_data_file( + rigid_motion_transform_file=rigid_motion_transform_file) + + def _get_eye_tracking_table(sync_file: SyncFile): + eye_tracking_file = EyeTrackingFile.from_json( + dict_repr=session_data) + eye_tracking_table = EyeTrackingTable.from_data_file( + data_file=eye_tracking_file, + sync_file=sync_file, + z_threshold=eye_tracking_z_threshold, + dilation_frames=eye_tracking_dilation_frames + ) + return eye_tracking_table + + sync_file = SyncFile.from_json(dict_repr=session_data) + is_multiplane_session = _is_multi_plane_session() + meta = 
BehaviorOphysMetadata.from_json( + dict_repr=session_data, is_multiplane=is_multiplane_session) + monitor_delay = calculate_monitor_delay( + sync_file=sync_file, equipment=meta.behavior_metadata.equipment) + behavior_session = BehaviorSession.from_json( + session_data=session_data, + monitor_delay=monitor_delay + ) + + if is_multiplane_session: + ophys_timestamps = OphysTimestampsMultiplane.from_sync_file( + sync_file=sync_file, + group_count=meta.ophys_metadata.imaging_plane_group_count, + plane_group=meta.ophys_metadata.imaging_plane_group + ) + else: + ophys_timestamps = OphysTimestamps.from_sync_file( + sync_file=sync_file) + + projections = Projections.from_json(dict_repr=session_data) + cell_specimens = CellSpecimens.from_json( + dict_repr=session_data, + ophys_timestamps=ophys_timestamps, + segmentation_mask_image_spacing=projections.max_projection.spacing, + events_params=EventsParams( + filter_scale=events_filter_scale, + filter_n_time_steps=events_filter_n_time_steps), + exclude_invalid_rois=exclude_invalid_rois + ) + motion_correction = _get_motion_correction() + if skip_eye_tracking: + eye_tracking_table = None + eye_tracking_rig_geometry = None + else: + eye_tracking_table = _get_eye_tracking_table(sync_file=sync_file) + eye_tracking_rig_geometry = EyeTrackingRigGeometry.from_json( + dict_repr=session_data) + + return BehaviorOphysExperiment( + behavior_session=behavior_session, + cell_specimens=cell_specimens, + ophys_timestamps=ophys_timestamps, + metadata=meta, + projections=projections, + motion_correction=motion_correction, + eye_tracking_table=eye_tracking_table, + eye_tracking_rig_geometry=eye_tracking_rig_geometry, + date_of_acquisition=behavior_session._date_of_acquisition + ) + + # ========================= 'get' methods ========================== + + def get_segmentation_mask_image(self) -> Image: + """a 2D binary image of all valid cell masks + + Returns + ---------- + allensdk.brain_observatory.behavior.image_api.Image: + array-like 
interface to segmentation_mask image data and + metadata + """ + return self._cell_specimens.segmentation_mask_image + + @legacy('Consider using "dff_traces" instead.') + def get_dff_traces(self, cell_specimen_ids=None): + + if cell_specimen_ids is None: + cell_specimen_ids = self.get_cell_specimen_ids() + + csid_table = \ + self.cell_specimen_table.reset_index()[['cell_specimen_id']] + csid_subtable = csid_table[csid_table['cell_specimen_id'].isin( + cell_specimen_ids)].set_index('cell_specimen_id') + dff_table = csid_subtable.join(self.dff_traces, how='left') + dff_traces = np.vstack(dff_table['dff'].values) + timestamps = self.ophys_timestamps + + assert (len(cell_specimen_ids), len(timestamps)) == dff_traces.shape + return timestamps, dff_traces + + @legacy() + def get_cell_specimen_indices(self, cell_specimen_ids): + return [self.cell_specimen_table.index.get_loc(csid) + for csid in cell_specimen_ids] + + @legacy("Consider using cell_specimen_table['cell_specimen_id'] instead.") + def get_cell_specimen_ids(self): + cell_specimen_ids = self.cell_specimen_table.index.values + + if np.isnan(cell_specimen_ids.astype(float)).sum() == \ + len(self.cell_specimen_table): + raise ValueError("cell_specimen_id values not assigned " + f"for {self.ophys_experiment_id}") + return cell_specimen_ids + + # ====================== properties ======================== + + @property + def ophys_experiment_id(self) -> int: + """Unique identifier for this experimental session. + :rtype: int + """ + return self._metadata.ophys_metadata.ophys_experiment_id + + @property + def ophys_session_id(self) -> int: + """Unique identifier for this ophys session. 
+ :rtype: int + """ + return self._metadata.ophys_metadata.ophys_session_id + + @property + def metadata(self): + visualcoding_meta = super()._get_metadata( + visualcoding_metadata=self._metadata.visualcoding_metadata) + ophys_meta = { + 'indicator': self._cell_specimens.meta.imaging_plane.indicator, + 'emission_lambda': self._cell_specimens.meta.emission_lambda, + 'excitation_lambda': + self._cell_specimens.meta.imaging_plane.excitation_lambda, + 'experiment_container_id': + self._metadata.ophys_metadata.experiment_container_id, + 'field_of_view_height': + self._metadata.ophys_metadata.field_of_view_shape.height, + 'field_of_view_width': + self._metadata.ophys_metadata.field_of_view_shape.width, + 'imaging_depth': self._metadata.ophys_metadata.imaging_depth, + 'imaging_plane_group': + self._metadata.ophys_metadata.imaging_plane_group + if isinstance(self._metadata.ophys_metadata, + MultiplaneMetadata) else None, + 'imaging_plane_group_count': + self._metadata.ophys_metadata.imaging_plane_group_count + if isinstance(self._metadata.ophys_metadata, + MultiplaneMetadata) else 0, + 'ophys_experiment_id': + self._metadata.ophys_metadata.ophys_experiment_id, + 'ophys_frame_rate': + self._cell_specimens.meta.imaging_plane.ophys_frame_rate, + 'ophys_session_id': self._metadata.ophys_metadata.ophys_session_id, + 'project_code': self._metadata.ophys_metadata.project_code, + 'targeted_structure': + self._cell_specimens.meta.imaging_plane.targeted_structure + } + return { + **visualcoding_meta, + **ophys_meta + } + + @property + def max_projection(self) -> Image: + """2D max projection image. 
+ :rtype: allensdk.brain_observatory.behavior.image_api.Image + """ + return self._projections.max_projection + + @property + def average_projection(self) -> Image: + """2D image of the microscope field of view, averaged across the + experiment + :rtype: allensdk.brain_observatory.behavior.image_api.Image + """ + return self._projections.avg_projection + + @property + def ophys_timestamps(self) -> np.ndarray: + """Timestamps associated with frames captured by the microscope + :rtype: numpy.ndarray + """ + return self._ophys_timestamps.value + + @property + def dff_traces(self) -> pd.DataFrame: + """traces of change in fluorescence / fluorescence + + Returns + ------- + pd.DataFrame + dataframe of traces of dff + (change in fluorescence / fluorescence) + + dataframe columns: + cell_specimen_id [index]: (int) + unified id of segmented cell across experiments + assigned after cell matching + cell_roi_id: (int) + experiment specific id of segmented roi, + assigned before cell matching + dff: (list of float) + fluorescence fractional values relative to baseline + (arbitrary units) + + """ + return self._cell_specimens.dff_traces + + @property + def events(self) -> pd.DataFrame: + """A dataframe containing spiking events in traces derived + from the two photon movies, organized by cell specimen id. + For more information on event detection processing + please see the event detection portion of the white paper. + + Returns + ------- + pd.DataFrame + cell_specimen_id [index]: (int) + unified id of segmented cell across experiments + (assigned after cell matching) + cell_roi_id: (int) + experiment specific id of segmented roi (assigned + before cell matching) + events: (np.array of float) + event trace where events correspond to the rise time + of a calcium transient in the dF/F trace, with a + magnitude roughly proportional to the magnitude of the + increase in dF/F. 
+ filtered_events: (np.array of float) + Events array with a 1d causal half-gaussian filter to + smooth it for visualization. Uses a halfnorm + distribution as weights to the filter + lambdas: (float64) + regularization value selected to make the minimum + event size be close to N * noise_std + noise_stds: (float64) + estimated noise standard deviation for the events trace + + """ + + if self._event_table is None: + events_file_name = 'events_' + str(self.ophys_experiment_id) + '.npz' + events_file_path = os.path.join('/allen/programs/mindscope/workgroups/task-trained/michaelbu/signal_noise/events', events_file_name) + + events_data = np.load(events_file_path, allow_pickle=True) + + self.event_min_size = events_data['event_min_size'] + + event_table = self.dff_traces.copy() + + event_table['events'] = [row for row in events_data['events']] + + event_table['noise_std'] = events_data['noise_stds'] + + event_table['lambda'] = events_data['lambdas'] + + + self._event_table = event_table + + return self._event_table + + + @property + def cell_specimen_table(self) -> pd.DataFrame: + """Cell information organized into a dataframe. Table only + contains roi_valid = True entries, as invalid ROIs/ non cell + segmented objects have been filtered out + + Returns + ------- + pd.DataFrame + dataframe columns: + cell_specimen_id [index]: (int) + unified id of segmented cell across experiments + (assigned after cell matching) + cell_roi_id: (int) + experiment specific id of segmented roi + (assigned before cell matching) + height: (int) + height of ROI/cell in pixels + mask_image_plane: (int) + which image plane an ROI resides on. 
Overlapping + ROIs are stored on different mask image planes + max_correction_down: (float) + max motion correction in down direction in pixels + max_correction_left: (float) + max motion correction in left direction in pixels + max_correction_right: (float) + max motion correction in right direction in pixels + max_correction_up: (float) + max motion correction in up direction in pixels + roi_mask: (array of bool) + an image array that displays the location of the + roi mask in the field of view + valid_roi: (bool) + indicates if cell classification found the segmented + ROI to be a cell or not (True = cell, False = not cell). + width: (int) + width of ROI in pixels + x: (float) + x position of ROI in field of view in pixels (top + left corner) + y: (float) + y position of ROI in field of view in pixels (top + left corner) + """ + return self._cell_specimens.table + + @property + def corrected_fluorescence_traces(self) -> pd.DataFrame: + """Corrected fluorescence traces which are neuropil corrected + and demixed. Sampling rate can be found in metadata + 'ophys_frame_rate' + + Returns + ------- + pd.DataFrame + Dataframe that contains the corrected fluorescence traces + for all valid cells. 
+ + dataframe columns: + cell_specimen_id [index]: (int) + unified id of segmented cell across experiments + (assigned after cell matching) + cell_roi_id: (int) + experiment specific id of segmented roi + (assigned before cell matching) + corrected_fluorescence: (list of float) + fluorescence values (arbitrary units) + + """ + return self._cell_specimens.corrected_fluorescence_traces + + @property + def motion_correction(self) -> pd.DataFrame: + """a dataframe containing the x and y offsets applied during + motion correction + + Returns + ------- + pd.DataFrame + dataframe columns: + x: (int) + frame shift along x axis + y: (int) + frame shift along y axis + """ + return self._motion_correction.value + + @property + def segmentation_mask_image(self) -> Image: + """A 2d binary image of all valid cell masks + :rtype: allensdk.brain_observatory.behavior.image_api.Image + """ + return self._cell_specimens.segmentation_mask_image + + @property + def eye_tracking(self) -> pd.DataFrame: + """A dataframe containing ellipse fit parameters for the eye, pupil + and corneal reflection (cr). Fits are derived from tracking points + from a DeepLabCut model applied to video frames of a subject's + right eye. Raw tracking points and raw video frames are not exposed + by the SDK. + + Notes: + - All columns starting with 'pupil_' represent ellipse fit parameters + relating to the pupil. + - All columns starting with 'eye_' represent ellipse fit parameters + relating to the eyelid. + - All columns starting with 'cr_' represent ellipse fit parameters + relating to the corneal reflection, which is caused by an infrared + LED positioned near the eye tracking camera. + - All positions are in units of pixels. + - All areas are in units of pixels^2 + - All values are in the coordinate space of the eye tracking camera, + NOT the coordinate space of the stimulus display (i.e. this is not + gaze location), with (0, 0) being the upper-left corner of the + eye-tracking image. 
+ - The 'likely_blink' column is True for any row (frame) where the pupil + fit failed OR eye fit failed OR an outlier fit was identified on the + pupil or eye fit. + - The pupil_area, cr_area, eye_area columns are set to NaN wherever + 'likely_blink' == True. + - The pupil_area_raw, cr_area_raw, eye_area_raw columns contains all + pupil fit values (including where 'likely_blink' == True). + - All ellipse fits are derived from tracking points that were output by + a DeepLabCut model that was trained on hand-annotated data from a + subset of imaging sessions on optical physiology rigs. + - Raw DeepLabCut tracking points are not publicly available. + + :rtype: pandas.DataFrame + """ + return self._eye_tracking.value + + @property + def eye_tracking_rig_geometry(self) -> dict: + """the eye tracking equipment geometry associate with a + given ophys experiment session. + + Returns + ------- + dict + dictionary with the following keys: + camera_eye_position_mm (array of float) + camera_rotation_deg (array of float) + equipment (string) + led_position (array of float) + monitor_position_mm (array of float) + monitor_rotation_deg (array of float) + """ + return self._eye_tracking_rig_geometry.to_dict()['rig_geometry'] + + @property + def roi_masks(self) -> pd.DataFrame: + return self.cell_specimen_table[['cell_roi_id', 'roi_mask']] + + def _get_identifier(self) -> str: + return str(self.ophys_experiment_id) + + @staticmethod + def _is_multi_plane_session( + imaging_plane_group_meta: ImagingPlaneGroup) -> bool: + """Returns whether this experiment is part of a multiplane session""" + return imaging_plane_group_meta is not None and \ + imaging_plane_group_meta.plane_group_count > 1 + + def _get_session_type(self) -> str: + return self._metadata.behavior_metadata.session_type + + @staticmethod + def _get_keywords(): + """Keywords for NWB file""" + return ["2-photon", "calcium imaging", "visual cortex", + "behavior", "task"] diff --git 
a/allensdk/brain_observatory/behavior/visualcoding_session.py b/allensdk/brain_observatory/behavior/visualcoding_session.py new file mode 100644 index 000000000..69cdf5d30 --- /dev/null +++ b/allensdk/brain_observatory/behavior/visualcoding_session.py @@ -0,0 +1,987 @@ +import datetime +from typing import Any, List, Dict, Optional +from allensdk.brain_observatory.behavior.data_objects import task_parameters +import pynwb +import pandas as pd +import numpy as np +import pytz + +from pynwb import NWBFile + +from allensdk.brain_observatory.behavior.data_files import StimulusFile, SyncFile +from allensdk.brain_observatory.behavior.data_objects.base \ + .readable_interfaces import \ + JsonReadableInterface, NwbReadableInterface, \ + LimsReadableInterface +from allensdk.brain_observatory.behavior.data_objects.base \ + .writable_interfaces import \ + NwbWritableInterface +from allensdk.brain_observatory.behavior.data_objects.licks import Licks +from allensdk.brain_observatory.behavior.data_objects.metadata \ + .behavior_metadata.behavior_metadata import \ + BehaviorMetadata, get_expt_description +from allensdk.brain_observatory.behavior.data_objects.metadata \ + .behavior_metadata.visualcoding_metadata import \ + VisualCodingMetadata +from allensdk.brain_observatory.behavior.data_objects.metadata\ + .behavior_metadata.date_of_acquisition import \ + DateOfAcquisition +from allensdk.brain_observatory.behavior.data_objects.rewards import Rewards +# from allensdk.brain_observatory.behavior.data_objects.stimuli.stimuli import \ +# Stimuli +from allensdk.brain_observatory.behavior.data_objects.stimuli.densemovie_stimuli import \ + DenseMovieStimuli +from allensdk.brain_observatory.behavior.data_objects.stimuli.densemovie_presentations import \ + shorten_stimulus_presentation +from allensdk.brain_observatory.behavior.data_objects.task_parameters import \ + TaskParameters +from allensdk.brain_observatory.behavior.data_objects.trials.trial_table \ + import \ + TrialTable +from 
allensdk.brain_observatory.behavior.trials_processing import ( + construct_rolling_performance_df, calculate_reward_rate_fix_nans) +from allensdk.brain_observatory.behavior.data_objects import ( + VisualCodingSessionId, StimulusTimestamps, RunningSpeed, RunningAcquisition, + DataObject +) + +from allensdk.core.auth_config import LIMS_DB_CREDENTIAL_MAP +from allensdk.internal.api import db_connection_creator, PostgresQueryMixin + + +class VisualCodingSession(DataObject, LimsReadableInterface, + NwbReadableInterface, + JsonReadableInterface, NwbWritableInterface): + """Represents data from a single Visual Behavior behavior session. + Initialize by using class methods `from_lims` or `from_nwb_path`. + """ + def __init__( + self, + ophys_session_id: VisualCodingSessionId, + stimulus_timestamps: StimulusTimestamps, + running_acquisition: RunningAcquisition, + raw_running_speed: RunningSpeed, + running_speed: RunningSpeed, + licks: Licks, + rewards: Rewards, + stimuli: DenseMovieStimuli, + task_parameters: TaskParameters, + trials: TrialTable, + metadata: VisualCodingMetadata, + date_of_acquisition: DateOfAcquisition + ): + super().__init__(name='ophys_session', value=self) + + self._ophys_session_id = ophys_session_id + self._licks = licks + self._rewards = rewards + self._running_acquisition = running_acquisition + self._running_speed = running_speed + self._raw_running_speed = raw_running_speed + self._stimuli = stimuli + self._stimulus_timestamps = stimulus_timestamps + self._task_parameters = task_parameters + self._trials = trials + self._metadata = metadata + self._date_of_acquisition = date_of_acquisition + + # ==================== class and utility methods ====================== + + @classmethod + def from_json(cls, + session_data: dict, + monitor_delay: Optional[float] = None) \ + -> "VisualCodingSession": + """ + + Parameters + ---------- + session_data + Dict of input data necessary to construct a session + monitor_delay + Monitor delay. 
If not provided, will use an estimate. + To provide this value, see for example + allensdk.brain_observatory.behavior.data_objects.stimuli.util. + calculate_monitor_delay + + Returns + ------- + `BehaviorSession` instance + + """ + behavior_session_id = BehaviorSessionId.from_json( + dict_repr=session_data) + stimulus_file = StimulusFile.from_json(dict_repr=session_data) + stimulus_timestamps = StimulusTimestamps.from_json( + dict_repr=session_data) + running_acquisition = RunningAcquisition.from_json( + dict_repr=session_data) + raw_running_speed = RunningSpeed.from_json( + dict_repr=session_data, filtered=False + ) + running_speed = RunningSpeed.from_json(dict_repr=session_data) + metadata = BehaviorMetadata.from_json(dict_repr=session_data) + + if monitor_delay is None: + monitor_delay = cls._get_monitor_delay() + + licks, rewards, stimuli, task_parameters, trials = \ + cls._read_data_from_stimulus_file( + stimulus_file=stimulus_file, + stimulus_timestamps=stimulus_timestamps, + trial_monitor_delay=monitor_delay + ) + date_of_acquisition = DateOfAcquisition.from_json( + dict_repr=session_data)\ + .validate( + stimulus_file=stimulus_file, + behavior_session_id=behavior_session_id.value) + + return BehaviorSession( + behavior_session_id=behavior_session_id, + stimulus_timestamps=stimulus_timestamps, + running_acquisition=running_acquisition, + raw_running_speed=raw_running_speed, + running_speed=running_speed, + metadata=metadata, + licks=licks, + rewards=rewards, + stimuli=stimuli, + task_parameters=task_parameters, + trials=trials, + date_of_acquisition=date_of_acquisition + ) + + @classmethod + def from_lims(cls, ophys_session_id: int, + lims_db: Optional[PostgresQueryMixin] = None, + stimulus_timestamps: Optional[StimulusTimestamps] = None, + monitor_delay: Optional[float] = None, + date_of_acquisition: Optional[DateOfAcquisition] = None) \ + -> "VisualCodingSession": + """ + + Parameters + ---------- + ophys_session_id + ophys session id + lims_db + Database 
connection. If not provided will create a new one. + stimulus_timestamps + Stimulus timestamps. If not provided, will calculate stimulus + timestamps from stimulus file. + monitor_delay + Monitor delay. If not provided, will use an estimate. + To provide this value, see for example + allensdk.brain_observatory.behavior.data_objects.stimuli.util. + calculate_monitor_delay + date_of_acquisition + Date of acquisition. If not provided, will read from + behavior_sessions table. + Returns + ------- + `BehaviorSession` instance + """ + if lims_db is None: + lims_db = db_connection_creator( + fallback_credentials=LIMS_DB_CREDENTIAL_MAP + ) + + ophys_session_id = VisualCodingSessionId(ophys_session_id) + stimulus_file = StimulusFile.from_lims_for_ophys_session( + db=lims_db, ophys_session_id=ophys_session_id.value) + + # should use syncfile + if stimulus_timestamps is None: + sync_file = SyncFile.from_lims_for_ophys_session(db=lims_db, ophys_session_id=ophys_session_id.value) + stimulus_timestamps = StimulusTimestamps.from_sync_file( + sync_file=sync_file) + + running_acquisition = RunningAcquisition.from_lims_for_ophys_session( + lims_db, ophys_session_id.value + ) + raw_running_speed = RunningSpeed.from_lims_for_ophys_session( + lims_db, ophys_session_id.value, filtered=False, + stimulus_timestamps=stimulus_timestamps + ) + running_speed = RunningSpeed.from_lims_for_ophys_session( + lims_db, ophys_session_id.value, + stimulus_timestamps=stimulus_timestamps + ) + visualcoding_metadata = VisualCodingMetadata.from_lims( + ophys_session_id=ophys_session_id, lims_db=lims_db + ) + + if monitor_delay is None: + monitor_delay = cls._get_monitor_delay() + + # licks, rewards, stimuli, task_parameters, trials = \ + # cls._read_data_from_stimulus_file( + # stimulus_file=stimulus_file, + # stimulus_timestamps=stimulus_timestamps, + # trial_monitor_delay=monitor_delay + # ) + licks = None + rewards = None + # stimuli = None + stimuli = cls._read_data_from_visualcoding_stimulus_file( + 
stimulus_file=stimulus_file, + stimulus_timestamps=stimulus_timestamps) + task_parameters = None + trials = None + if date_of_acquisition is None: + date_of_acquisition = DateOfAcquisition.from_lims_for_ophys_session( + ophys_session_id=ophys_session_id.value, lims_db=lims_db) + # date_of_acquisition = date_of_acquisition.validate( + # stimulus_file=stimulus_file, + # behavior_session_id=behavior_session_id.value) + + return VisualCodingSession( + ophys_session_id=ophys_session_id, + stimulus_timestamps=stimulus_timestamps, + running_acquisition=running_acquisition, + raw_running_speed=raw_running_speed, + running_speed=running_speed, + metadata=visualcoding_metadata, + licks=licks, + rewards=rewards, + stimuli=stimuli, + task_parameters=task_parameters, + trials=trials, + date_of_acquisition=date_of_acquisition + ) + + @classmethod + def from_nwb(cls, nwbfile: NWBFile, **kwargs) -> "BehaviorSession": + behavior_session_id = BehaviorSessionId.from_nwb(nwbfile) + stimulus_timestamps = StimulusTimestamps.from_nwb(nwbfile) + running_acquisition = RunningAcquisition.from_nwb(nwbfile) + raw_running_speed = RunningSpeed.from_nwb(nwbfile, filtered=False) + running_speed = RunningSpeed.from_nwb(nwbfile) + metadata = BehaviorMetadata.from_nwb(nwbfile) + licks = Licks.from_nwb(nwbfile=nwbfile) + rewards = Rewards.from_nwb(nwbfile=nwbfile) + stimuli = Stimuli.from_nwb(nwbfile=nwbfile) + task_parameters = TaskParameters.from_nwb(nwbfile=nwbfile) + trials = TrialTable.from_nwb(nwbfile=nwbfile) + date_of_acquisition = DateOfAcquisition.from_nwb(nwbfile=nwbfile) + + return BehaviorSession( + behavior_session_id=behavior_session_id, + stimulus_timestamps=stimulus_timestamps, + running_acquisition=running_acquisition, + raw_running_speed=raw_running_speed, + running_speed=running_speed, + metadata=metadata, + licks=licks, + rewards=rewards, + stimuli=stimuli, + task_parameters=task_parameters, + trials=trials, + date_of_acquisition=date_of_acquisition + ) + + @classmethod + def 
from_nwb_path(cls, nwb_path: str, **kwargs) -> "BehaviorSession": + """ + + Parameters + ---------- + nwb_path + Path to nwb file + kwargs + Kwargs to be passed to `from_nwb` + + Returns + ------- + An instantiation of a `BehaviorSession` + """ + with pynwb.NWBHDF5IO(str(nwb_path), 'r') as read_io: + nwbfile = read_io.read() + return cls.from_nwb(nwbfile=nwbfile, **kwargs) + + def to_nwb(self, add_metadata=True) -> NWBFile: + """ + + Parameters + ---------- + add_metadata + Set this to False to prevent adding metadata to the nwb + instance. + """ + nwbfile = NWBFile( + session_description=self._get_session_type(), + identifier=self._get_identifier(), + session_start_time=self._date_of_acquisition.value, + file_create_date=pytz.utc.localize(datetime.datetime.now()), + institution="Allen Institute for Brain Science", + keywords=self._get_keywords(), + experiment_description=get_expt_description( + session_type=self._get_session_type()) + ) + + self._stimulus_timestamps.to_nwb(nwbfile=nwbfile) + self._running_acquisition.to_nwb(nwbfile=nwbfile) + self._raw_running_speed.to_nwb(nwbfile=nwbfile) + self._running_speed.to_nwb(nwbfile=nwbfile) + + if add_metadata: + self._metadata.to_nwb(nwbfile=nwbfile) + + self._licks.to_nwb(nwbfile=nwbfile) + self._rewards.to_nwb(nwbfile=nwbfile) + self._stimuli.to_nwb(nwbfile=nwbfile) + self._task_parameters.to_nwb(nwbfile=nwbfile) + self._trials.to_nwb(nwbfile=nwbfile) + + return nwbfile + + def list_data_attributes_and_methods(self) -> List[str]: + """Convenience method for end-users to list attributes and methods + that can be called to access data for a BehaviorSession. + + NOTE: Because BehaviorOphysExperiment inherits from BehaviorSession, + this method will also be available there. + + Returns + ------- + List[str] + A list of attributes and methods that end-users can access or call + to get data. 
+ """ + attrs_and_methods_to_ignore: set = { + "from_json", + "from_lims", + "from_nwb_path", + "list_data_attributes_and_methods" + } + attrs_and_methods_to_ignore.update(dir(NwbReadableInterface)) + attrs_and_methods_to_ignore.update(dir(NwbWritableInterface)) + attrs_and_methods_to_ignore.update(dir(DataObject)) + class_dir = dir(self) + attrs_and_methods = [ + r for r in class_dir + if (r not in attrs_and_methods_to_ignore and not r.startswith("_")) + ] + return attrs_and_methods + + # ========================= 'get' methods ========================== + + def get_reward_rate(self) -> np.ndarray: + """ Get the reward rate of the subject for the task calculated over a + 25 trial rolling window and provides a measure of the rewards + earned per unit time (in units of rewards/minute). + + Returns + ------- + np.ndarray + The reward rate (rewards/minute) of the subject for the + task calculated over a 25 trial rolling window. + """ + return calculate_reward_rate_fix_nans( + self.trials, + self.task_parameters['response_window_sec'][0]) + + def get_rolling_performance_df(self) -> pd.DataFrame: + """Return a DataFrame containing trial by trial behavior response + performance metrics. + + Returns + ------- + pd.DataFrame + A pandas DataFrame containing: + trials_id [index]: (int) + Index of the trial. All trials, including aborted trials, + are assigned an index starting at 0 for the first trial. + reward_rate: (float) + Rewards earned in the previous 25 trials, normalized by + the elapsed time of the same 25 trials. Units are + rewards/minute. + hit_rate_raw: (float) + Fraction of go trials where the mouse licked in the + response window, calculated over the previous 100 + non-aborted trials. Without trial count correction applied. + hit_rate: (float) + Fraction of go trials where the mouse licked in the + response window, calculated over the previous 100 + non-aborted trials. With trial count correction applied. 
+ false_alarm_rate_raw: (float) + Fraction of catch trials where the mouse licked in the + response window, calculated over the previous 100 + non-aborted trials. Without trial count correction applied. + false_alarm_rate: (float) + Fraction of catch trials where the mouse licked in + the response window, calculated over the previous 100 + non-aborted trials. With trial count correction applied. + rolling_dprime: (float) + d prime calculated using the rolling hit_rate and + rolling false_alarm_rate. + + """ + return construct_rolling_performance_df( + self.trials, + self.task_parameters['response_window_sec'][0], + self.task_parameters["session_type"]) + + def get_performance_metrics( + self, + engaged_trial_reward_rate_threshold: float = 2.0 + ) -> dict: + """Get a dictionary containing a subject's behavior response + summary data. + + Parameters + ---------- + engaged_trial_reward_rate_threshold : float, optional + The number of rewards per minute that needs to be attained + before a subject is considered 'engaged', by default 2.0 + + Returns + ------- + dict + Returns a dict of performance metrics with the following fields: + trial_count: (int) + The length of the trial dataframe + (including all 'go', 'catch', and 'aborted' trials) + go_trial_count: (int) + Number of 'go' trials in a behavior session + catch_trial_count: (int) + Number of 'catch' trial types during a behavior session + hit_trial_count: (int) + Number of trials with a hit behavior response + type in a behavior session + miss_trial_count: (int) + Number of trials with a miss behavior response + type in a behavior session + false_alarm_trial_count: (int) + Number of trials where the mouse had a false alarm + behavior response + correct_reject_trial_count: (int) + Number of trials with a correct reject behavior + response during a behavior session + auto_reward_count: + Number of trials where the mouse received an auto + reward of water.
+ earned_reward_count: + Number of trials where the mouse was eligible to receive a + water reward ('go' trials) and did receive an earned + water reward + total_reward_count: + Number of trials where the mouse received a + water reward (earned or auto rewarded) + total_reward_volume: (float) + Volume of all water rewards received during a + behavior session (earned and auto rewarded) + maximum_reward_rate: (float) + The peak of the rolling reward rate (rewards/minute) + engaged_trial_count: (int) + Number of trials where the mouse is engaged + (reward rate > 2 rewards/minute) + mean_hit_rate: (float) + The mean of the rolling hit_rate + mean_hit_rate_uncorrected: + The mean of the rolling hit_rate_raw + mean_hit_rate_engaged: (float) + The mean of the rolling hit_rate, excluding epochs + when the rolling reward rate was below 2 rewards/minute + mean_false_alarm_rate: (float) + The mean of the rolling false_alarm_rate, excluding + epochs when the rolling reward rate was below 2 + rewards/minute + mean_false_alarm_rate_uncorrected: (float) + The mean of the rolling false_alarm_rate_raw + mean_false_alarm_rate_engaged: (float) + The mean of the rolling false_alarm_rate, + excluding epochs when the rolling reward rate + was below 2 rewards/minute + mean_dprime: (float) + The mean of the rolling d_prime + mean_dprime_engaged: (float) + The mean of the rolling d_prime, excluding + epochs when the rolling reward rate was + below 2 rewards/minute + max_dprime: (float) + The peak of the rolling d_prime + max_dprime_engaged: (float) + The peak of the rolling d_prime, excluding epochs + when the rolling reward rate was below 2 rewards/minute + """ + performance_metrics = {} + performance_metrics['trial_count'] = len(self.trials) + performance_metrics['go_trial_count'] = self.trials.go.sum() + performance_metrics['catch_trial_count'] = self.trials.catch.sum() + performance_metrics['hit_trial_count'] = self.trials.hit.sum() + performance_metrics['miss_trial_count'] = 
self.trials.miss.sum() + performance_metrics['false_alarm_trial_count'] = \ + self.trials.false_alarm.sum() + performance_metrics['correct_reject_trial_count'] = \ + self.trials.correct_reject.sum() + performance_metrics['auto_reward_count'] = \ + self.trials.auto_rewarded.sum() + # Although 'earned_reward_count' will currently have the same value as + # 'hit_trial_count', in the future there may be variants of the + # task where rewards are withheld. In that case the + # 'earned_reward_count' will be smaller than (and different from) + # the 'hit_trial_count'. + performance_metrics['earned_reward_count'] = self.trials.hit.sum() + performance_metrics['total_reward_count'] = len(self.rewards) + performance_metrics['total_reward_volume'] = self.rewards.volume.sum() + + rpdf = self.get_rolling_performance_df() + engaged_trial_mask = ( + rpdf['reward_rate'] > + engaged_trial_reward_rate_threshold) + performance_metrics['maximum_reward_rate'] = \ + np.nanmax(rpdf['reward_rate'].values) + performance_metrics['engaged_trial_count'] = (engaged_trial_mask).sum() + performance_metrics['mean_hit_rate'] = \ + rpdf['hit_rate'].mean() + performance_metrics['mean_hit_rate_uncorrected'] = \ + rpdf['hit_rate_raw'].mean() + performance_metrics['mean_hit_rate_engaged'] = \ + rpdf['hit_rate'][engaged_trial_mask].mean() + performance_metrics['mean_false_alarm_rate'] = \ + rpdf['false_alarm_rate'].mean() + performance_metrics['mean_false_alarm_rate_uncorrected'] = \ + rpdf['false_alarm_rate_raw'].mean() + performance_metrics['mean_false_alarm_rate_engaged'] = \ + rpdf['false_alarm_rate'][engaged_trial_mask].mean() + performance_metrics['mean_dprime'] = \ + rpdf['rolling_dprime'].mean() + performance_metrics['mean_dprime_engaged'] = \ + rpdf['rolling_dprime'][engaged_trial_mask].mean() + performance_metrics['max_dprime'] = \ + rpdf['rolling_dprime'].max() + performance_metrics['max_dprime_engaged'] = \ + rpdf['rolling_dprime'][engaged_trial_mask].max() + + return performance_metrics + + 
# ====================== properties ======================== + + @property + def behavior_session_id(self) -> int: + """Unique identifier for a behavioral session. + :rtype: int + """ + return self._behavior_session_id.value + + @property + def licks(self) -> pd.DataFrame: + """A dataframe containing lick timestmaps and frames, sampled + at 60 Hz. + + NOTE: For BehaviorSessions, returned timestamps are not + aligned to external 'synchronization' reference timestamps. + Synchronized timestamps are only available for + BehaviorOphysExperiments. + + Returns + ------- + np.ndarray + A dataframe containing lick timestamps. + dataframe columns: + timestamps: (float) + time of lick, in seconds + frame: (int) + frame of lick + + """ + return self._licks.value + + @property + def rewards(self) -> pd.DataFrame: + """Retrieves rewards from data file saved at the end of the + behavior session. + + NOTE: For BehaviorSessions, returned timestamps are not + aligned to external 'synchronization' reference timestamps. + Synchronized timestamps are only available for + BehaviorOphysExperiments. + + Returns + ------- + pd.DataFrame + A dataframe containing timestamps of delivered rewards. + Timestamps are sampled at 60Hz. + + dataframe columns: + volume: (float) + volume of individual water reward in ml. + 0.007 if earned reward, 0.005 if auto reward. + timestamps: (float) + time in seconds + autorewarded: (bool) + True if free reward was delivered for that trial. + Occurs during the first 5 trials of a session and + throughout as needed + + """ + return self._rewards.value + + @property + def running_speed(self) -> pd.DataFrame: + """Running speed and timestamps, sampled at 60Hz. By default + applies a 10Hz low pass filter to the data. To get the + running speed without the filter, use `raw_running_speed`. + + NOTE: For BehaviorSessions, returned timestamps are not + aligned to external 'synchronization' reference timestamps. 
+ Synchronized timestamps are only available for + BehaviorOphysExperiments. + + Returns + ------- + pd.DataFrame + Dataframe containing running speed and timestamps + dataframe columns: + timestamps: (float) + time in seconds + speed: (float) + speed in cm/sec + """ + return self._running_speed.value + + @property + def raw_running_speed(self) -> pd.DataFrame: + """Get unfiltered running speed data. Sampled at 60Hz. + + NOTE: For BehaviorSessions, returned timestamps are not + aligned to external 'synchronization' reference timestamps. + Synchronized timestamps are only available for + BehaviorOphysExperiments. + + Returns + ------- + pd.DataFrame + Dataframe containing unfiltered running speed and timestamps + dataframe columns: + timestamps: (float) + time in seconds + speed: (float) + speed in cm/sec + """ + return self._raw_running_speed.value + + @property + def stimulus_presentations(self) -> pd.DataFrame: + """Table whose rows are stimulus presentations (i.e. a given image, + for a given duration, typically 250 ms) and whose columns are + presentation characteristics. + + Returns + ------- + pd.DataFrame + Table whose rows are stimulus presentations + (i.e. a given image, for a given duration, typically 250 ms) + and whose columns are presentation characteristics. + + dataframe columns: + stimulus_presentations_id [index]: (int) + identifier for a stimulus presentation + (presentation of an image) + duration: (float) + duration of an image presentation (flash) + in seconds (stop_time - start_time). 
NaN if omitted + end_frame: (float) + image presentation end frame + image_index: (int) + image index (0-7) for a given session, + corresponding to each image name + image_set: (string) + image set for this behavior session + index: (int) + an index assigned to each stimulus presentation + omitted: (bool) + True if no image was shown for this stimulus + presentation + start_frame: (int) + image presentation start frame + start_time: (float) + image presentation start time in seconds + stop_time: (float) + image presentation end time in seconds + """ + return self._stimuli.presentations.value + + @property + def stimulus_presentations_short(self) -> pd.DataFrame: + """Table whose rows are stimulus presentations (i.e. a given image, + for a given duration, typically 250 ms) and whose columns are + presentation characteristics. + + """ + return shorten_stimulus_presentation(self.stimulus_presentations) + + @property + def stimulus_templates(self) -> pd.DataFrame: + """Get stimulus templates (movies, scenes) for behavior session. + + Returns + ------- + pd.DataFrame + A pandas DataFrame object containing the stimulus images for the + experiment. + + dataframe columns: + image_name [index]: (string) + name of image presented, if 'omitted' + then no image was presented + unwarped: (array of int) + image array of unwarped stimulus image + warped: (array of int) + image array of warped stimulus image + + """ + # return self._stimuli.templates.value.to_dataframe() + return pd.DataFrame(self._stimuli.templates.value) + + @property + def stimulus_timestamps(self) -> np.ndarray: + """Timestamps associated with the stimulus presentation on + the monitor, retrieved from the data file saved at the end of the + behavior session. Sampled at 60Hz. + + NOTE: For BehaviorSessions, returned timestamps are not + aligned to external 'synchronization' reference timestamps. + Synchronized timestamps are only available for + BehaviorOphysExperiments.
+ + Returns + ------- + np.ndarray + Timestamps associated with stimulus presentations on the monitor + """ + return self._stimulus_timestamps.value + + @property + def task_parameters(self) -> dict: + """Get task parameters from data file saved at the end of + the behavior session file. + + Returns + ------- + dict + A dictionary containing parameters used to define the task runtime + behavior. + auto_reward_volume: (float) + Volume of auto rewards in ml. + blank_duration_sec : (list of floats) + Duration in seconds of inter stimulus interval. + Inter-stimulus interval chosen as a uniform random value. + between the range defined by the two values. + Values are ignored if `stimulus_duration_sec` is null. + response_window_sec: (list of floats) + Range of period following an image change, in seconds, + where mouse response influences trial outcome. + First value represents response window start. + Second value represents response window end. + Values represent time before display lag is + accounted for and applied. + n_stimulus_frames: (int) + Total number of visual stimulus frames presented during + a behavior session. + task: (string) + Type of visual stimulus task. + session_type: (string) + Visual stimulus type run during behavior session. + omitted_flash_fraction: (float) + Probability that a stimulus image presentations is omitted. + Change stimuli, and the stimulus immediately preceding the + change, are never omitted. + stimulus_distribution: (string) + Distribution for drawing change times. + Either 'exponential' or 'geometric'. + stimulus_duration_sec: (float) + Duration in seconds of each stimulus image presentation + reward_volume: (float) + Volume of earned water reward in ml. + stimulus: (string) + Stimulus type ('gratings' or 'images'). + + """ + return self._task_parameters.to_dict()['task_parameters'] + + @property + def trials(self) -> pd.DataFrame: + """Get trials from data file saved at the end of the + behavior session. 
+ + Returns + ------- + pd.DataFrame + A dataframe containing trial and behavioral response data, + by cell specimen id + + dataframe columns: + trials_id: (int) + trial identifier + lick_times: (array of float) + array of lick times in seconds during that trial. + Empty array if no licks occured during the trial. + reward_time: (NaN or float) + Time the reward is delivered following a correct + response or on auto rewarded trials. + reward_volume: (float) + volume of reward in ml. 0.005 for auto reward + 0.007 for earned reward + hit: (bool) + Behavior response type. On catch trial mouse licks + within reward window. + false_alarm: (bool) + Behavior response type. On catch trial mouse licks + within reward window. + miss: (bool) + Behavior response type. On a go trial, mouse either + does not lick at all, or licks after reward window + stimulus_change: (bool) + True if an image change occurs during the trial + (if the trial was both a 'go' trial and the trial + was not aborted) + aborted: (bool) + Behavior response type. True if the mouse licks + before the scheduled change time. + go: (bool) + Trial type. True if there was a change in stimulus + image identity on this trial + catch: (bool) + Trial type. True if there was not a change in stimulus + identity on this trial + auto_rewarded: (bool) + True if free reward was delivered for that trial. + Occurs during the first 5 trials of a session and + throughout as needed. + correct_reject: (bool) + Behavior response type. 
On a catch trial, mouse + either does not lick at all or licks after reward + window + start_time: (float) + start time of the trial in seconds + stop_time: (float) + end time of the trial in seconds + trial_length: (float) + duration of trial in seconds (stop_time - start_time) + response_time: (float) + time of first lick in trial in seconds and NaN if + trial aborted + initial_image_name: (string) + name of image presented at start of trial + change_image_name: (string) + name of image that is changed to at the change time, + on go trials + """ + return self._trials.value + + @property + def metadata(self) -> Dict[str, Any]: + """metadata for a given session + + Returns + ------- + Dict + A dictionary containing behavior session specific metadata + dictionary keys: + age_in_days: (int) + age of mouse in days + behavior_session_uuid: (int) + unique identifier for a behavior session + behavior_session_id: (int) + unique identifier for a behavior session + cre_line: (string) + cre driver line for a transgenic mouse + date_of_acquisition: (date time object) + date and time of experiment acquisition, + yyyy-mm-dd hh:mm:ss + driver_line: (list of string) + all driver lines for a transgenic mouse + equipment_name: (string) + identifier for equipment data was collected on + full_genotype: (string) + full genotype of transgenic mouse + mouse_id: (int) + unique identifier for a mouse + reporter_line: (string) + reporter line for a transgenic mouse + session_type: (string) + visual stimulus type displayed during behavior + session + sex: (string) + sex of the mouse + stimulus_frame_rate: (float) + frame rate (Hz) at which the visual stimulus is + displayed + """ + return self._get_metadata(visualcoding_metadata=self._metadata) + + @classmethod + def _read_data_from_visualcoding_stimulus_file( + cls, stimulus_file: StimulusFile, + stimulus_timestamps: StimulusTimestamps): + """Helper method to read data from stimulus file""" + stimuli = DenseMovieStimuli.from_stimulus_file(
+ stimulus_file=stimulus_file, + stimulus_timestamps=stimulus_timestamps) + return stimuli + + + @classmethod + def _read_data_from_stimulus_file( + cls, stimulus_file: StimulusFile, + stimulus_timestamps: StimulusTimestamps, + trial_monitor_delay: float): + """Helper method to read data from stimulus file""" + licks = Licks.from_stimulus_file( + stimulus_file=stimulus_file, + stimulus_timestamps=stimulus_timestamps) + rewards = Rewards.from_stimulus_file( + stimulus_file=stimulus_file, + stimulus_timestamps=stimulus_timestamps) + stimuli = Stimuli.from_stimulus_file( + stimulus_file=stimulus_file, + stimulus_timestamps=stimulus_timestamps) + task_parameters = TaskParameters.from_stimulus_file( + stimulus_file=stimulus_file) + trials = TrialTable.from_stimulus_file( + stimulus_file=stimulus_file, + stimulus_timestamps=stimulus_timestamps, + licks=licks, + rewards=rewards, + monitor_delay=trial_monitor_delay + ) + return licks, rewards, stimuli, task_parameters, trials + + def _get_metadata(self, visualcoding_metadata: VisualCodingMetadata) -> dict: + """Returns dict of metadata""" + return { + 'equipment_name': visualcoding_metadata.equipment.value, + 'sex': visualcoding_metadata.subject_metadata.sex, + 'age_in_days': visualcoding_metadata.subject_metadata.age_in_days, + 'stimulus_frame_rate': visualcoding_metadata.stimulus_frame_rate, + 'session_type': visualcoding_metadata.session_type, + 'date_of_acquisition': self._date_of_acquisition.value, + 'reporter_line': visualcoding_metadata.subject_metadata.reporter_line, + 'cre_line': visualcoding_metadata.subject_metadata.cre_line, + 'behavior_session_uuid': visualcoding_metadata.behavior_session_uuid, + 'driver_line': visualcoding_metadata.subject_metadata.driver_line, + 'mouse_id': visualcoding_metadata.subject_metadata.mouse_id, + 'full_genotype': visualcoding_metadata.subject_metadata.full_genotype, + 'ophys_session_id': visualcoding_metadata.ophys_session_id + } + + def _get_identifier(self) -> str: + return 
str(self._behavior_session_id) + + def _get_session_type(self) -> str: + return self._metadata.session_type + + @staticmethod + def _get_keywords(): + """Keywords for NWB file""" + return ["visual", "behavior", "task"] + + @staticmethod + def _get_monitor_delay(): + # This is the median estimate across all rigs + # as discussed in + # https://github.com/AllenInstitute/AllenSDK/issues/1318 + return 0.02115 diff --git a/allensdk/internal/brain_observatory/time_sync.py b/allensdk/internal/brain_observatory/time_sync.py index e191525f2..46158880c 100644 --- a/allensdk/internal/brain_observatory/time_sync.py +++ b/allensdk/internal/brain_observatory/time_sync.py @@ -37,13 +37,13 @@ def get_keys(sync_dset: Dataset) -> dict: # line labels key_dict = { "photodiode": ["stim_photodiode", "photodiode"], - "2p": ["2p_vsync"], + "2p": ["2p_vsync", "vsync_2p"], "stimulus": ["stim_vsync", "vsync_stim"], "eye_camera": ["cam2_exposure", "eye_tracking", "eye_frame_received"], "behavior_camera": ["cam1_exposure", "behavior_monitoring", "beh_frame_received"], - "acquiring": ["2p_acquiring", "acq_trigger"], + "acquiring": ["2p_acquiring", "acq_trigger","2p_acq_trigger","stim_running"], "lick_sensor": ["lick_1", "lick_sensor"] } label_set = set(sync_dset.line_labels)