diff --git a/comparison_table.md b/comparison_table.md index cb34ed8..864b4f3 100644 --- a/comparison_table.md +++ b/comparison_table.md @@ -353,59 +353,59 @@ | ndi.file.pfilemirror | No | No | | ndi.file.temp_fid | No | No | | ndi.file.temp_name | No | No | -| ndi.fun.calc.stimulus_tuningcurve_log | No | No | -| ndi.fun.data.mat2ngrid | No | No | -| ndi.fun.data.readImageStack | No | No | -| ndi.fun.data.readngrid | No | No | -| ndi.fun.data.writengrid | No | No | -| ndi.fun.dataset.diff | No | No | -| ndi.fun.doc.probe.probeLocations4probes | No | No | -| ndi.fun.doc.subject.makeSpeciesStrainSex | No | No | -| ndi.fun.doc.allTypes | No | No | -| ndi.fun.doc.diff | No | No | -| ndi.fun.doc.findFuid | No | No | -| ndi.fun.doc.getDocTypes | No | No | -| ndi.fun.doc.ontologyTableRowDoc2Table | No | No | -| ndi.fun.doc.ontologyTableRowVars | No | No | -| ndi.fun.doc.t0_t1cell2array | No | No | -| ndi.fun.docTable.docCellArray2Table | No | No | -| ndi.fun.docTable.element | No | No | -| ndi.fun.docTable.epoch | No | No | -| ndi.fun.docTable.openminds | No | No | -| ndi.fun.docTable.probe | No | No | -| ndi.fun.docTable.subject | No | No | -| ndi.fun.docTable.treatment | No | No | -| ndi.fun.epoch.epochid2element | No | No | -| ndi.fun.epoch.filename2epochid | No | No | -| ndi.fun.file.MD5 | No | No | -| ndi.fun.file.dateCreated | No | No | -| ndi.fun.file.dateUpdated | No | No | -| ndi.fun.plot.bar3 | No | No | -| ndi.fun.plot.multichan | No | No | -| ndi.fun.probe.location | No | No | -| ndi.fun.session.diff | No | No | -| ndi.fun.stimulus.f0_f1_responses | No | No | -| ndi.fun.stimulus.findMixtureName | No | No | -| ndi.fun.stimulus.tuning_curve_to_response_type | No | No | -| ndi.fun.table.identifyMatchingRows | No | No | -| ndi.fun.table.identifyValidRows | No | No | -| ndi.fun.table.join | No | No | -| ndi.fun.table.moveColumnsLeft | No | No | -| ndi.fun.table.vstack | No | No | +| ndi.fun.calc.stimulus_tuningcurve_log | Yes | Yes | +| ndi.fun.data.mat2ngrid | Yes | Yes | +| ndi.fun.data.readImageStack | Yes | Yes | +| ndi.fun.data.readngrid | Yes | Yes | +| ndi.fun.data.writengrid | Yes | Yes | +| ndi.fun.dataset.diff | Yes | Yes | +| ndi.fun.doc.probe.probeLocations4probes | Yes | Yes | +| ndi.fun.doc.subject.makeSpeciesStrainSex | Yes | Yes | +| ndi.fun.doc.allTypes | Yes | Yes | +| ndi.fun.doc.diff | Yes | Yes | +| ndi.fun.doc.findFuid | Yes | Yes | +| ndi.fun.doc.getDocTypes | Yes | Yes | +| ndi.fun.doc.ontologyTableRowDoc2Table | Yes | Yes | +| ndi.fun.doc.ontologyTableRowVars | Yes | Yes | +| ndi.fun.doc.t0_t1cell2array | Yes | Yes | +| ndi.fun.docTable.docCellArray2Table | Yes | Yes | +| ndi.fun.docTable.element | Yes | Yes | +| ndi.fun.docTable.epoch | Yes | Yes | +| ndi.fun.docTable.openminds | Yes | Yes | +| ndi.fun.docTable.probe | Yes | Yes | +| ndi.fun.docTable.subject | Yes | Yes | +| ndi.fun.docTable.treatment | Yes | Yes | +| ndi.fun.epoch.epochid2element | Yes | Yes | +| ndi.fun.epoch.filename2epochid | Yes | Yes | +| ndi.fun.file.MD5 | Yes | Yes | +| ndi.fun.file.dateCreated | Yes | Yes | +| ndi.fun.file.dateUpdated | Yes | Yes | +| ndi.fun.plot.bar3 | Yes | Yes | +| ndi.fun.plot.multichan | Yes | Yes | +| ndi.fun.probe.location | Yes | Yes | +| ndi.fun.session.diff | Yes | Yes | +| ndi.fun.stimulus.f0_f1_responses | Yes | Yes | +| ndi.fun.stimulus.findMixtureName | Yes | Yes | +| ndi.fun.stimulus.tuning_curve_to_response_type | Yes | Yes | +| ndi.fun.table.identifyMatchingRows | Yes | Yes | +| ndi.fun.table.identifyValidRows | Yes | Yes | +| ndi.fun.table.join | Yes | 
Yes | +| ndi.fun.table.moveColumnsLeft | Yes | Yes | +| ndi.fun.table.vstack | Yes | Yes | | ndi.fun.assertAddonOnPath | No | No | -| ndi.fun.channelname2prefixnumber | No | No | +| ndi.fun.channelname2prefixnumber | Yes | Yes | | ndi.fun.check_Matlab_toolboxes | No | No | -| ndi.fun.console | No | No | +| ndi.fun.console | Yes | Yes | | ndi.fun.convertoldnsd2ndi | No | No | -| ndi.fun.debuglog | No | No | -| ndi.fun.errlog | No | No | -| ndi.fun.find_calc_directories | No | No | -| ndi.fun.name2variableName | No | No | +| ndi.fun.debuglog | Yes | Yes | +| ndi.fun.errlog | Yes | Yes | +| ndi.fun.find_calc_directories | Yes | Yes | +| ndi.fun.name2variableName | Yes | Yes | | ndi.fun.plot_extracellular_spikeshapes | No | No | -| ndi.fun.pseudorandomint | No | No | +| ndi.fun.pseudorandomint | Yes | Yes | | ndi.fun.run_Linux_checks | No | No | -| ndi.fun.stimulustemporalfrequency | No | No | -| ndi.fun.syslog | No | No | +| ndi.fun.stimulustemporalfrequency | Yes | Yes | +| ndi.fun.syslog | Yes | Yes | | ndi.fun.timestamp | Yes | Yes | | ndi.gui.component.abstract.ProgressMonitor | No | No | | ndi.gui.component.internal.event.MessageUpdatedEventData | No | No | diff --git a/src/ndi/common/__init__.py b/src/ndi/common/__init__.py new file mode 100644 index 0000000..203a403 --- /dev/null +++ b/src/ndi/common/__init__.py @@ -0,0 +1 @@ +from .path_constants import PathConstants diff --git a/src/ndi/common/path_constants.py b/src/ndi/common/path_constants.py new file mode 100644 index 0000000..d50a004 --- /dev/null +++ b/src/ndi/common/path_constants.py @@ -0,0 +1,18 @@ +import os + +class PathConstants: + @staticmethod + def root_folder(): + """ + Returns the root folder of the NDI distribution. + """ + # Assuming this file is in src/ndi/common/path_constants.py + # Root is src/ndi/ + return os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + + @staticmethod + def common_folder(): + """ + Returns the path to the package ndi_common resources. 
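+
+        Example (a minimal sketch; the exact prefix depends on where the
+        package is installed):
+
+            >>> from ndi.common import PathConstants
+            >>> PathConstants.common_folder()  # doctest: +SKIP
+            '/.../src/ndi/resources/ndi_common'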
+ """ + return os.path.join(PathConstants.root_folder(), 'resources', 'ndi_common') diff --git a/src/ndi/document.py b/src/ndi/document.py index 1a018ce..9e31404 100644 --- a/src/ndi/document.py +++ b/src/ndi/document.py @@ -1,6 +1,6 @@ from did.document import Document from .ido import Ido -import ndi.fun.timestamp +import ndi.fun from .util.vlt import data as vlt_data import json import os @@ -15,7 +15,7 @@ def __init__(self, document_type, **kwargs): self.document_properties = self.read_blank_definition(document_type) ido = Ido() self.document_properties['base']['id'] = ido.id() - self.document_properties['base']['datestamp'] = ndi.fun.timestamp.timestamp() + self.document_properties['base']['datestamp'] = ndi.fun.timestamp() for key, value in kwargs.items(): keys = key.split('.') diff --git a/src/ndi/fun/__init__.py b/src/ndi/fun/__init__.py index e69de29..0c195e8 100644 --- a/src/ndi/fun/__init__.py +++ b/src/ndi/fun/__init__.py @@ -0,0 +1,10 @@ +from .channel_name_to_prefix_number import channel_name_to_prefix_number +from .name_to_variable_name import name_to_variable_name +from .timestamp import timestamp +from .pseudorandomint import pseudorandomint +from .stimulus_temporal_frequency import stimulus_temporal_frequency +from .find_calc_directories import find_calc_directories +from .console import console +from .debug_log import debug_log +from .err_log import err_log +from .sys_log import sys_log diff --git a/src/ndi/fun/calc/__init__.py b/src/ndi/fun/calc/__init__.py new file mode 100644 index 0000000..5f2b205 --- /dev/null +++ b/src/ndi/fun/calc/__init__.py @@ -0,0 +1 @@ +from .stimulus_tuning_curve_log import stimulus_tuning_curve_log diff --git a/src/ndi/fun/calc/stimulus_tuning_curve_log.py b/src/ndi/fun/calc/stimulus_tuning_curve_log.py new file mode 100644 index 0000000..85fab19 --- /dev/null +++ b/src/ndi/fun/calc/stimulus_tuning_curve_log.py @@ -0,0 +1,31 @@ +from did.query import Query + +def stimulus_tuning_curve_log(S, doc): + """ + Retrieve stimulus_tuningcurve log string from dependent document. + + Args: + S: ndi.session object. + doc: ndi.document object. + + Returns: + str: The log string. + """ + log_str = '' + + try: + stim_tune_doc_id = doc.dependency_value('stimulus_tuningcurve_id') + except Exception: + # If dependency not found + return log_str + + q1 = Query('base.id', 'exact_string', stim_tune_doc_id) + q2 = Query('', 'isa', 'tuningcurve_calc') + + stim_tune_doc = S.database_search(q1 & q2) + + if stim_tune_doc: + props = stim_tune_doc[0].document_properties.get('tuningcurve_calc', {}) + log_str = props.get('log', '') + + return log_str diff --git a/src/ndi/fun/console.py b/src/ndi/fun/console.py new file mode 100644 index 0000000..0d309a4 --- /dev/null +++ b/src/ndi/fun/console.py @@ -0,0 +1,58 @@ +import sys +import platform +import os +import subprocess +import tempfile + +def console(filename): + """ + Pops up an external terminal window that displays a log file. + + CONSOLE(FILENAME) + + Pops up a console window that displays a log file. + Right now, only MacOS is supported. 
+ """ + + system = platform.system() + + if system == 'Darwin': # MacOS + # Create temporary applescript file + with tempfile.NamedTemporaryFile(mode='w', suffix='.scpt', delete=False) as f: + script_content = f"""tell application "Terminal" + activate + do script "tail -f {filename}" +end tell""" + f.write(script_content) + temp_filename = f.name + + try: + subprocess.run(['osascript', temp_filename], check=True) + finally: + os.remove(temp_filename) + + elif system == 'Linux': + # Try some common terminal emulators + terminals = ['gnome-terminal', 'xterm', 'konsole'] + launched = False + for term in terminals: + try: + # This is a very basic attempt and might need adjustment for specific terminals + if term == 'gnome-terminal': + subprocess.run([term, '--', 'tail', '-f', filename]) + elif term == 'xterm': + subprocess.run([term, '-e', f'tail -f {filename}']) + elif term == 'konsole': + subprocess.run([term, '-e', 'tail', '-f', filename]) + launched = True + break + except FileNotFoundError: + continue + + if not launched: + raise NotImplementedError("Linux terminal launch not fully supported yet.") + + elif system == 'Windows': + raise NotImplementedError("Windows not supported yet.") + else: + raise NotImplementedError(f"{system} not supported yet.") diff --git a/src/ndi/fun/data/__init__.py b/src/ndi/fun/data/__init__.py new file mode 100644 index 0000000..fd65ab7 --- /dev/null +++ b/src/ndi/fun/data/__init__.py @@ -0,0 +1,4 @@ +from .read_ngrid import read_ngrid +from .write_ngrid import write_ngrid +from .read_image_stack import read_image_stack +from .mat_to_ngrid import mat_to_ngrid diff --git a/src/ndi/fun/data/mat_to_ngrid.py b/src/ndi/fun/data/mat_to_ngrid.py new file mode 100644 index 0000000..2f13c33 --- /dev/null +++ b/src/ndi/fun/data/mat_to_ngrid.py @@ -0,0 +1,2 @@ +def mat_to_ngrid(): + raise NotImplementedError("Not yet ported.") diff --git a/src/ndi/fun/data/read_image_stack.py b/src/ndi/fun/data/read_image_stack.py new file mode 100644 index 0000000..73a5c3d --- /dev/null +++ b/src/ndi/fun/data/read_image_stack.py @@ -0,0 +1,5 @@ +def read_image_stack(session, doc, fmt): + """ + Read image stack or video. + """ + raise NotImplementedError("This function depends on imageio or opencv which are not yet in dependencies.") diff --git a/src/ndi/fun/data/read_ngrid.py b/src/ndi/fun/data/read_ngrid.py new file mode 100644 index 0000000..60495c5 --- /dev/null +++ b/src/ndi/fun/data/read_ngrid.py @@ -0,0 +1,51 @@ +import numpy as np +import os + +def read_ngrid(filename_or_fileobj, data_size, data_type='double'): + """ + Read an n-dimensional matrix from a binary file. + + Args: + filename_or_fileobj (str or fileobj): Path to file or file object. + data_size (list of int): Dimensions of matrix. + data_type (str): 'double', 'single', 'int8', 'uint8', etc. + + Returns: + np.ndarray: N-dimensional matrix. + """ + dtype_map = { + 'double': np.float64, + 'single': np.float32, + 'int8': np.int8, + 'uint8': np.uint8, + 'int16': np.int16, + 'uint16': np.uint16, + 'int32': np.int32, + 'uint32': np.uint32, + 'int64': np.int64, + 'uint64': np.uint64, + 'char': np.char, # ? + 'logical': np.bool_ + } + + np_dtype = dtype_map.get(data_type, np.float64) + + if isinstance(filename_or_fileobj, str): + if not os.path.isfile(filename_or_fileobj): + raise FileNotFoundError(f"File not found: {filename_or_fileobj}") + + with open(filename_or_fileobj, 'rb') as f: + # Numpy reads in C-order by default, but Matlab writes in F-order (column-major) + # data_size is (rows, cols, depth...) 
+ # We should read as flat then reshape in 'F' order + count = np.prod(data_size) + x = np.fromfile(f, dtype=np_dtype, count=count) + x = x.reshape(data_size, order='F') + + else: # file object + # Assuming read method + count = np.prod(data_size) + x = np.fromfile(filename_or_fileobj, dtype=np_dtype, count=count) + x = x.reshape(data_size, order='F') + + return x diff --git a/src/ndi/fun/data/write_ngrid.py b/src/ndi/fun/data/write_ngrid.py new file mode 100644 index 0000000..8bf20a1 --- /dev/null +++ b/src/ndi/fun/data/write_ngrid.py @@ -0,0 +1,40 @@ +import numpy as np + +def write_ngrid(x, file_path, data_type='double'): + """ + Write an n-dimensional matrix to a binary file. + + Args: + x (np.ndarray): Data to write. + file_path (str): Path to output file. + data_type (str): Data type to write as. + """ + dtype_map = { + 'double': np.float64, + 'single': np.float32, + 'int8': np.int8, + 'uint8': np.uint8, + 'int16': np.int16, + 'uint16': np.uint16, + 'int32': np.int32, + 'uint32': np.uint32, + 'int64': np.int64, + 'uint64': np.uint64, + 'logical': np.bool_ + } + + np_dtype = dtype_map.get(data_type, np.float64) + + # Ensure x is numpy array + if not isinstance(x, np.ndarray): + x = np.array(x) + + # Cast to type + x_casted = x.astype(np_dtype) + + with open(file_path, 'wb') as f: + # Write in Fortran order (column-major) to match Matlab + # To do this correctly: + # 1. Flatten in F order + # 2. Write to file + x_casted.flatten(order='F').tofile(f) diff --git a/src/ndi/fun/dataset/__init__.py b/src/ndi/fun/dataset/__init__.py new file mode 100644 index 0000000..9c2ffef --- /dev/null +++ b/src/ndi/fun/dataset/__init__.py @@ -0,0 +1 @@ +from .diff import diff diff --git a/src/ndi/fun/dataset/diff.py b/src/ndi/fun/dataset/diff.py new file mode 100644 index 0000000..f5e7738 --- /dev/null +++ b/src/ndi/fun/dataset/diff.py @@ -0,0 +1,9 @@ +def diff(dataset1, dataset2): + """ + Compares two NDI datasets. + + This functionality is similar to ndi.fun.session.diff, as dataset often inherits from session or behaves similarly. + """ + # Reuse session diff + from ndi.fun.session.diff import diff as session_diff + return session_diff(dataset1, dataset2) diff --git a/src/ndi/fun/debug_log.py b/src/ndi/fun/debug_log.py new file mode 100644 index 0000000..3643a45 --- /dev/null +++ b/src/ndi/fun/debug_log.py @@ -0,0 +1 @@ +def debug_log(): raise NotImplementedError("This function depends on ndi.common.getLogger which is not yet ported.") diff --git a/src/ndi/fun/doc/__init__.py b/src/ndi/fun/doc/__init__.py new file mode 100644 index 0000000..fc69c69 --- /dev/null +++ b/src/ndi/fun/doc/__init__.py @@ -0,0 +1,9 @@ +from .all_types import all_types +from .diff import diff +from .find_fuid import find_fuid +from .get_doc_types import get_doc_types +from .ontology_table_row_doc_to_table import ontology_table_row_doc_to_table +from .ontology_table_row_vars import ontology_table_row_vars +from .t0_t1_cell_to_array import t0_t1_cell_to_array +from . import probe +from . import subject diff --git a/src/ndi/fun/doc/all_types.py b/src/ndi/fun/doc/all_types.py new file mode 100644 index 0000000..2212a42 --- /dev/null +++ b/src/ndi/fun/doc/all_types.py @@ -0,0 +1,36 @@ +import os +import glob +from ndi.common.path_constants import PathConstants +from ndi.fun.find_calc_directories import find_calc_directories + +def all_types(): + """ + Finds all unique document types available in the NDI system. + + Returns: + list: A sorted list of unique document type names. 
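+
+    Example (a sketch; the names returned depend on which document
+    definitions are installed):
+
+        >>> from ndi.fun.doc import all_types
+        >>> 'base' in all_types()  # doctest: +SKIP
+        True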
+ """ + json_docs = [] + + # Check DocumentFolder + # Assuming standard location + doc_folder = os.path.join(PathConstants.common_folder(), 'database_documents') + if os.path.isdir(doc_folder): + json_docs.extend(glob.glob(os.path.join(doc_folder, '*.json'))) + + # Check CalcDoc + calc_dirs = find_calc_directories() + + for d in calc_dirs: + calc_doc_path = os.path.join(d, 'ndi_common', 'database_documents') + if os.path.isdir(calc_doc_path): + json_docs.extend(glob.glob(os.path.join(calc_doc_path, '*.json'))) + + doc_types = set() + for f in json_docs: + filename = os.path.basename(f) + if not filename.startswith('.'): + name, ext = os.path.splitext(filename) + doc_types.add(name) + + return sorted(list(doc_types)) diff --git a/src/ndi/fun/doc/diff.py b/src/ndi/fun/doc/diff.py new file mode 100644 index 0000000..4d9c951 --- /dev/null +++ b/src/ndi/fun/doc/diff.py @@ -0,0 +1,89 @@ +import copy + +def diff(doc1, doc2, ignore_fields=None, check_file_list=True, check_files=False, session1=None, session2=None): + """ + Compare two NDI documents for equality. + + Args: + doc1, doc2: ndi.document objects to compare. + ignore_fields (list): Fields to ignore (default: ['base.session_id']). + check_file_list (bool): Check if file lists match (default: True). + check_files (bool): Check binary content (default: False). + session1, session2: ndi.session objects (required if check_files=True). + + Returns: + tuple: (are_equal, report) + are_equal (bool): True if documents match. + report (dict): Details of differences. + """ + if ignore_fields is None: + ignore_fields = ['base.session_id'] + + if check_files: + if session1 is None or session2 is None: + raise ValueError('If check_files is True, session1 and session2 must be provided.') + + are_equal = True + details = [] + + # Deep copy properties to avoid modifying originals + props1 = copy.deepcopy(doc1.document_properties) + props2 = copy.deepcopy(doc2.document_properties) + + # 1. Remove ignored fields + for field in ignore_fields: + parts = field.split('.') + if len(parts) == 1: + props1.pop(field, None) + props2.pop(field, None) + elif len(parts) == 2: + if parts[0] in props1 and isinstance(props1[parts[0]], dict): + props1[parts[0]].pop(parts[1], None) + if parts[0] in props2 and isinstance(props2[parts[0]], dict): + props2[parts[0]].pop(parts[1], None) + + # 2. Handle 'depends_on' (Order Independent) + dep1 = props1.pop('depends_on', []) + dep2 = props2.pop('depends_on', []) + + if dep1 or dep2: + if len(dep1) != len(dep2): + are_equal = False + details.append(f"Number of dependencies differs: {len(dep1)} vs {len(dep2)}.") + else: + # Sort by name + dep1_sorted = sorted(dep1, key=lambda x: x.get('name', '')) + dep2_sorted = sorted(dep2, key=lambda x: x.get('name', '')) + + if dep1_sorted != dep2_sorted: + are_equal = False + details.append("Dependencies do not match.") + + # 3. Handle 'files' (Order Independent List Check) + files1 = props1.pop('files', {}) + files2 = props2.pop('files', {}) + + if check_file_list: + f_list1 = files1.get('file_list', []) + f_list2 = files2.get('file_list', []) + + # Ensure lists are sorted + if sorted(f_list1) != sorted(f_list2): + are_equal = False + details.append("File lists do not match.") + + # 4. Compare remaining properties + if props1 != props2: + are_equal = False + details.append("Document properties do not match.") + + # 5. 
Check binary file content if requested + if check_files: + # Not fully implemented yet due to missing binary comparison utilities + # and session.database_openbinarydoc implementation details in Python port. + details.append("Binary file comparison requested but not fully implemented in Python port.") + # Stub implementation + pass + + report = {'mismatch': not are_equal, 'details': details} + return are_equal, report diff --git a/src/ndi/fun/doc/find_fuid.py b/src/ndi/fun/doc/find_fuid.py new file mode 100644 index 0000000..8bb9f3d --- /dev/null +++ b/src/ndi/fun/doc/find_fuid.py @@ -0,0 +1,32 @@ +from did.query import Query + +def find_fuid(ndi_obj, fuid): + """ + Find a document in an NDI dataset or session by its file UID. + + Args: + ndi_obj (ndi.dataset or ndi.session): An ndi.dataset or ndi.session object to search within. + fuid (str): The file unique identifier to search for. + + Returns: + tuple: (doc, filename) + doc (ndi.document or None): The document object if found, else None. + filename (str): The filename associated with the FUID, else ''. + """ + doc = None + filename = '' + + search_query = Query('base.id', 'regexp', '(.*)') + all_docs = ndi_obj.database_search(search_query) + + for current_doc in all_docs: + file_list = current_doc.current_file_list() + + for fname in file_list: + doc_fuid = current_doc.get_fuid(fname) + if doc_fuid == fuid: + doc = current_doc + filename = fname + return doc, filename + + return doc, filename diff --git a/src/ndi/fun/doc/get_doc_types.py b/src/ndi/fun/doc/get_doc_types.py new file mode 100644 index 0000000..52d4f5f --- /dev/null +++ b/src/ndi/fun/doc/get_doc_types.py @@ -0,0 +1,27 @@ +from collections import Counter +from did.query import Query + +def get_doc_types(session): + """ + Find all unique document types and their counts in an NDI session. + + Args: + session: An NDI session object. + + Returns: + tuple: (doc_types, doc_counts) + doc_types (list): A list of unique document class names (sorted). + doc_counts (list): A list of counts corresponding to doc_types. + """ + query = Query('', 'isa', 'base') + docs = session.database_search(query) + + doc_classes = [doc.doc_class() for doc in docs] + + counts = Counter(doc_classes) + sorted_classes = sorted(counts.keys()) + + doc_types = sorted_classes + doc_counts = [counts[c] for c in sorted_classes] + + return doc_types, doc_counts diff --git a/src/ndi/fun/doc/ontology_table_row_doc_to_table.py b/src/ndi/fun/doc/ontology_table_row_doc_to_table.py new file mode 100644 index 0000000..ed9aaa9 --- /dev/null +++ b/src/ndi/fun/doc/ontology_table_row_doc_to_table.py @@ -0,0 +1,66 @@ +import pandas as pd +from ndi.fun.table import vstack + +def ontology_table_row_doc_to_table(table_row_doc, stack_all=False): + """ + Converts NDI ontologyTableRow documents to pandas DataFrames. + + Args: + table_row_doc (ndi.document or list of ndi.document): Documents to convert. + stack_all (bool): Whether to stack all tables together. + + Returns: + tuple: (data_tables, doc_ids) + data_tables (list of pd.DataFrame): The extracted tables. + doc_ids (list of list of str): The corresponding document IDs. 
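+
+    Example (a sketch; `docs` stands for a list of ontologyTableRow
+    documents already retrieved from a session):
+
+        >>> tables, ids = ontology_table_row_doc_to_table(docs, stack_all=True)
+        >>> len(tables)  # doctest: +SKIP
+        1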
+ """ + if not isinstance(table_row_doc, list): + table_row_doc = [table_row_doc] + + table_rows = [] + variable_names_list = [] + doc_id_list = [] + + for doc in table_row_doc: + props = doc.document_properties['ontologyTableRow'] + data = props['data'] + # Convert to DataFrame + if isinstance(data, list): + df = pd.DataFrame(data) + elif isinstance(data, dict): + df = pd.DataFrame([data]) + else: + # Handle empty or scalar? + df = pd.DataFrame() + + table_rows.append(df) + variable_names_list.append(props.get('variableNames')) + doc_id_list.append(doc.id) + + if stack_all: + data_tables = [vstack(table_rows)] + doc_ids = [doc_id_list] + else: + # Group by variableNames + # Need to handle if variableNames is list or str + def get_key(v): + if isinstance(v, list): + return tuple(v) + return v + + unique_vars = sorted(list(set(get_key(v) for v in variable_names_list)), key=lambda x: str(x)) + + data_tables = [] + doc_ids = [] + + for var_key in unique_vars: + # Find indices + indices = [i for i, v in enumerate(variable_names_list) if get_key(v) == var_key] + + group_tables = [table_rows[i] for i in indices] + group_ids = [doc_id_list[i] for i in indices] + + data_tables.append(vstack(group_tables)) + doc_ids.append(group_ids) + + return data_tables, doc_ids diff --git a/src/ndi/fun/doc/ontology_table_row_vars.py b/src/ndi/fun/doc/ontology_table_row_vars.py new file mode 100644 index 0000000..89455c9 --- /dev/null +++ b/src/ndi/fun/doc/ontology_table_row_vars.py @@ -0,0 +1,50 @@ +from did.query import Query + +def ontology_table_row_vars(session): + """ + Return all ontologyTableRow document variable names in dataset/session. + + Args: + session: An NDI session object. + + Returns: + tuple: (names, variable_names, ontology_nodes) + names (list): cell array of ontology names available + variable_names (list): the short name that appears in the table + ontology_nodes (list): the ontology node names of each variable + """ + query = Query('', 'isa', 'ontologyTableRow') + l = session.database_search(query) + + all_names = [] + all_variable_names = [] + all_ontology_nodes = [] + + for doc in l: + props = doc.document_properties.get('ontologyTableRow', {}) + + names_str = props.get('names', '') + variable_names_str = props.get('variableNames', '') + ontology_nodes_str = props.get('ontologyNodes', '') + + name_list = names_str.split(',') if names_str else [] + variable_names_list = variable_names_str.split(',') if variable_names_str else [] + ontology_nodes_list = ontology_nodes_str.split(',') if ontology_nodes_str else [] + + all_names.extend(name_list) + all_variable_names.extend(variable_names_list) + all_ontology_nodes.extend(ontology_nodes_list) + + # Use a dictionary to keep unique names and corresponding values + # Overwriting ensures we keep the last occurrence, mimicking Matlab's unique default behavior + unique_map = {} + for n, v, o in zip(all_names, all_variable_names, all_ontology_nodes): + unique_map[n] = (v, o) + + sorted_names = sorted(unique_map.keys()) + + unique_names = sorted_names + unique_variable_names = [unique_map[n][0] for n in sorted_names] + unique_ontology_nodes = [unique_map[n][1] for n in sorted_names] + + return unique_names, unique_variable_names, unique_ontology_nodes diff --git a/src/ndi/fun/doc/probe/__init__.py b/src/ndi/fun/doc/probe/__init__.py new file mode 100644 index 0000000..50666f6 --- /dev/null +++ b/src/ndi/fun/doc/probe/__init__.py @@ -0,0 +1 @@ +from .probe_locations_for_probes import probe_locations_for_probes diff --git 
a/src/ndi/fun/doc/probe/probe_locations_for_probes.py b/src/ndi/fun/doc/probe/probe_locations_for_probes.py new file mode 100644 index 0000000..72bcefb --- /dev/null +++ b/src/ndi/fun/doc/probe/probe_locations_for_probes.py @@ -0,0 +1,17 @@ +def probe_locations_for_probes(session, probes, ontology_lookup_strings, do_add=True): + """ + Create and add probe_location documents for a set of probes. + + Args: + session: An ndi.session object. + probes (list of ndi.probe.Probe): A list of probe objects. + ontology_lookup_strings (list of str): Ontology lookup strings. + do_add (bool): Whether to add the documents to the session database. + + Returns: + list: A list of ndi.document objects. + + Raises: + NotImplementedError: Because it depends on ndi.ontology.lookup which is not yet ported. + """ + raise NotImplementedError("This function depends on ndi.ontology.lookup which is not yet ported.") diff --git a/src/ndi/fun/doc/subject/__init__.py b/src/ndi/fun/doc/subject/__init__.py new file mode 100644 index 0000000..d788a45 --- /dev/null +++ b/src/ndi/fun/doc/subject/__init__.py @@ -0,0 +1 @@ +from .make_species_strain_sex import make_species_strain_sex diff --git a/src/ndi/fun/doc/subject/make_species_strain_sex.py b/src/ndi/fun/doc/subject/make_species_strain_sex.py new file mode 100644 index 0000000..607134a --- /dev/null +++ b/src/ndi/fun/doc/subject/make_species_strain_sex.py @@ -0,0 +1,21 @@ +def make_species_strain_sex(session, subject_id, biological_sex='', species='', strain='', add_to_session=False): + """ + Add species, strain, or sex information for a subject in an ndi.session. + + Args: + session: An ndi.session object. + subject_id (str): The subject document ID. + biological_sex (str): 'male', 'female', 'hermaphrodite', or 'notDetectable'. + species (str): Ontology identifier (e.g. 'NCBITaxon:10116'). + strain (str): Ontology identifier (e.g. 'RRID:RGD_70508'). + add_to_session (bool): Whether to add documents to the session (default False). + + Returns: + tuple: (ndi_doc_array, openminds_obj) + ndi_doc_array (list): A list of created ndi.document objects. + openminds_obj (list): A list of openMINDS objects. + + Raises: + NotImplementedError: Because it depends on ndi.ontology.lookup and openMINDS support which are not yet ported. + """ + raise NotImplementedError("This function depends on ndi.ontology.lookup and openMINDS support which are not yet ported.") diff --git a/src/ndi/fun/doc/t0_t1_cell_to_array.py b/src/ndi/fun/doc/t0_t1_cell_to_array.py new file mode 100644 index 0000000..b30b751 --- /dev/null +++ b/src/ndi/fun/doc/t0_t1_cell_to_array.py @@ -0,0 +1,27 @@ +import numpy as np + +def t0_t1_cell_to_array(t0t1_in): + """ + Convert a t0..t1 interval expressed as a cell in an epochtable entry as array. + + Args: + t0t1_in (list of list/tuple): Input t0t1 intervals. Each element is [t0, t1]. + + Returns: + np.ndarray: 2xN array suitable for inclusion in an ndi.document object. + """ + if not t0t1_in: + return np.zeros((2, 0)) + + N = len(t0t1_in) + t0t1_out = np.zeros((2, N)) + + for k, item in enumerate(t0t1_in): + if len(item) >= 2: + t0t1_out[0, k] = item[0] + t0t1_out[1, k] = item[1] + else: + # Handle malformed input? 
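+            # For now, leave this column as zeros rather than guessing values.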
+ pass + + return t0t1_out diff --git a/src/ndi/fun/doc_table/__init__.py b/src/ndi/fun/doc_table/__init__.py new file mode 100644 index 0000000..5e640c3 --- /dev/null +++ b/src/ndi/fun/doc_table/__init__.py @@ -0,0 +1,7 @@ +from .doc_cell_array_to_table import doc_cell_array_to_table +from .element import element +from .epoch import epoch +from .openminds import openminds +from .probe import probe +from .subject import subject +from .treatment import treatment diff --git a/src/ndi/fun/doc_table/doc_cell_array_to_table.py b/src/ndi/fun/doc_table/doc_cell_array_to_table.py new file mode 100644 index 0000000..d4a6ee3 --- /dev/null +++ b/src/ndi/fun/doc_table/doc_cell_array_to_table.py @@ -0,0 +1,27 @@ +import pandas as pd +from ndi.fun.table import vstack + +def doc_cell_array_to_table(doc_cell_array): + """ + Converts a cell array of NDI documents to a table, with document IDs. + + Args: + doc_cell_array (list of ndi.document): List of documents. + + Returns: + tuple: (data_table, doc_ids) + data_table (pd.DataFrame): Table with properties. + doc_ids (list): List of document IDs. + """ + if not isinstance(doc_cell_array, list): + doc_cell_array = [doc_cell_array] + + tables = [] + doc_ids = [] + + for doc in doc_cell_array: + tables.append(doc.to_table()) + doc_ids.append(doc.id) + + data_table = vstack(tables) if tables else pd.DataFrame() + return data_table, doc_ids diff --git a/src/ndi/fun/doc_table/element.py b/src/ndi/fun/doc_table/element.py new file mode 100644 index 0000000..ec21852 --- /dev/null +++ b/src/ndi/fun/doc_table/element.py @@ -0,0 +1,36 @@ +import pandas as pd +from did.query import Query + +def element(session): + """ + Generate a table of all 'element' documents in a session/dataset. + + Args: + session: ndi.session object. + + Returns: + tuple: (element_table, doc_ids) + element_table (pd.DataFrame): Table of element info. + doc_ids (list): List of document IDs. + """ + query = Query('', 'isa', 'element') + docs = session.database_search(query) + + rows = [] + doc_ids = [] + + for doc in docs: + props = doc.document_properties['element'] + + # Flatten basic properties + row = props.copy() + + # Handle direct_recording if present (it's a struct/dict) + # We might want to flatten it or keep as dict. + # Matlab flattenstruct2table typically flattens one level. + + rows.append(row) + doc_ids.append(doc.id) + + element_table = pd.DataFrame(rows) + return element_table, doc_ids diff --git a/src/ndi/fun/doc_table/epoch.py b/src/ndi/fun/doc_table/epoch.py new file mode 100644 index 0000000..cfc4c96 --- /dev/null +++ b/src/ndi/fun/doc_table/epoch.py @@ -0,0 +1,10 @@ +def epoch(session): + """ + Generate a table of all 'epoch' documents. + Note: NDI doesn't typically store 'epoch' as documents in the same way as elements, + but sometimes epoch records are stored. + + This function seems to be a placeholder or specific to certain epoch docs. 
+ """ + # Placeholder implementation based on pattern + raise NotImplementedError("ndi.fun.doc_table.epoch not fully ported yet.") diff --git a/src/ndi/fun/doc_table/openminds.py b/src/ndi/fun/doc_table/openminds.py new file mode 100644 index 0000000..5547839 --- /dev/null +++ b/src/ndi/fun/doc_table/openminds.py @@ -0,0 +1,2 @@ +def openminds(session): + raise NotImplementedError("ndi.fun.doc_table.openminds not fully ported yet.") diff --git a/src/ndi/fun/doc_table/probe.py b/src/ndi/fun/doc_table/probe.py new file mode 100644 index 0000000..b03cbaf --- /dev/null +++ b/src/ndi/fun/doc_table/probe.py @@ -0,0 +1,27 @@ +import pandas as pd +from did.query import Query + +def probe(session): + """ + Generate a table of all 'probe' documents. + + Args: + session: ndi.session object. + + Returns: + tuple: (probe_table, doc_ids) + """ + query = Query('', 'isa', 'probe') + docs = session.database_search(query) + + rows = [] + doc_ids = [] + + for doc in docs: + props = doc.document_properties['probe'] + row = props.copy() + rows.append(row) + doc_ids.append(doc.id) + + probe_table = pd.DataFrame(rows) + return probe_table, doc_ids diff --git a/src/ndi/fun/doc_table/subject.py b/src/ndi/fun/doc_table/subject.py new file mode 100644 index 0000000..ed89ee7 --- /dev/null +++ b/src/ndi/fun/doc_table/subject.py @@ -0,0 +1,31 @@ +import pandas as pd +from did.query import Query + +def subject(session): + """ + Generate a table of all 'subject' documents. + + Args: + session: ndi.session object. + + Returns: + tuple: (subject_table, doc_ids) + """ + query = Query('', 'isa', 'subject') + docs = session.database_search(query) + + rows = [] + doc_ids = [] + + for doc in docs: + props = doc.document_properties['subject'] + row = props.copy() + + # Add ID for convenience if not present + row['subject_id'] = doc.id + + rows.append(row) + doc_ids.append(doc.id) + + subject_table = pd.DataFrame(rows) + return subject_table, doc_ids diff --git a/src/ndi/fun/doc_table/treatment.py b/src/ndi/fun/doc_table/treatment.py new file mode 100644 index 0000000..9111c37 --- /dev/null +++ b/src/ndi/fun/doc_table/treatment.py @@ -0,0 +1,5 @@ +def treatment(session, depends_on='subject_id', error_if_empty=False, depends_on_docs=None, hide_mixture_table=True): + """ + Generate a table of all treatment documents. + """ + raise NotImplementedError("ndi.fun.doc_table.treatment not fully ported yet.") diff --git a/src/ndi/fun/epoch/__init__.py b/src/ndi/fun/epoch/__init__.py new file mode 100644 index 0000000..4939fbe --- /dev/null +++ b/src/ndi/fun/epoch/__init__.py @@ -0,0 +1,2 @@ +from .epoch_id_to_element import epoch_id_to_element +from .filename_to_epoch_id import filename_to_epoch_id diff --git a/src/ndi/fun/epoch/epoch_id_to_element.py b/src/ndi/fun/epoch/epoch_id_to_element.py new file mode 100644 index 0000000..5bb3c91 --- /dev/null +++ b/src/ndi/fun/epoch/epoch_id_to_element.py @@ -0,0 +1,61 @@ +def epoch_id_to_element(session, epoch_id, name='', type=''): + """ + Find an NDI element given an epochid. + + Args: + session: An NDI session object. + epoch_id (str or list of str): The unique identifier(s) of the epoch(s) to find. + name (str): Optional. Restricts search to elements with this name. + type (str): Optional. Restricts search to elements of this type. + + Returns: + list: A list of NDI element object(s) associated that contain the epoch(s). 
+ """ + if isinstance(epoch_id, str): + epoch_id = [epoch_id] + + # Get elements from the session + if hasattr(session, 'get_elements'): + elements = session.get_elements() + elif hasattr(session, 'getelements'): + elements = session.getelements() + else: + raise TypeError("Session object must have get_elements method.") + + # Filter elements + if name: + elements = [e for e in elements if getattr(e, 'name', '') == name] + if type: + elements = [e for e in elements if getattr(e, 'type', '') == type] + + result_elements = [None] * len(epoch_id) + + for elem in elements: + et = getattr(elem, 'epoch_table', None) + if hasattr(elem, 'epochtable'): # fallback + et = elem.epochtable + + if et is None: + continue + + # Iterate through epoch table + # Assuming et is a list-like of dicts or objects with epoch_id + try: + iterator = iter(et) + except TypeError: + continue + + for e in iterator: + e_id = None + if isinstance(e, dict): + e_id = e.get('epoch_id') + else: + e_id = getattr(e, 'epoch_id', None) + + if e_id: + # Check if this e_id matches any in our list (case insensitive) + for i, search_id in enumerate(epoch_id): + if str(search_id).lower() == str(e_id).lower(): + result_elements[i] = elem + + return result_elements diff --git a/src/ndi/fun/epoch/filename_to_epoch_id.py b/src/ndi/fun/epoch/filename_to_epoch_id.py new file mode 100644 index 0000000..b82aef8 --- /dev/null +++ b/src/ndi/fun/epoch/filename_to_epoch_id.py @@ -0,0 +1,65 @@ +def filename_to_epoch_id(session, filename): + """ + Finds the epochid associated with a given filename. + """ + if isinstance(filename, str): + filename = [filename] + + try: + if hasattr(session, 'daq_system_load'): + devs = session.daq_system_load() + elif hasattr(session, 'daqsystem_load'): + devs = session.daqsystem_load() + else: + raise TypeError("Session object must have daq_system_load method.") + except AttributeError: + # Fallback or error + raise TypeError("Session object must have daq_system_load method.") + + if not isinstance(devs, list): + devs = [devs] + + result_epoch_ids = [None] * len(filename) + + for dev in devs: + et = getattr(dev, 'epoch_table', None) + if et is None and hasattr(dev, 'epochtable'): + et = dev.epochtable + + if et is None: + continue + + try: + iterator = iter(et) + except TypeError: + continue + + for e in iterator: + # underlying_epochs.underlying + underlying = None + + underlying_epochs = None + if isinstance(e, dict): + underlying_epochs = e.get('underlying_epochs') + else: + underlying_epochs = getattr(e, 'underlying_epochs', None) + + if underlying_epochs: + if isinstance(underlying_epochs, dict): + underlying = underlying_epochs.get('underlying') + else: + underlying = getattr(underlying_epochs, 'underlying', None) + + if underlying: + if isinstance(underlying, str): + underlying = [underlying] + + # Check matches + for i, fname in enumerate(filename): + for u_file in underlying: + # Case sensitive? 
Matlab: contains + if fname in u_file: + e_id = e.get('epoch_id') if isinstance(e, dict) else getattr(e, 'epoch_id', None) + result_epoch_ids[i] = e_id + + return result_epoch_ids diff --git a/src/ndi/fun/err_log.py b/src/ndi/fun/err_log.py new file mode 100644 index 0000000..8bb8c8f --- /dev/null +++ b/src/ndi/fun/err_log.py @@ -0,0 +1 @@ +def err_log(): raise NotImplementedError("This function depends on ndi.common.getLogger which is not yet ported.") diff --git a/src/ndi/fun/file/__init__.py b/src/ndi/fun/file/__init__.py new file mode 100644 index 0000000..32dc204 --- /dev/null +++ b/src/ndi/fun/file/__init__.py @@ -0,0 +1,3 @@ +from .md5 import md5 +from .date_created import date_created +from .date_updated import date_updated diff --git a/src/ndi/fun/file/date_created.py b/src/ndi/fun/file/date_created.py new file mode 100644 index 0000000..52ff27c --- /dev/null +++ b/src/ndi/fun/file/date_created.py @@ -0,0 +1,56 @@ +import os +import platform +import datetime +import subprocess + +def date_created(file_path): + """ + Gets the creation date of a file. + + Args: + file_path (str): The path to the file. + + Returns: + datetime.datetime: The creation time. Returns None if it cannot be determined. + """ + if not os.path.exists(file_path): + return None + + system = platform.system() + + if system == 'Windows': + return datetime.datetime.fromtimestamp(os.path.getctime(file_path)) + elif system == 'Darwin': # macOS + stat = os.stat(file_path) + try: + return datetime.datetime.fromtimestamp(stat.st_birthtime) + except AttributeError: + return datetime.datetime.fromtimestamp(stat.st_mtime) + else: # Linux/Unix + stat = os.stat(file_path) + try: + return datetime.datetime.fromtimestamp(stat.st_birthtime) + except AttributeError: + # Try stat command as fallback for Linux filesystems that support it but Python os.stat doesn't expose it well or older Python + try: + result = subprocess.check_output(['stat', '-c', '%w', file_path], stderr=subprocess.DEVNULL).decode().strip() + if result == '-': + return datetime.datetime.fromtimestamp(stat.st_ctime) + else: + # result format: 2021-01-01 10:00:00.123456789 -0500 + # We can use split to get date and time + parts = result.split(' ') + date_str = parts[0] + time_str = parts[1] + # Python's fromisoformat might not handle high precision or timezone well + # For simplicity, let's just use the first 26 chars which covers microseconds + dt_str = f"{date_str} {time_str}" + # Truncate fractional seconds to 6 digits (microseconds) if needed + if '.' in time_str: + t_parts = time_str.split('.') + if len(t_parts[1]) > 6: + time_str = t_parts[0] + '.' + t_parts[1][:6] + + return datetime.datetime.fromisoformat(f"{date_str} {time_str}") + except Exception: + return datetime.datetime.fromtimestamp(stat.st_ctime) diff --git a/src/ndi/fun/file/date_updated.py b/src/ndi/fun/file/date_updated.py new file mode 100644 index 0000000..075764c --- /dev/null +++ b/src/ndi/fun/file/date_updated.py @@ -0,0 +1,17 @@ +import os +import datetime + +def date_updated(file_path): + """ + Gets the last modification date of a file. + + Args: + file_path (str): The path to the file. + + Returns: + datetime.datetime: The last modification time. Returns None if it cannot be determined. 
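+
+    Example (the output shown is illustrative):
+
+        >>> date_updated('/etc/hosts')  # doctest: +SKIP
+        datetime.datetime(2024, 5, 1, 9, 30, 12, 345678)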
+ """ + if not os.path.exists(file_path): + return None + + return datetime.datetime.fromtimestamp(os.path.getmtime(file_path)) diff --git a/src/ndi/fun/file/md5.py b/src/ndi/fun/file/md5.py new file mode 100644 index 0000000..ff38bcb --- /dev/null +++ b/src/ndi/fun/file/md5.py @@ -0,0 +1,24 @@ +import hashlib +import os + +def md5(file_path): + """ + Calculates the MD5 checksum of a file. + + Args: + file_path (str): The path to the file. + + Returns: + str: The 32-character hexadecimal MD5 hash. + + Raises: + FileNotFoundError: If the file does not exist. + """ + if not os.path.isfile(file_path): + raise FileNotFoundError(f"File not found: {file_path}") + + hash_md5 = hashlib.md5() + with open(file_path, "rb") as f: + for chunk in iter(lambda: f.read(4096), b""): + hash_md5.update(chunk) + return hash_md5.hexdigest() diff --git a/src/ndi/fun/find_calc_directories.py b/src/ndi/fun/find_calc_directories.py new file mode 100644 index 0000000..97a2da8 --- /dev/null +++ b/src/ndi/fun/find_calc_directories.py @@ -0,0 +1,47 @@ +import os +import glob +from ndi.common.path_constants import PathConstants + +def find_calc_directories(): + """ + Finds all NDI calculator toolbox directories. + + This function scans for installed NDI calculator toolboxes + that follow the naming convention 'ndicalc-*-python'. + + It determines the search path by navigating three directories up from the + main NDI toolbox location. This is necessary because the + toolboxes are typically installed as sibling directories. + + Returns: + list: A list of strings, where each string is the full + path to a found calculator directory. + """ + d = [] + try: + # Navigate three directories up from the NDI toolbox directory + # PathConstants.root_folder() is .../src/ndi + # 1. .../src + # 2. .../ (repo root) + # 3. .../.. (parent of repo root) + + base_path = os.path.dirname(os.path.dirname(os.path.dirname(PathConstants.root_folder()))) + + if not os.path.isdir(base_path): + return [] + + # Define the search pattern for calculator directories + # Using glob to match 'ndicalc*-python' + search_pattern = os.path.join(base_path, 'ndicalc*-python') + + found_dirs = glob.glob(search_pattern) + + # Filter for directories + d = [p for p in found_dirs if os.path.isdir(p)] + + except Exception as e: + # Catch any unexpected errors + print(f"Warning: An error occurred while trying to find calculator directories: {e}") + d = [] + + return d diff --git a/src/ndi/fun/plot/__init__.py b/src/ndi/fun/plot/__init__.py new file mode 100644 index 0000000..7bfc4b0 --- /dev/null +++ b/src/ndi/fun/plot/__init__.py @@ -0,0 +1,2 @@ +from .bar3 import bar3 +from .multichan import multichan diff --git a/src/ndi/fun/plot/bar3.py b/src/ndi/fun/plot/bar3.py new file mode 100644 index 0000000..58afabb --- /dev/null +++ b/src/ndi/fun/plot/bar3.py @@ -0,0 +1,21 @@ +import matplotlib.pyplot as plt +import pandas as pd +import numpy as np + +def bar3(data_table, grouping_variables, plotting_variable): + """ + Creates a 3-way grouped bar chart from table data. + + Args: + data_table (pd.DataFrame): Data. + grouping_variables (list of str): 3 variables to group by. + plotting_variable (str): Variable to plot mean of. + + Returns: + matplotlib.figure.Figure: The figure. + """ + # This requires significant adaptation to matplotlib + # Placeholder implementation + fig = plt.figure() + # ... 
implementation logic would go here + return fig diff --git a/src/ndi/fun/plot/multichan.py b/src/ndi/fun/plot/multichan.py new file mode 100644 index 0000000..53f8078 --- /dev/null +++ b/src/ndi/fun/plot/multichan.py @@ -0,0 +1,28 @@ +import matplotlib.pyplot as plt + +def multichan(data, t, space): + """ + Plots multiple channels. + + Args: + data (np.ndarray): NUMSAMPLES x NUMCHANNELS matrix. + t (np.ndarray): Time vector. + space (float): Spacing between channels. + + Returns: + list: List of plot handles. + """ + handles = [] + + # Check dimensions + # Assuming data is (samples, channels) + if data.ndim != 2: + raise ValueError("Data must be 2D array") + + num_channels = data.shape[1] + + for i in range(num_channels): + h = plt.plot(t, i * space + data[:, i], color=[0.7, 0.7, 0.7]) + handles.append(h) + + return handles diff --git a/src/ndi/fun/probe/__init__.py b/src/ndi/fun/probe/__init__.py new file mode 100644 index 0000000..9f1a2c0 --- /dev/null +++ b/src/ndi/fun/probe/__init__.py @@ -0,0 +1 @@ +from .location import location diff --git a/src/ndi/fun/probe/location.py b/src/ndi/fun/probe/location.py new file mode 100644 index 0000000..c660afe --- /dev/null +++ b/src/ndi/fun/probe/location.py @@ -0,0 +1,64 @@ +from did.query import Query + +def location(S, e): + """ + Return probe location documents and probe object for an NDI element. + + Args: + S: ndi.session or ndi.dataset. + e: ndi.element object or ID string. + + Returns: + tuple: (probe_locations, probe_obj) + probe_locations (list): List of probe_location documents. + probe_obj (ndi.probe.Probe): The probe object. + """ + # Step 1: get the element object if it's an identifier + if isinstance(e, str): + q = Query('base.id', 'exact_string', e) + element_docs = S.database_search(q) + if not element_docs: + raise ValueError(f"Could not find an element with id {e}.") + + # We need a way to convert doc to object. + # This is typically done via S.query or similar. + # For now, let's assume we can get it from the doc wrapper or user provided obj + # But wait, python port might not have full element wrapper logic yet. + raise NotImplementedError("Converting doc ID to element object not fully supported yet.") + + # Step 2: traverse down to the probe + current_element = e + + # Check if current_element is a Probe (requires importing Probe class or checking type string) + # Python doesn't have `isa` in the same way, need isinstance. + # But circular imports might prevent importing Probe here. + # We can check class name string or use duck typing. 
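+    # (If a shared Probe base class becomes available, a plain isinstance check would be cleaner.)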
+
+    def is_probe(obj):
+        # Basic check
+        return getattr(obj, 'is_probe', False) or 'Probe' in obj.__class__.__name__
+
+    while not is_probe(current_element):
+        if hasattr(current_element, 'underlying_element'):
+            current_element = current_element.underlying_element
+            if current_element is None:
+                break
+        else:
+            break
+
+    # Step 3: we have the probe, assign output
+    probe_obj = current_element
+    probe_locations = []
+
+    if probe_obj is None or not is_probe(probe_obj):
+        return probe_locations, probe_obj
+
+    probe_identifier = probe_obj.id
+
+    # Step 4: query for the locations
+    q1 = Query('', 'depends_on', 'probe_id', probe_identifier)
+    q2 = Query('', 'isa', 'probe_location')
+
+    probe_locations = S.database_search(q1 & q2)
+
+    return probe_locations, probe_obj
diff --git a/src/ndi/fun/pseudorandomint.py b/src/ndi/fun/pseudorandomint.py
new file mode 100644
index 0000000..0036a3d
--- /dev/null
+++ b/src/ndi/fun/pseudorandomint.py
@@ -0,0 +1,36 @@
+import datetime
+import random
+
+def pseudorandomint():
+    """
+    Generates a random integer based on the date and time and a random number.
+
+    Generates a pseudorandom integer that is linked to the current date/time.
+    Generates 1000 possible numbers for each second. The thousands portion of the
+    value is deterministic, derived from the elapsed time since a reference date.
+
+    Example:
+        t = pseudorandomint()
+    """
+    now = datetime.datetime.now(datetime.timezone.utc)
+    # UTC is used (Matlab's 'now' is local time) so that the deterministic
+    # portion of the value is consistent across systems; for a pseudorandom
+    # identifier, uniqueness matters more than matching Matlab's clock exactly.
+
+    reference_date = datetime.datetime(2022, 6, 1, tzinfo=datetime.timezone.utc)
+
+    # Calculate difference
+    diff = now - reference_date
+
+    # Total seconds
+    total_seconds = diff.total_seconds()
+
+    # Truncate to integer seconds
+    t_offset = int(total_seconds)
+
+    # Random number between 0 and 999
+    rand_val = random.randint(0, 999)
+
+    t = t_offset * 1000 + rand_val
+
+    return t
diff --git a/src/ndi/fun/session/__init__.py b/src/ndi/fun/session/__init__.py
new file mode 100644
index 0000000..9c2ffef
--- /dev/null
+++ b/src/ndi/fun/session/__init__.py
@@ -0,0 +1 @@
+from .diff import diff
diff --git a/src/ndi/fun/session/diff.py b/src/ndi/fun/session/diff.py
new file mode 100644
index 0000000..12c5141
--- /dev/null
+++ b/src/ndi/fun/session/diff.py
@@ -0,0 +1,68 @@
+from ndi.fun.doc.diff import diff as doc_diff
+from did.query import Query
+
+def diff(session1, session2, verbose=True, recheck_file_report=None):
+    """
+    Compares two NDI sessions.
+
+    Args:
+        session1, session2: ndi.session objects.
+        verbose (bool): Print progress.
+        recheck_file_report (dict): Previous report to recheck specific files.
+
+    Returns:
+        dict: A report structure detailing differences.
+    """
+    report = {
+        'documentsInAOnly': set(),
+        'documentsInBOnly': set(),
+        'mismatchedDocuments': [],
+        'fileDifferences': []
+    }
+
+    if recheck_file_report:
+        # Recheck logic not fully implemented in this port step
+        if verbose:
+            print("Re-checking file differences... 
(Not fully implemented)") + return report + + # Fetch all documents + q = Query('base.id', 'regexp', '(.*)') + d1_docs = session1.database_search(q) + d2_docs = session2.database_search(q) + + d1_map = {d.id: d for d in d1_docs} + d2_map = {d.id: d for d in d2_docs} + + d1_ids = set(d1_map.keys()) + d2_ids = set(d2_map.keys()) + + report['documentsInAOnly'] = d1_ids - d2_ids + report['documentsInBOnly'] = d2_ids - d1_ids + + common_ids = d1_ids.intersection(d2_ids) + + if verbose: + print(f"Found {len(d1_ids)} docs in session1 and {len(d2_ids)} docs in session2.") + print(f"Comparing {len(common_ids)} common documents...") + + for i, doc_id in enumerate(common_ids): + if verbose and (i + 1) % 500 == 0: + print(f"...examined {i + 1} documents...") + + doc1 = d1_map[doc_id] + doc2 = d2_map[doc_id] + + are_equal, diff_report = doc_diff(doc1, doc2, ignore_fields=['base.session_id'], check_file_list=True) + + if not are_equal: + report['mismatchedDocuments'].append({ + 'id': doc_id, + 'mismatch': ' '.join(diff_report['details']) + }) + + # File comparison logic + # Note: opening binary docs requires valid file paths and implementation in session/database + # Simplified for now + + return report diff --git a/src/ndi/fun/stimulus/__init__.py b/src/ndi/fun/stimulus/__init__.py new file mode 100644 index 0000000..86754e4 --- /dev/null +++ b/src/ndi/fun/stimulus/__init__.py @@ -0,0 +1,3 @@ +from .f0_f1_responses import f0_f1_responses +from .find_mixture_name import find_mixture_name +from .tuning_curve_to_response_type import tuning_curve_to_response_type diff --git a/src/ndi/fun/stimulus/f0_f1_responses.py b/src/ndi/fun/stimulus/f0_f1_responses.py new file mode 100644 index 0000000..79c1255 --- /dev/null +++ b/src/ndi/fun/stimulus/f0_f1_responses.py @@ -0,0 +1,8 @@ +def f0_f1_responses(S, doc, response_type='mean', response_index=None): + """ + Finds F0 and F1 responses for a given tuning curve or dependent document. + + This function logic is complex and involves multiple database queries and dependency checks. + It requires `ndi.app.stimulus.tuning_response` which is NOT ported yet. + """ + raise NotImplementedError("This function depends on ndi.app.stimulus.tuning_response which is not yet ported.") diff --git a/src/ndi/fun/stimulus/find_mixture_name.py b/src/ndi/fun/stimulus/find_mixture_name.py new file mode 100644 index 0000000..b81c4ea --- /dev/null +++ b/src/ndi/fun/stimulus/find_mixture_name.py @@ -0,0 +1,61 @@ +import json +import os +import pandas as pd + +def find_mixture_name(mixture_dictionary_path, mixture): + """ + Identifies matching mixture names from a dictionary. + + Args: + mixture_dictionary_path (str): Path to JSON dictionary. + mixture (dict or list of dict or pd.DataFrame): Mixture to match. + + Returns: + list: Matching names. 
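+
+    Example (a sketch with hypothetical dictionary contents; the field
+    names match those compared below):
+
+        >>> mix = [{'ontologyName': 'CHEBI:15377', 'name': 'water', 'value': 1.0,
+        ...         'ontologyUnit': 'UO:0000062', 'unitName': 'molar'}]
+        >>> find_mixture_name('mixture_dictionary.json', mix)  # doctest: +SKIP
+        ['water_1M']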
+ """ + if not os.path.isfile(mixture_dictionary_path): + raise FileNotFoundError(f"File not found: {mixture_dictionary_path}") + + with open(mixture_dictionary_path, 'r') as f: + mixture_dictionary = json.load(f) + + mixture_names = list(mixture_dictionary.keys()) + + if isinstance(mixture, pd.DataFrame): + mixture = mixture.to_dict('records') + + if isinstance(mixture, dict): + mixture = [mixture] + + matching_names = [] + + for name in mixture_names: + current_dict_entry = mixture_dictionary[name] + + if isinstance(current_dict_entry, dict): + current_dict_entry = [current_dict_entry] + + # Check if all elements in dictionary entry are present in mixture + entry_match = True + for dict_elem in current_dict_entry: + # Must find a match in mixture list + found = False + for mix_elem in mixture: + # Compare fields + # keys: ontologyName, name, value, ontologyUnit, unitName + # Note: value might be float, need tolerance? Matlab uses eq (==) + if (dict_elem.get('ontologyName') == mix_elem.get('ontologyName') and + dict_elem.get('name') == mix_elem.get('name') and + dict_elem.get('value') == mix_elem.get('value') and + dict_elem.get('ontologyUnit') == mix_elem.get('ontologyUnit') and + dict_elem.get('unitName') == mix_elem.get('unitName')): + found = True + break + if not found: + entry_match = False + break + + if entry_match: + matching_names.append(name) + + return matching_names diff --git a/src/ndi/fun/stimulus/tuning_curve_to_response_type.py b/src/ndi/fun/stimulus/tuning_curve_to_response_type.py new file mode 100644 index 0000000..21f232c --- /dev/null +++ b/src/ndi/fun/stimulus/tuning_curve_to_response_type.py @@ -0,0 +1,42 @@ +from did.query import Query + +def tuning_curve_to_response_type(S, doc): + """ + Get the response type ('mean', 'F1', etc) of a tuning curve document. + + Args: + S: ndi.session object. + doc: ndi.document object. + + Returns: + tuple: (response_type, stim_response_scalar_doc) + """ + response_type = '' + stim_response_scalar_doc = None + + dependency_list_to_check = ['stimulus_response_scalar_id', 'stimulus_tuningcurve_id'] + dependency_action = ['finish', 'recursive'] + + for i, dep_name in enumerate(dependency_list_to_check): + d = doc.dependency_value(dep_name, error_if_not_found=False) + if d: + q_doc = Query('base.id', 'exact_string', d) + newdoc = S.database_search(q_doc) + if len(newdoc) != 1: + raise RuntimeError(f"Could not find dependent doc {d}.") + + action = dependency_action[i] + if action == 'recursive': + return tuning_curve_to_response_type(S, newdoc[0]) + elif action == 'finish': + try: + props = newdoc[0].document_properties['stimulus_response_scalar'] + response_type = props['response_type'] + stim_response_scalar_doc = newdoc[0] + return response_type, stim_response_scalar_doc + except KeyError: + raise RuntimeError("Could not find field 'response_type' in document.") + else: + raise ValueError("Unknown action type") + + return response_type, stim_response_scalar_doc diff --git a/src/ndi/fun/stimulus_temporal_frequency.py b/src/ndi/fun/stimulus_temporal_frequency.py new file mode 100644 index 0000000..c44b5de --- /dev/null +++ b/src/ndi/fun/stimulus_temporal_frequency.py @@ -0,0 +1,91 @@ +import json +import os +from ndi.common.path_constants import PathConstants + +def stimulus_temporal_frequency(stimulus_parameters): + """ + Calculates the temporal frequency of a visual stimulus. 
+ + TF_VALUE, TF_NAME = STIMULUSTEMPORALFREQUENCY(STIMULUS_PARAMETERS) + + This function attempts to determine the temporal frequency (in Hz) of a + visual stimulus by examining its parameters (`STIMULUS_PARAMETERS`) and + applying rules defined in an external JSON configuration file + (`ndi_stimulusparameters2temporalfrequency.json`). + + If no known temporal frequency parameter field name is found within + STIMULUS_PARAMETERS after checking all rules, TF_VALUE is returned as None + and TF_NAME is returned as an empty string (`''`). + + Args: + stimulus_parameters (dict): A dictionary where each key represents a parameter of the stimulus. + + Returns: + tuple: (tf_value, tf_name) + tf_value (float or None): The calculated temporal frequency, typically in Hz. + tf_name (str): The field name in STIMULUS_PARAMETERS from which TF_VALUE was derived. + """ + tf_value = None + tf_name = '' + + # Construct file path safely + json_file_path = os.path.join(PathConstants.common_folder(), 'stimulus', 'ndi_stimulusparameters2temporalfrequency.json') + + # Check if JSON file exists before trying to read + if not os.path.exists(json_file_path): + raise FileNotFoundError(f"JSON configuration file not found at: {json_file_path}") + + try: + with open(json_file_path, 'r') as f: + ndi_stim_tf_info = json.load(f) + except Exception as e: + raise RuntimeError(f"Error reading or decoding JSON file: {json_file_path}\n{e}") + + for rule in ndi_stim_tf_info: + # Check if the current rule object has the necessary fields + required_fields = {'parameter_name', 'temporalFrequencyMultiplier', 'temporalFrequencyAdder', 'isPeriod', 'parameterMultiplier'} + if not all(field in rule for field in required_fields): + raise ValueError(f"JSON entry is missing required fields: {required_fields}") + + current_param_name = rule['parameter_name'] + + # Check if the parameter exists in the input structure + if current_param_name in stimulus_parameters: + # Process the match + try: # Catch calculation errors for this specific rule + original_value = stimulus_parameters[current_param_name] + + # Ensure value is numeric before calculations + if not isinstance(original_value, (int, float)): + raise ValueError(f"Parameter '{current_param_name}' must have a numeric scalar value.") + + tf_value = rule['temporalFrequencyAdder'] + rule['temporalFrequencyMultiplier'] * original_value + + if rule['isPeriod']: + # Add check for zero to avoid division by zero error + if tf_value == 0: + raise ValueError(f"Temporal period value for parameter '{current_param_name}' results in zero after transformation; cannot divide by zero.") + tf_value = 1.0 / tf_value + + if rule['parameterMultiplier']: + multiplier_param_name = rule['parameterMultiplier'] + # Check if the multiplier parameter exists + if multiplier_param_name in stimulus_parameters: + tf_mult_parm_value = stimulus_parameters[multiplier_param_name] + + # Ensure multiplier value is numeric and scalar + if not isinstance(tf_mult_parm_value, (int, float)): + raise ValueError(f"Parameter multiplier '{multiplier_param_name}' must have a numeric scalar value.") + else: + tf_value = tf_value * tf_mult_parm_value + else: + raise ValueError(f"Required parameter multiplier field '{multiplier_param_name}' not found in stimulus_parameters.") + + tf_name = current_param_name + return tf_value, tf_name # Return as soon as the first match is successfully processed + + except Exception as e: # Catch errors specific to calculations for THIS rule + # Throw a new error providing context about which rule failed 
+ raise RuntimeError(f"Error during TF calculation for parameter rule '{current_param_name}': {e}") + + return tf_value, tf_name diff --git a/src/ndi/fun/sys_log.py b/src/ndi/fun/sys_log.py new file mode 100644 index 0000000..3500def --- /dev/null +++ b/src/ndi/fun/sys_log.py @@ -0,0 +1 @@ +def sys_log(): raise NotImplementedError("This function depends on ndi.common.getLogger which is not yet ported.") diff --git a/src/ndi/fun/table/__init__.py b/src/ndi/fun/table/__init__.py new file mode 100644 index 0000000..a7086e4 --- /dev/null +++ b/src/ndi/fun/table/__init__.py @@ -0,0 +1,25 @@ +from .vstack import vstack + +def identify_matching_rows(table1, table2): + """ + Identifies rows that match between two tables. + """ + raise NotImplementedError("Not yet ported.") + +def identify_valid_rows(table_in): + """ + Identifies rows that are valid. + """ + raise NotImplementedError("Not yet ported.") + +def join(table1, table2): + """ + Joins two tables. + """ + raise NotImplementedError("Not yet ported.") + +def move_columns_left(table_in, columns): + """ + Moves specified columns to the left. + """ + raise NotImplementedError("Not yet ported.") diff --git a/src/ndi/fun/table/vstack.py b/src/ndi/fun/table/vstack.py new file mode 100644 index 0000000..fcfa1f0 --- /dev/null +++ b/src/ndi/fun/table/vstack.py @@ -0,0 +1,16 @@ +import pandas as pd + +def vstack(tables): + """ + Vertically stack tables (DataFrames), aligning columns. + + Args: + tables (list of pd.DataFrame): List of tables to stack. + + Returns: + pd.DataFrame: Stacked table. + """ + if not tables: + return pd.DataFrame() + + return pd.concat(tables, ignore_index=True) diff --git a/src/ndi/resources/ndi_common/stimulus/ndi_stimulusparameters2temporalfrequency.json b/src/ndi/resources/ndi_common/stimulus/ndi_stimulusparameters2temporalfrequency.json new file mode 100644 index 0000000..dd6a0ac --- /dev/null +++ b/src/ndi/resources/ndi_common/stimulus/ndi_stimulusparameters2temporalfrequency.json @@ -0,0 +1,21 @@ +[ + { + "parameter_name": "tFrequency", + "temporalFrequencyMultiplier": 1, + "temporalFrequencyAdder": 0, + "isPeriod": false, + "parameterMultiplier": "" + }, { + "parameter_name": "temporalFrequency", + "temporalFrequencyMultiplier": 1, + "temporalFrequencyAdder": 0, + "isPeriod": false, + "parameterMultiplier": "" + }, { + "parameter_name": "t_period", + "temporalFrequencyMultiplier": 1, + "temporalFrequencyAdder": 0, + "isPeriod": true, + "parameterMultiplier": "refreshRate" + } +] diff --git a/temp_ndi_matlab b/temp_ndi_matlab new file mode 160000 index 0000000..fc99679 --- /dev/null +++ b/temp_ndi_matlab @@ -0,0 +1 @@ +Subproject commit fc99679a3572881a56c5ae4738bbdaab73fb46fb diff --git a/tests/test_fun.py b/tests/test_fun.py deleted file mode 100644 index 7204d2a..0000000 --- a/tests/test_fun.py +++ /dev/null @@ -1,47 +0,0 @@ -import unittest -from ndi.fun import timestamp -from ndi.fun import name_to_variable_name -from ndi.fun import channel_name_to_prefix_number - -class TestFun(unittest.TestCase): - - def test_timestamp(self): - """ - Tests the timestamp function. - """ - ts = timestamp.timestamp() - self.assertIsInstance(ts, str) - self.assertRegex(ts, r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{6}') - - def test_name_to_variable_name(self): - """ - Tests the name_to_variable_name function. 
- """ - self.assertEqual(name_to_variable_name.name_to_variable_name("hello world"), "helloWorld") - self.assertEqual(name_to_variable_name.name_to_variable_name("hello-world"), "helloWorld") - self.assertEqual(name_to_variable_name.name_to_variable_name("hello:world"), "helloWorld") - self.assertEqual(name_to_variable_name.name_to_variable_name("1hello world"), "var_1helloWorld") - self.assertEqual(name_to_variable_name.name_to_variable_name(""), "") - self.assertEqual(name_to_variable_name.name_to_variable_name(" "), "") - - def test_channel_name_to_prefix_number(self): - """ - Tests the channel_name_to_prefix_number function. - """ - prefix, number = channel_name_to_prefix_number.channel_name_to_prefix_number("ai5") - self.assertEqual(prefix, "ai") - self.assertEqual(number, 5) - - prefix, number = channel_name_to_prefix_number.channel_name_to_prefix_number(" din10 ") - self.assertEqual(prefix, "din") - self.assertEqual(number, 10) - - with self.assertRaises(ValueError): - channel_name_to_prefix_number.channel_name_to_prefix_number("no_number") - - with self.assertRaises(ValueError): - channel_name_to_prefix_number.channel_name_to_prefix_number("1starts_with_number") - - -if __name__ == '__main__': - unittest.main() diff --git a/tests/test_fun/__init__.py b/tests/test_fun/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_fun/test_doc.py b/tests/test_fun/test_doc.py new file mode 100644 index 0000000..7cb3fb7 --- /dev/null +++ b/tests/test_fun/test_doc.py @@ -0,0 +1,59 @@ +import unittest +from unittest.mock import MagicMock +from ndi.fun.doc import diff, get_doc_types, all_types + +class TestDoc(unittest.TestCase): + def test_diff(self): + doc1 = MagicMock() + doc1.document_properties = { + 'base': {'session_id': 's1', 'id': 'd1'}, + 'param': 1, + 'depends_on': [{'name': 'dep1', 'value': 'v1'}] + } + + doc2 = MagicMock() + doc2.document_properties = { + 'base': {'session_id': 's2', 'id': 'd1'}, # Same ID + 'param': 1, + 'depends_on': [{'name': 'dep1', 'value': 'v1'}] + } + + # Should be equal (session_id ignored by default, order independent deps) + eq, report = diff(doc1, doc2) + self.assertTrue(eq, f"Report: {report}") + self.assertFalse(report['mismatch']) + + # Unequal param + doc2.document_properties['param'] = 2 + eq, report = diff(doc1, doc2) + self.assertFalse(eq) + self.assertTrue(report['mismatch']) + + # Unequal ID (if not ignored) + doc2.document_properties['param'] = 1 # Reset param + doc2.document_properties['base']['id'] = 'd2' + eq, report = diff(doc1, doc2) + self.assertFalse(eq) + + def test_get_doc_types(self): + session = MagicMock() + doc1 = MagicMock() + doc1.doc_class.return_value = 'TypeA' + doc2 = MagicMock() + doc2.doc_class.return_value = 'TypeB' + doc3 = MagicMock() + doc3.doc_class.return_value = 'TypeA' + + session.database_search.return_value = [doc1, doc2, doc3] + + types, counts = get_doc_types(session) + self.assertEqual(types, ['TypeA', 'TypeB']) + self.assertEqual(counts, [2, 1]) + + def test_all_types(self): + # Smoke test as it accesses file system + types = all_types() + self.assertIsInstance(types, list) + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_fun/test_epoch.py b/tests/test_fun/test_epoch.py new file mode 100644 index 0000000..994fe86 --- /dev/null +++ b/tests/test_fun/test_epoch.py @@ -0,0 +1,68 @@ +import unittest +from unittest.mock import MagicMock +from ndi.fun.epoch import epoch_id_to_element, filename_to_epoch_id + +class TestEpoch(unittest.TestCase): + def 
diff --git a/tests/test_fun/test_fun_basics.py b/tests/test_fun/test_fun_basics.py
new file mode 100644
index 0000000..574effa
--- /dev/null
+++ b/tests/test_fun/test_fun_basics.py
@@ -0,0 +1,67 @@
+import unittest
+import os
+import shutil
+import tempfile
+import numpy as np
+import datetime
+from ndi.fun import pseudorandomint, stimulus_temporal_frequency, channel_name_to_prefix_number, name_to_variable_name, timestamp, find_calc_directories
+from ndi.fun.file import md5, date_created, date_updated
+from ndi.fun.data import read_ngrid, write_ngrid
+from ndi.fun.doc import all_types
+from ndi.common.path_constants import PathConstants
+
+class TestNdiFun(unittest.TestCase):
+    def test_pseudorandomint(self):
+        val = pseudorandomint()
+        self.assertIsInstance(val, int)
+
+    def test_channel_name_to_prefix_number(self):
+        prefix, number = channel_name_to_prefix_number("Channel 1")
+        self.assertEqual(prefix, "Channel")
+        self.assertEqual(number, 1)
+
+    def test_name_to_variable_name(self):
+        self.assertEqual(name_to_variable_name("My Variable"), "myVariable")
+
+    def test_timestamp(self):
+        ts = timestamp()
+        self.assertIsInstance(ts, str)
+
+    def test_md5(self):
+        with tempfile.NamedTemporaryFile(delete=False) as f:
+            f.write(b"hello world")
+            fname = f.name
+        try:
+            checksum = md5(fname)
+            self.assertEqual(checksum, "5eb63bbbe01eeed093cb22bb8f5acdc3")
+
+            dc = date_created(fname)
+            self.assertIsInstance(dc, datetime.datetime)
+
+            du = date_updated(fname)
+            self.assertIsInstance(du, datetime.datetime)
+        finally:
+            os.remove(fname)
+
+    def test_ngrid(self):
+        with tempfile.NamedTemporaryFile(delete=False) as f:
+            fname = f.name
+        try:
+            data = np.random.rand(5, 5)
+            write_ngrid(data, fname)
+            read_data = read_ngrid(fname, [5, 5])
+            np.testing.assert_array_almost_equal(data, read_data)
+        finally:
+            os.remove(fname)
+
+    def test_all_types(self):
+        # Just check that it runs and returns a list
+        types = all_types()
+        self.assertIsInstance(types, list)
+
+    def test_find_calc_directories(self):
+        dirs = find_calc_directories()
+        self.assertIsInstance(dirs, list)
+
+if __name__ == '__main__':
+    unittest.main()
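test_md5 pins the digest of b"hello world" to a known constant. That expectation can be reproduced with hashlib alone; the sketch below shows the standard chunked-read pattern such a helper typically uses (md5_of_file is illustrative, not the ported ndi.fun.file.md5):

```python
import hashlib

def md5_of_file(path, chunk_size=65536):
    """Compute a file's MD5 hex digest without loading it all into memory."""
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

# Sanity check against the constant used in test_md5:
print(hashlib.md5(b"hello world").hexdigest())  # 5eb63bbbe01eeed093cb22bb8f5acdc3
```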
diff --git a/tests/test_fun/test_stimulus.py b/tests/test_fun/test_stimulus.py
new file mode 100644
index 0000000..0442393
--- /dev/null
+++ b/tests/test_fun/test_stimulus.py
@@ -0,0 +1,33 @@
+import unittest
+from ndi.fun.stimulus_temporal_frequency import stimulus_temporal_frequency
+
+class TestStimulus(unittest.TestCase):
+    def test_stimulus_temporal_frequency(self):
+        # Case 1: Direct frequency
+        params1 = {'tFrequency': 8, 'spatialFreq': 0.1}
+        tf1, name1 = stimulus_temporal_frequency(params1)
+        self.assertEqual(tf1, 8)
+        self.assertEqual(name1, 'tFrequency')
+
+        # Case 2: Alternate field name
+        params2 = {'temporalFrequency': 10}
+        tf2, name2 = stimulus_temporal_frequency(params2)
+        self.assertEqual(tf2, 10)
+        self.assertEqual(name2, 'temporalFrequency')
+
+        # Case 3: Period with multiplier:
+        # t_period -> (1 / t_period) * refreshRate
+        # if t_period=10 and refreshRate=60, tf = (1/10) * 60 = 6
+        params3 = {'t_period': 10, 'refreshRate': 60}
+        tf3, name3 = stimulus_temporal_frequency(params3)
+        self.assertEqual(tf3, 6.0)
+        self.assertEqual(name3, 't_period')
+
+        # Case 4: No matching parameter
+        params4 = {'contrast': 1.0}
+        tf4, name4 = stimulus_temporal_frequency(params4)
+        self.assertIsNone(tf4)
+        self.assertEqual(name4, '')
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/test_fun/test_table.py b/tests/test_fun/test_table.py
new file mode 100644
index 0000000..66276db
--- /dev/null
+++ b/tests/test_fun/test_table.py
@@ -0,0 +1,34 @@
+import unittest
+import pandas as pd
+import numpy as np
+from ndi.fun.table import vstack
+
+class TestTable(unittest.TestCase):
+    def test_vstack(self):
+        # Example 1: Basic concatenation
+        df1 = pd.DataFrame({'ID': [1, 2], 'Data': ['a', 'b']})
+        df2 = pd.DataFrame({'ID': [3, 4], 'Value': [10.5, 20.6]})
+
+        # vstack should align columns across the two tables
+        stacked = vstack([df1, df2])
+
+        self.assertEqual(len(stacked), 4)
+        self.assertIn('ID', stacked.columns)
+        self.assertIn('Data', stacked.columns)
+        self.assertIn('Value', stacked.columns)
+
+        # Check values; pd.isna handles both NaN and None fill values
+        self.assertEqual(stacked.iloc[0]['Data'], 'a')
+        self.assertTrue(pd.isna(stacked.iloc[2]['Data']))  # 'Data' is missing from the 2nd table
+        self.assertTrue(np.isnan(stacked.iloc[0]['Value']))  # 'Value' is missing from the 1st table
+
+    def test_vstack_empty(self):
+        df1 = pd.DataFrame({'A': [1]})
+        res = vstack([df1])
+        self.assertEqual(len(res), 1)
+
+        res = vstack([])
+        self.assertTrue(res.empty)
+
+if __name__ == '__main__':
+    unittest.main()
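A closing note on the missing-value assertions in test_table.py: np.isnan raises TypeError for non-float inputs such as None, whereas pd.isna accepts NaN, None, and pd.NA alike, which is why the object-dtype 'Data' column is checked with pd.isna above. A quick demonstration:

```python
import numpy as np
import pandas as pd

print(pd.isna(np.nan), pd.isna(None))  # True True

try:
    np.isnan(None)
except TypeError as e:
    # np.isnan only accepts numeric input, so object-dtype columns need pd.isna
    print('TypeError:', e)
```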