diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..1e88f1ba8 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,19 @@ +# Force LF line endings for WSL and cross-platform (Linux/Mac/GitHub Actions) compatibility +# This prevents obscure bugs when running Windows-edited scripts on Linux runners. +* text=auto eol=lf + +*.py text eol=lf +*.rst text eol=lf +*.md text eol=lf +*.yml text eol=lf +*.yaml text eol=lf +*.cfg text eol=lf +*.toml text eol=lf +*.ipynb text eol=lf + +*.png binary +*.jpg binary +*.jpeg binary +*.gif binary +*.ico binary +*.pdf binary diff --git a/.github/actions/setup-conda-env/action.yml b/.github/actions/setup-conda-env/action.yml new file mode 100644 index 000000000..4867fd1b0 --- /dev/null +++ b/.github/actions/setup-conda-env/action.yml @@ -0,0 +1,32 @@ +name: Set up conda env +description: > + Install Miniconda and create/activate an EEG-ExPy conda environment from + the given env yml. Shared by the Test and Typecheck jobs so the two + don't drift apart. Environment name is not set in the yml files so local + installs can use any name they like. + +inputs: + environment-file: + required: true + description: Path to the conda environment yml file to install from. + activate-environment: + required: true + description: Name to give the created environment. + python-version: + required: false + description: > + Python version to pin (e.g. '3.8'). Overrides the version conda would + otherwise resolve from the environment file's constraints. When omitted, + conda resolves freely within the environment file's range. 
+ +runs: + using: composite + steps: + - uses: conda-incubator/setup-miniconda@v3 + with: + environment-file: ${{ inputs.environment-file }} + activate-environment: ${{ inputs.activate-environment }} + python-version: ${{ inputs.python-version }} + auto-activate-base: false + channels: conda-forge + miniconda-version: "latest" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 5f8953695..15afd487b 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -9,29 +9,20 @@ on: jobs: build: runs-on: ubuntu-22.04 + defaults: + run: + shell: bash -el {0} steps: - name: Checkout repo uses: actions/checkout@v3 with: - fetch-depth: 0 - - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: 3.8 - - - name: Install dependencies - run: | - make install-deps-apt - python -m pip install --upgrade pip wheel - python -m pip install attrdict - - make install-deps-wxpython - - - name: Build project - run: | - make install-docs-build-dependencies + fetch-depth: 0 + - name: Set up conda env + uses: ./.github/actions/setup-conda-env + with: + environment-file: environments/eeg-expy-docsbuild.yml + activate-environment: eeg-expy-docsbuild - name: Get list of changed files id: changes @@ -40,7 +31,6 @@ jobs: git diff --name-only origin/master...HEAD > changed_files.txt cat changed_files.txt - - name: Determine build mode id: mode run: | @@ -48,13 +38,13 @@ jobs: echo "FULL_BUILD=true" >> $GITHUB_ENV echo "Detected non-example file change. Full build triggered." 
else - CHANGED_EXAMPLES=$(grep '^examples/.*\.py$' changed_files.txt | paste -sd '|' -) + # || true prevents grep's exit code 1 (no matches) from aborting the step + CHANGED_EXAMPLES=$(grep '^examples/.*\.py$' changed_files.txt | paste -sd '|' - || true) echo "FULL_BUILD=false" >> $GITHUB_ENV echo "CHANGED_EXAMPLES=$CHANGED_EXAMPLES" >> $GITHUB_ENV echo "Changed examples: $CHANGED_EXAMPLES" fi - - name: Cache built documentation id: cache-docs uses: actions/cache@v4 @@ -65,12 +55,9 @@ jobs: restore-keys: | ${{ runner.os }}-sphinx- - - name: Build docs - run: | - make docs + run: make docs - - name: Deploy Docs uses: peaceiris/actions-gh-pages@v3 if: github.ref == 'refs/heads/master' # TODO: Deploy seperate develop-version of docs? diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5b4f3c6aa..6afc1f6ee 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -13,15 +13,33 @@ jobs: defaults: run: shell: bash -el {0} + continue-on-error: ${{ matrix.experimental == true }} strategy: fail-fast: false matrix: - os: ['ubuntu-22.04', windows-latest, macOS-latest] - python_version: ['3.8'] + os: [ubuntu-22.04, windows-latest, macOS-latest] + python_version: ['3.10'] + env_file: [environments/eeg-expy-full.yml] + env_name: [eeg-expy-full] include: - # PsychoPy currently restricted to <= 3.10 + # Experimental Full Build: Catch regressions on 3.11 early - os: ubuntu-22.04 - python_version: '3.10' + python_version: '3.11' + experimental: true + env_file: environments/eeg-expy-full.yml + env_name: eeg-expy-full + + # Experimental Streaming Builds: Verify acquisition/analysis on 3.12+ + - os: ubuntu-22.04 + python_version: '3.12' + experimental: true + env_file: environments/eeg-expy-streaming.yml + env_name: eeg-expy-streaming + - os: ubuntu-22.04 + python_version: '3.13' + experimental: true + env_file: environments/eeg-expy-streaming.yml + env_name: eeg-expy-streaming steps: - uses: actions/checkout@v2 @@ -29,22 +47,13 @@ jobs: if: 
"startsWith(runner.os, 'Linux')" run: | make install-deps-apt - - name: Install conda - uses: conda-incubator/setup-miniconda@v3 + - name: Set up conda env + uses: ./.github/actions/setup-conda-env with: - environment-file: environments/eeg-expy-full.yml - auto-activate-base: false + environment-file: ${{ matrix.env_file }} + activate-environment: ${{ matrix.env_name }} python-version: ${{ matrix.python_version }} - activate-environment: eeg-expy-full - channels: conda-forge - miniconda-version: "latest" - - - name: Fix PsychXR numpy dependency DLL issues (Windows only) - if: matrix.os == 'windows-latest' - run: | - conda install --force-reinstall numpy - - - name: Run eegnb install test + - name: Run eeg-expy install test run: | if [ "$RUNNER_OS" == "Linux" ]; then Xvfb :0 -screen 0 1024x768x24 -ac +extension GLX +render -noreset &> xvfb.log & @@ -58,7 +67,7 @@ jobs: Xvfb :0 -screen 0 1024x768x24 -ac +extension GLX +render -noreset &> xvfb.log & export DISPLAY=:0 fi - make test PYTEST_ARGS="--ignore=tests/test_run_experiments.py" + make test typecheck: @@ -71,19 +80,16 @@ jobs: fail-fast: false matrix: os: ['ubuntu-22.04'] - python_version: [3.9] + python_version: ['3.10'] steps: - uses: actions/checkout@v2 - - name: Install conda - uses: conda-incubator/setup-miniconda@v3 + - name: Set up conda env + uses: ./.github/actions/setup-conda-env with: environment-file: environments/eeg-expy-full.yml - auto-activate-base: false - python-version: ${{ matrix.python_version }} activate-environment: eeg-expy-full - channels: conda-forge - miniconda-version: "latest" + python-version: ${{ matrix.python_version }} - name: Typecheck run: | make typecheck diff --git a/.gitignore b/.gitignore index 8ce78c6c7..f2ca75a7e 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,8 @@ __pycache__ # Built as part of docs doc/auto_examples doc/_build +doc/generated/ +doc/sg_execution_times.rst # Built by auto_examples examples/visual_cueing/*.csv diff --git a/conftest.py b/conftest.py new 
file mode 100644 index 000000000..649b2f0ef --- /dev/null +++ b/conftest.py @@ -0,0 +1,19 @@ +import importlib.util + + +def _is_available(module_name: str) -> bool: + try: + return importlib.util.find_spec(module_name) is not None + except (ImportError, ValueError): + return False + + +collect_ignore: list[str] = [] + +if not _is_available("psychopy"): + collect_ignore += [ + "eegnb/experiments", + "eegnb/devices/vr.py", + ] +elif not _is_available("psychxr"): + collect_ignore += ["eegnb/devices/vr.py"] diff --git a/doc/conf.py b/doc/conf.py index c449ea6fc..4982fc9ff 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -255,9 +255,9 @@ def setup(app): # Configurations for sphinx gallery -sphinx_gallery_conf = {'filename_pattern': '(?=.*r__)(?=.*.py)', - 'examples_dirs': ['../examples','../examples/visual_n170', '../examples/visual_p300','../examples/visual_ssvep', '../examples/visual_cueing', '../examples/visual_gonogo'], - 'gallery_dirs': ['auto_examples','auto_examples/visual_n170', 'auto_examples/visual_p300','auto_examples/visual_ssvep', 'auto_examples/visual_cueing', 'auto_examples/visual_gonogo'], +sphinx_gallery_conf = {'filename_pattern': '(?=.*r__)(?=.*.py)', + 'examples_dirs': ['../examples/visual_n170', '../examples/visual_p300','../examples/visual_ssvep', '../examples/visual_cueing', '../examples/visual_gonogo'], + 'gallery_dirs': ['auto_examples/visual_n170', 'auto_examples/visual_p300','auto_examples/visual_ssvep', 'auto_examples/visual_cueing', 'auto_examples/visual_gonogo'], 'within_subsection_order': FileNameSortKey, 'default_thumb_file': 'img/eeg-notebooks_logo.png', 'backreferences_dir': 'generated', # Where to drop linking files between examples & API diff --git a/doc/experiments/vprvep.rst b/doc/experiments/vprvep.rst new file mode 100644 index 000000000..fe309f475 --- /dev/null +++ b/doc/experiments/vprvep.rst @@ -0,0 +1,337 @@ +Visual Pattern Reversal VEP +=========================== + +The Pattern Reversal VEP (PR-VEP) is the most widely studied 
visual +evoked potential paradigm. A checkerboard pattern swaps its black and +white squares at a regular rate (typically 2 reversals per second) while +the participant fixates on a central cross. Each reversal elicits a +stereotyped triphasic waveform whose most prominent feature is the +**P100**, a positive deflection occurring ~100 ms after the reversal at +midline occipital electrodes. The other components are a smaller N75 +before it and an N145 after it. + +This implementation runs both ISCEV check-size variants in a single +session (large + small) and supports stereoscopic monocular presentation +through a Meta Quest HMD via PsychoPy / psychxr / Meta-Link. Trials are +analysed under two reference schemes (Oz−Fz ISCEV-standard and linked +mastoid M1+M2) and a wide range of differential biomarkers are reported, +including inter-ocular latency difference (IOLD), check-size slope +difference, hemispheric asymmetry, inter-ocular Δ-asymmetry contrasts, +lateral extrastriate readouts at P7/P8, and bootstrap confidence +intervals on the P100 latency. + +**PR-VEP Experiment Notebook Examples:** + +.. include:: ../auto_examples/visual_vep/index.rst + + +Running the Experiment +---------------------- + +.. code-block:: python + + from eegnb.devices.eeg import EEG + from eegnb.experiments.visual_vep import VisualPatternReversalVEP + + eeg = EEG(device='cyton') + experiment = VisualPatternReversalVEP( + eeg=eeg, + save_fn='my_vep_recording.csv', + block_duration_seconds=50, + block_trial_size=100, + reps_per_condition=2, # 4 conditions × 2 reps = 8 blocks total + use_vr=True, # False for monitor mode + ) + experiment.run() + +The display refresh rate is auto-detected from the active window +(``displayRefreshRate`` in VR, ``getActualFrameRate()`` on a monitor) and +must be an integer multiple of the 2 reversals/sec rate; 60, 90, 120, and +144 Hz are all supported. 
+ + +Participant Preparation +----------------------- + +The PR-VEP is sensitive to the optical quality of the retinal image. +Participants who normally wear glasses or contact lenses **must** wear +their corrective lenses during the test. Uncorrected refractive error +blurs the checkerboard's high spatial frequency edges, which attenuates +the P100 amplitude and can increase its latency — mimicking a genuine +neural conduction delay. This is especially important when comparing +latencies between eyes or across sessions, and even more so for the +small-check (high spatial-frequency) variant where blur dominates. + +ISCEV guidelines require that visual acuity be documented for each +recording session. If a participant's corrected acuity is worse than +6/9 (20/30), note it alongside the data so that downstream analysis can +account for it. + + +Stimulus Parameters +------------------- + +Two ISCEV check-size variants are presented per session [Odom2016]_: + +- **Large checks**: 1.0° of visual angle (60 arcmin, 0.5 cpd) — the + standard clinical PR-VEP stimulus, dominant low-spatial-frequency + drive of the foveal P100. +- **Small checks**: 0.25° of visual angle (15 arcmin, 2.0 cpd) — the + high-spatial-frequency variant. Demyelinated optic-nerve fibres + preferentially delay this response, so the latency *difference* + between large- and small-check P100 amplifies subtle demyelination + that the large-check IOLD alone misses. + +Other parameters: + +- **Reversal rate**: 2 reversals per second (ISCEV standard, range + 1–3 rev/s) [Odom2016]_ — fast enough for a discrete transient P100, + slow enough to stay well below photosensitive-seizure trigger + frequencies. +- **Field size**: 16° square (``ISCEV_FIELD_DEG = 16.0``), rendered at + the runtime-derived pixels-per-degree of the active display. +- **Contrast**: high-contrast black/white, mean luminance held constant + through the inter-trial interval to avoid adaptation. 
+- **Fixation**: thin red cross, 0.25° × 0.05° (15 × 3 arcmin), sized in + pixels via ``ppd`` so it stays sub-check at every variant and does not + mask foveal stimulation. + +Block scheduling: 4 conditions (left/right eye × large/small check) × +``reps_per_condition`` repetitions, shuffled at construction time. With +the default ``reps_per_condition=2`` this gives 8 blocks of 50 s, ~100 +reversals per block, ~200 reversals per (eye × size) condition, and +~400 reversals per eye total. + +Block-start markers (100, 101, 102, 103) are pushed at the first +reversal of each block, encoding the full condition (bit 0 = eye, bit 1 += size class). Per-reversal markers are ``1`` for left-eye blocks and +``2`` for right-eye blocks. Both decode to the per-condition labels in +the analysis pipeline. + + +Monitor vs VR +------------- + +The experiment supports both standard monitor presentation and Meta +Quest (VR) presentation via ``use_vr=True``. + +**VR mode is preferred** for several reasons: + +- Each eye sees the checkerboard independently via per-eye HMD buffers, + so monocular blocks need no manual eye closure and have no light + leakage from the unstimulated eye. +- The OpenXR / LibOVR compositor supplies a per-frame predicted photon + time (``app_motion_to_photon_latency_s``), which is logged to the + ``_timing.csv`` sidecar and applied trial-by-trial in the analysis to + cancel most of the output-side display latency (render queue, + compositor buffering, scan-out, HMD persistence). +- A keyboard (spacebar) or Quest controller trigger is required to advance + past block instructions and to exit the experiment. The presented + stimulus is otherwise hands-free during the block — there is no manual + eye covering required. + +A residual unmeasured chain delay (Quest Link transport + panel scan-out ++ LCD response + Cyton RF) of ~25 ± 15 ms remains, and is treated as a +fixed ``link_panel_lag`` constant in the analysis. 
Differential +biomarkers (IOLD, check-size slope difference, amplitude ratios, +Δ-asymmetry contrasts) are robust to this residual because both eyes +share the same chain — the residual cancels in any L−R contrast. +*Absolute* P100 latency (vs clinical norms) does *not* survive this +residual and is not reported as a clinical number. + +A separate stimulus calibration caveat applies to VR: the Quest panel is +not photometrically calibrated to a specific cd/m² or contrast ratio, so +absolute P100 *amplitude* will diverge from clinical norms by an unknown +constant factor. As with the latency offset, differential / within- +subject biomarkers remain interpretable; absolute amplitudes are not +interchangeable with clinical norms. + +In monitor mode the software flip is the only timing source, so any +fixed display-pipeline latency must be characterised separately if +absolute latency is needed. The most accurate option is a photodiode +taped over a sync patch on the screen with its analogue output routed +into a spare Cyton input (auxiliary channel or a free EEG channel) — +the diode fires when actual photons arrive at the screen, so epoching +off the diode trace aligns trials to true stimulus onset with sub-frame +precision and removes the entire display-chain residual. + + +Electrode Placement +------------------- + +The P100 is generated in occipital cortex. The default cap montage in +``00x__pattern_reversal_run_experiment.py`` is an 8-channel OpenBCI +Cyton layout: + +.. code-block:: python + + ch_names = ["Fz", "Pz", "P7", "P8", "O1", "O2", "Oz", "M2"] + # Reference: M1 (Cyton SRB / hardware reference) + # Ground: A2 + +Channel roles in the analysis pipeline: + +1. **Oz** — the primary ISCEV electrode; highest-amplitude P100. All + single-channel biomarkers (IOLD, check-size slope, amplitude ratio, + bootstrap CI) are computed at Oz. +2. **O1, O2** — lateral occipital. 
Used for the hemispheric-asymmetry + biomarker, but interpretation is confounded by paradoxical + lateralization (V1 dipoles in the calcarine fold project across the + midline). The inter-ocular Δ-asymmetry contrast separates eye- + dependent skew from stationary anatomical / electrode-stationary + asymmetries. +3. **P7, P8** — lateral parieto-occipital electrodes. Because they largely + bypass the "paradoxical lateralization" seen at O1/O2, they provide a + cleaner, more direct readout of each hemisphere independently. They are + essential for side-localizing cortical asymmetry and drive three biomarkers: + lateral propagation latency (7), P7/P8 hemispheric asymmetry (8), and the + lateral-hemisphere composite (9). +4. **Pz** — parietal midline. Used in the topology QC check to confirm + the expected ``Oz > O1/O2 > P7/P8/Pz`` posterior gradient. +5. **Fz** — frontal midline. Doubles as (a) the reference electrode for + the ISCEV Oz−Fz scheme, and (b) when the linked-mastoid scheme is + active, the Halliday polarity-inversion check: a genuine V1-generated + P100 produces an *inverted* (negative) deflection at Fz at the same + latency, confirming a posterior dipole. +6. **M1 (reference) + M2** — mastoids. M1 is the Cyton SRB hardware + reference (zero by construction). M2 is recorded; together they form + the linked-mastoid (M1+M2)/2 reference scheme used alongside Oz−Fz. + +For the alternative ``mark-iv`` montage (Thinkpulse dry actives at 4× +gain), edit ``ch_names`` in ``00x__pattern_reversal_run_experiment.py`` +to the Thinkpulse channel set. + + +Reference Schemes +----------------- + +The analysis pipeline runs every biomarker under two reference schemes +in sequence and prints both: + +- **Oz − Fz (ISCEV standard)** — directly comparable with clinical + PR-VEP norms. Sensitive to Fz contact quality. 
+- **Linked mastoid (M1+M2)/2** — more stable when Fz contact is weak, + unaffected by frontal EMG, and the only scheme under which the Fz + Halliday polarity-inversion check is meaningful (Fz is zero by + construction under the Oz−Fz reference). + +Each scheme produces its own set of plots and biomarkers; agreement +between the two is itself a quality indicator. + + +Biomarkers +---------- + +The analysis script ``01r__pattern_reversal_viz.py`` reports several differential biomarkers, in addition to the per-condition P100 latency and amplitude at Oz. These are grouped by their primary clinical utility: + +**Remyelination & Longitudinal Tracking (Demyelination Indicators)** +These metrics are the highest value for tracking repair because they isolate pathway delays and cancel out daily systemic noise or anatomical distortions. + +1. **Inter-ocular latency difference (IOLD)** — signed ``L − R`` P100 latency at Oz. ``> 8 ms`` is the most-cited clinical threshold for unilateral demyelination; robust to the ``link_panel_lag`` residual because both eyes share the same chain. The gold standard for longitudinal tracking of optic nerve repair. +2. **Inter-ocular check-size slope difference** — demyelinated (and repairing) optic nerves preferentially delay the small-check response. The ``L_eye − R_eye`` difference of latency-vs-check-size slopes amplifies asymmetric demyelination and is an excellent leading indicator of repair. +5. **Inter-ocular Δ-asymmetry contrast** — ``(O1−O2)|L_eye − (O1−O2)|R_eye``. By subtracting the spatial asymmetry between eyes, it cancels stationary anatomical distortions (e.g., asymmetrical cortical folding). The remaining Δ strictly represents pathway-driven changes over time. +6. **Bootstrap confidence intervals on P100 / IOLD** — 1000 trial-resamples per eye recompute the trimmed-mean P100 location, giving a 95% CI. The IOLD CI is flagged separately for excluding zero (statistically separable) and excluding ±8 ms (clinically meaningful). 
Tracking this CI width proves remyelination is statistically real. +7. **Lateral extrastriate P100 (P7 / P8) & Propagation** — per-channel P100 detection plus the Oz → lateral propagation latency (``P7−Oz``, ``P8−Oz``). While IOLD tracks *optic nerve* myelination, this tracks *intracortical* white matter myelination. A severe delay here indicates a cortical white matter issue (e.g., TBI, concussion, or cortical demyelination), not an optic nerve problem. + +**Axonal & Compressive Indicators** +While latency measures myelin, amplitude measures the surviving nerve fibers (axons). + +3. **Inter-ocular amplitude ratios** — ``L / R`` P100 amplitude at Oz. Tracks axonal loss (e.g., after severe optic neuritis) or compressive lesions (e.g., tumor blocking the signal). Less specific than latency but critical for differentiating slow recovery from permanent damage. +- **N75–P100–N145 Peak-to-Peak Amplitude** — measures the total "energy" of the visual response, which is more stable than absolute voltage against baseline drift. + +**Morphological & Cortical Indicators** +Focus on waveform shape and direct hemispheric comparisons. + +4. **Hemispheric asymmetry (O1 vs O2)** — per-eye P100 latency/amplitude at O1 and O2. Due to paradoxical lateralization, a deficit at O1 (left scalp) suggests *right* hemisphere involvement. +8. **Lateral extrastriate asymmetry (P7 vs P8)** — direct hemispheric read without the paradoxical lateralization of V1. Flags stationary asymmetry vs. eye-dependent asymmetry. +9. **Combined lateral hemisphere composites** — ``(O1+P7)/2`` vs ``(O2+P8)/2`` traces. Provides ~√2 better SNR than O1/O2 alone. +- **W-Peak (Bifurcated P100) Analysis** — detects if the P100 splits into two peaks (a "W" shape), which often indicates a central scotoma (macular dysfunction) or severe uncorrected refractive error causing spatial desynchronization. + +**Quality Control** + +10. 
**Topology QC** (linked-mastoid only) — confirms (a) the ``Oz > Pz > 0`` posterior-gradient at the Oz P100 latency, and (b) the Halliday frontal polarity inversion (Fz negative at the Oz P100 latency) which strongly confirms a true V1 generator. + +All biomarkers are computed twice (once per reference scheme) and printed alongside their expected normal ranges, so a session's clinical interpretability is visible at a glance. + + +Latency Resolution +------------------ + +The precision of a P100 latency estimate depends on three factors: + +1. **Display refresh rate** — determines the worst-case stimulus timing + jitter. At 120 Hz this is ~8.3 ms per frame; at 60 Hz, ~16.7 ms. +2. **EEG sampling rate** — the Cyton samples at 250 Hz, giving 4 ms + between samples. Without interpolation, the peak latency is locked + to the nearest sample and cannot resolve shifts smaller than 4 ms. +3. **Number of trials** — averaging more reversals reduces noise in the + ERP waveform, tightening the bootstrap CI around the peak estimate. + The default 8-block design yields ~400 reversals per eye (split + across the two check sizes). + +To achieve sub-sample precision, ``vep_utils.get_pr_vep_latencies`` +fits a parabola through the peak sample and its two neighbours and +takes the vertex as the true peak — bringing effective resolution to +~0.5 ms at 250 Hz, well below the sample interval. + +For studies that require detecting latency shifts of 1–2 ms (e.g. +within-subject longitudinal comparisons), the combination of 120 Hz +display, parabolic interpolation, and the default 8-block design is +recommended. The trimmed-mean evoked estimator (used throughout the +analysis) further reduces sensitivity to occasional blink-contaminated +trials that survive amplitude rejection.
+ + +Longitudinal Tracking +--------------------- + +To monitor P100 latency over time — for example during nerve recovery +or longitudinal intervention tracking — record multiple sessions using +the same subject and session numbering scheme. + +The analysis pipeline is split into two scripts so this is fast: + +- ``01r__pattern_reversal_viz.py`` runs the per-session analysis. In + addition to the figures and stdout output, it writes a + ``biomarkers.json`` file into the recording directory containing + every biomarker under both reference schemes plus session metadata + (subject, session, device, site, trial counts, PC-side and + ``link_panel_lag`` timing values). +- ``02r__pattern_reversal_longitudinal.py`` reads every session's + ``biomarkers.json`` for a given subject, builds a flattened + ``pandas.DataFrame``, and plots IOLD, per-eye P100 latency, + check-size slope difference, O1/O2 and P7/P8 Δ-asymmetry, and + amplitude ratio as a function of session. Bootstrap 95% CI bands are + drawn on the IOLD and per-eye P100 panels so a real shift is visually + separable from session noise. + +This split means new longitudinal points are added in seconds (read +JSON, plot) rather than minutes (re-load EEG, filter, epoch, +bootstrap), and individual sessions can be re-analysed without +invalidating the rest of the trend series. + +Before attributing a latency change to an intervention (like remyelination), establish a **baseline**: record at least 3–5 sessions over 1–2 weeks under the same conditions. Latency shifts can be caused by many non-pathological factors, including subject fatigue, alertness, caffeine intake, body temperature changes, or ocular differences (like pupil size or slight changes in focus/fixation). The longitudinal script supports a ``BASELINE_LAST_SESSION_NB`` cutoff that limits the summary statistics to those initial sessions, capturing the natural day-to-day variability for your setup and participant. 
Subsequent sessions need to fall significantly outside this baseline variability (e.g., beyond the ``mean ± std``) to be considered a true physiological or structural shift. + + +Timing Notes +------------ + +Two sidecar files are written alongside each recording to let you check +timing after the fact: + +- ``{save_fn}_timing.csv`` — per-trial software and compositor + timestamps and their delta, including the per-trial + ``app_motion_to_photon_latency_s`` used by the analysis. +- ``{save_fn}_frame_stats.json`` — per-frame intervals and dropped- + frame count (150%-of-refresh threshold). + + + + +References +---------- + +.. [Odom2016] Odom JV, Bach M, Brigell M, Holder GE, McCulloch DL, Mizota A, + Tormene AP; International Society for Clinical Electrophysiology of Vision. + **ISCEV standard for clinical visual evoked potentials: (2016 update).** + *Documenta Ophthalmologica* 133(1):1-9. doi:10.1007/s10633-016-9553-y diff --git a/doc/getting_started/installation.rst b/doc/getting_started/installation.rst index c4bcda118..e29352af5 100644 --- a/doc/getting_started/installation.rst +++ b/doc/getting_started/installation.rst @@ -44,7 +44,7 @@ Use the following commands to download the repo, create and activate a conda or **Environment file options** - *Python 3.8 - 3.10:* + *Python 3.10:* - `eeg-expy-full`: Install all dependencies @@ -52,7 +52,7 @@ Use the following commands to download the repo, create and activate a conda or - `eeg-expy-streamstim`: Combined streaming and stimulus presentation - *Python 3.8 - 3.13:* + *Python 3.10 - 3.13:* - `eeg-expy-docsbuild`: Documentation @@ -68,7 +68,7 @@ Use the following commands to download the repo, create and activate a conda or # Create conda environment from chosen eeg-expy-*.yml # The Python version will be pinned by the environment file - conda env create -n eeg-expy --file=environments/eeg-expy-full.yml + conda env create -n eeg-expy -f environments/eeg-expy-full.yml # Activate the environment conda activate 
eeg-expy diff --git a/doc/getting_started/loading_and_saving.md b/doc/getting_started/loading_and_saving.md index a71aa6928..db033312e 100644 --- a/doc/getting_started/loading_and_saving.md +++ b/doc/getting_started/loading_and_saving.md @@ -1,82 +1,82 @@ -# Loading and Saving Data -Knowing where the data is saved is integral to the functionality of EEG Notebooks. EEG Notebooks saves data to a default location in a hidden directory. From this directory, the individual files can be found based on a folder structure outlined below in the **naming convention.** - -## Locating the Default Data Directory - -#### Windows 10 -The default directory is found at the location `C:\Users\*USER_NAME*\.eegnb` an example of which is pictured below. -![fig](../img/windows_default_directory.PNG) - -#### Linux - -#### MacOS - -## Changing the Default Data Directory -The default directory for saving data is automatically set within the library. If you want to save and analyze data to/from a new directory, it must be passed as a parameter to both the `eegnb.generate_save_fn()` and `eegnb.analysis.load_data()` functions. - -**Saving to new directory:** -``` python -from eegnb import generate_save_fn -from eegnb.experiments.visual_n170 import n170 - -# Define session parameters -board = 'cyton' -experiment = 'visual-N170 -subject = 1 -session = 1 - -# Define new directory and generate save filename -new_dir = 'C:/Users/Jadin/Documents/EEG_Notebooks_Data' -save_fn = generate_save_fn(board, experiment, subject, session, new_dir) - -# Continue to run experiment as normal... 
-``` - -**Loading from new directory:** -``` python -from eegnb.analysis.utils import load_data - -# Define parameters for session you want to load -board = 'cyton' -experiment = 'visual-N170 -subject = 1 -session = 1 - -# Define new directory -new_dir = 'C:/Users/Jadin/Documents/EEG_Notebooks_Data' - -# Load data -raw = load_data( - subject_id = subject, - session_nb = session, - device_name = board, - experiment = experiment, - data_dir = new_dir - ) -``` - -## Naming Convention -From the specified data directory, EEG notebooks then follows a specific set of naming conventions to define subdirectories and save the data. The full path ends up taking the form -``` -DATA_DIR\experiment\site\device\subject#\session#\file_name.csv -``` -Each field is explained below: - -**Experiment:** This part is the name of the experiment being run. Example names of experiments as they appear in the example datasets are shown below. -``` -visual-N170 -visual-P300 -visual-SSVEP -``` - -**Site:** The site refers to the recording location, or generally the machine it was recorded to. If you are saving and analyzing only your own data on your local machine, you do not need to specify your site name as it will default to 'local'. When loading example datasets however, it is necessary to specify from which site you would like to load data. - -**Device:** The name of the device being recorded from. - -**Subject #:** When entering subject ID as a parameter, you only need to specify the integer value. The integer will be formatted to `subjectXXXX` where "XXXX" is a four-digit representation of the integer ID#. - -**Session #:** A session in this case would be the full period of time which you have the device on and are taking multiple recordings. For example: if you put the headset on and take five recordings, all five of these recording would belong to session number 1. Once you take a break from consecutive recordings, then this would constitute a new session. 
Just like the subject ID, this value is passed as an integer and gets converted to a read-able format. - -**File name:** The file name is automatically generated in the format `recording_date_time.csv` - +# Loading and Saving Data +Knowing where the data is saved is integral to the functionality of EEG Notebooks. EEG Notebooks saves data to a default location in a hidden directory. From this directory, the individual files can be found based on a folder structure outlined below in the **naming convention.** + +## Locating the Default Data Directory + +#### Windows 10 +The default directory is found at the location `C:\Users\*USER_NAME*\.eegnb`, an example of which is pictured below. +![fig](../img/windows_default_directory.PNG) + +#### Linux + +#### MacOS + +## Changing the Default Data Directory +The default directory for saving data is automatically set within the library. If you want to save and analyze data to/from a new directory, it must be passed as a parameter to both the `eegnb.generate_save_fn()` and `eegnb.analysis.load_data()` functions. + +**Saving to new directory:** +``` python +from eegnb import generate_save_fn +from eegnb.experiments.visual_n170 import n170 + +# Define session parameters +board = 'cyton' +experiment = 'visual-N170' +subject = 1 +session = 1 + +# Define new directory and generate save filename +new_dir = 'C:/Users/Jadin/Documents/EEG_Notebooks_Data' +save_fn = generate_save_fn(board, experiment, subject, session, new_dir) + +# Continue to run experiment as normal...
+```
+
+**Loading from new directory:**
+``` python
+from eegnb.analysis.utils import load_data
+
+# Define parameters for session you want to load
+board = 'cyton'
+experiment = 'visual-N170'
+subject = 1
+session = 1
+
+# Define new directory
+new_dir = 'C:/Users/Jadin/Documents/EEG_Notebooks_Data'
+
+# Load data
+raw = load_data(
+    subject_id = subject,
+    session_nb = session,
+    device_name = board,
+    experiment = experiment,
+    data_dir = new_dir
+    )
+```
+
+## Naming Convention
+From the specified data directory, EEG notebooks then follows a specific set of naming conventions to define subdirectories and save the data. The full path ends up taking the form
+```
+DATA_DIR\experiment\site\device\subject#\session#\file_name.csv
+```
+Each field is explained below:
+
+**Experiment:** This part is the name of the experiment being run. Example names of experiments as they appear in the example datasets are shown below.
+```
+visual-N170
+visual-P300
+visual-SSVEP
+```
+
+**Site:** The site refers to the recording location, or generally the machine it was recorded to. If you are saving and analyzing only your own data on your local machine, you do not need to specify your site name as it will default to 'local'. When loading example datasets however, it is necessary to specify from which site you would like to load data.
+
+**Device:** The name of the device being recorded from.
+
+**Subject #:** When entering subject ID as a parameter, you only need to specify the integer value. The integer will be formatted to `subjectXXXX` where "XXXX" is a four-digit representation of the integer ID#.
+
+**Session #:** A session in this case would be the full period of time which you have the device on and are taking multiple recordings. For example: if you put the headset on and take five recordings, all five of these recordings would belong to session number 1. Once you take a break from consecutive recordings, then this would constitute a new session. 
Just like the subject ID, this value is passed as an integer and gets converted to a read-able format. + +**File name:** The file name is automatically generated in the format `recording_date_time.csv` + ### Examples \ No newline at end of file diff --git a/doc/getting_started/streaming.md b/doc/getting_started/streaming.md index f28ebde87..53792b74e 100644 --- a/doc/getting_started/streaming.md +++ b/doc/getting_started/streaming.md @@ -63,6 +63,9 @@ be run to begin the notebooks interfacing with the bluemuse backend. **Needed Parameters:** **Optional Parameters:** +**Cyton USB-Serial Latency (Windows):** +A note on Cyton USB-serial latency: the Windows FTDI driver default `LatencyTimer` of 16 ms causes batched marker delivery and corrupts millisecond-grade marker timing. The experiment scripts assert `LatencyTimer = 1 ms` at startup; recordings predating this assertion contain a ~15 ms hardware buffering lag on the marker channel that must be subtracted before comparing absolute latencies across sessions. 
+ ### OpenBCI Cyton + Daisy ![fig](../img/cyton_daisy.png) **Device Name:** *'cyton_daisy'* or *'cyton_daisy_wifi'* with WiFi Shield diff --git a/doc/index.rst b/doc/index.rst index 278093db3..ccdd6f85c 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -19,6 +19,7 @@ experiments/vn170 experiments/vp300 experiments/vssvep + experiments/vprvep experiments/cueing experiments/gonogo experiments/all_examples diff --git a/eegnb/__init__.py b/eegnb/__init__.py index 02ebd828f..a59bd0949 100644 --- a/eegnb/__init__.py +++ b/eegnb/__init__.py @@ -32,7 +32,7 @@ def _get_recording_dir( """A subroutine of get_recording_dir that accepts subject and session as strings""" # folder structure is /DATA_DIR/experiment/board_name/site/subject/session/*.csv recording_dir = ( - Path(data_dir) / experiment / site / board_name / subject_str / session_str + Path(data_dir or DATA_DIR) / experiment / site / board_name / subject_str / session_str ) # check if directory exists, if not, make the directory diff --git a/eegnb/analysis/__init__.py b/eegnb/analysis/__init__.py index e69de29bb..2fde929d0 100644 --- a/eegnb/analysis/__init__.py +++ b/eegnb/analysis/__init__.py @@ -0,0 +1 @@ +from eegnb.analysis import vep_utils # noqa: F401 diff --git a/eegnb/analysis/recording_quality.py b/eegnb/analysis/recording_quality.py new file mode 100644 index 000000000..7917ce19f --- /dev/null +++ b/eegnb/analysis/recording_quality.py @@ -0,0 +1,256 @@ +"""Per-recording contact quality diagnostic for an eegnb session directory. + +Returns structured DataFrames for notebook display alongside a formatted text +report for console / experiment use. 
+ +Notebook usage: + result = check_session(session_dir) + display(result['per_channel']) # styled DataFrame, columns aligned + display(result['summary']) + result['flagged_channels'] + result['shared_ref_suspect'] + +Console / experiment usage: + print(check_session(session_dir)['report']) +""" +import pathlib + +import numpy as np +import pandas as pd +from scipy.signal import butter, sosfiltfilt + + +EEG_CHANNEL_CANDIDATES = [ + 'Fz', 'Pz', 'P3', 'P4', 'P7', 'P8', 'O1', 'O2', 'Oz', 'M1', 'M2', + 'Cz', 'C3', 'C4', 'F3', 'F4', 'T7', 'T8', 'F7', 'F8', + 'AF7', 'AF8', 'TP9', 'TP10', 'PO3', 'PO4', 'POz', 'PO7', 'PO8', +] + +# AC-noise metrics (std, p99, max/min, pct_big) are computed on a high-pass- +# filtered copy of the signal so they reflect actual EEG noise rather than +# electrode half-cell DC offsets (typically 50-400 mV) and slow wander. +# The drift metric is computed on the unfiltered signal because slow wander +# is exactly what it's meant to detect. +HP_CUTOFF_HZ = 1.0 # high-pass before noise metrics +BIG_SAMPLE_UV = 200.0 # threshold for "%>200" (post high-pass) +DRIFT_WIN_SECS = 10 +NOISE_FACTOR_FLAG = 1.5 + + +def _detect_sfreq(timestamps: np.ndarray) -> float: + diffs = np.diff(timestamps) + diffs = diffs[(diffs > 0) & (diffs < 1.0)] + return float(1.0 / np.median(diffs)) if len(diffs) else 250.0 + + +def _highpass(x: np.ndarray, sfreq: float, cutoff_hz: float = HP_CUTOFF_HZ) -> np.ndarray: + """Zero-phase 4th-order Butterworth high-pass. Returns unchanged if signal + is too short for the filter.""" + nyq = sfreq / 2.0 + if cutoff_hz <= 0 or cutoff_hz >= nyq or len(x) < 32: + return x - np.mean(x) + sos = butter(4, cutoff_hz / nyq, btype='highpass', output='sos') + return sosfiltfilt(sos, x) + + +def _channel_metrics(x: np.ndarray, sfreq: float) -> dict: + # Drift on the unfiltered signal — that's what it's measuring. 
+ x_dc_removed = x - np.mean(x) + win = max(1, int(DRIFT_WIN_SECS * sfreq)) + drift = float(pd.Series(x_dc_removed).rolling(win, center=False).mean().dropna().pipe( + lambda s: s.max() - s.min() + )) if len(x_dc_removed) >= win else 0.0 + + # Noise metrics on the high-pass-filtered signal — these reflect AC EEG + # quality, not DC drift / wander (which is captured separately above). + x_ac = _highpass(x_dc_removed, sfreq) + return { + 'std': float(np.std(x_ac)), + 'p99': float(np.percentile(np.abs(x_ac), 99)), + 'max': float(x_ac.max()), + 'min': float(x_ac.min()), + 'drift': drift, + 'pct_big': 100.0 * float(np.mean(np.abs(x_ac) > BIG_SAMPLE_UV)), + } + + +def _build_per_channel_df(rows: list) -> pd.DataFrame: + """Build a tidy per-channel DataFrame with relative metrics and flags.""" + df = pd.DataFrame(rows) + if df.empty: + return df + + med_by_rec = df.groupby('rec')[['std', 'drift']].transform('median') + df['std_x'] = df['std'] / med_by_rec['std'].where(med_by_rec['std'] > 0, 1.0) + df['drift_x'] = df['drift'] / med_by_rec['drift'].where(med_by_rec['drift'] > 0, 1.0) + df['flagged'] = (df['std_x'] > NOISE_FACTOR_FLAG) | (df['drift_x'] > NOISE_FACTOR_FLAG) + return df[['rec', 'ch', 'std', 'std_x', 'p99', 'max', 'min', + 'drift', 'drift_x', 'pct_big', 'flagged']] + + +def _build_summary_df(per_channel: pd.DataFrame) -> pd.DataFrame: + """Per-recording summary with exclusion factor relative to cleanest recording.""" + if per_channel.empty: + return pd.DataFrame() + summary = per_channel.groupby('rec').agg( + med_std=('std', 'median'), + med_p99=('p99', 'median'), + med_drift=('drift', 'median'), + n_flagged=('flagged', 'sum'), + n_channels=('ch', 'count'), + ) + baseline = float(summary.med_std.min()) + summary['factor_x'] = summary.med_std / baseline if baseline > 0 else 1.0 + summary['exclude'] = summary.factor_x > NOISE_FACTOR_FLAG + return summary + + +def _format_report(session_dir: pathlib.Path, + recs: list, + per_channel: pd.DataFrame, + summary: 
pd.DataFrame) -> str: + """Format the structured DataFrames into the text report string.""" + lines: list[str] = [] + w = lines.append + + if not recs: + w(f'No recording_*.csv files found in {session_dir}') + return '\n'.join(lines) + + w(f'Session: {session_dir.name}') + w(f'Found {len(recs)} recording(s)') + w('') + w('Column legend (all values in µV, raw signal — compare channels relative to each other):') + w(f' std Standard deviation — overall noise level per channel.') + w(f' p99|x| 99th percentile of |signal| — sustained noise floor, robust to single spikes.') + w(f' max/min Largest/smallest raw sample — flags electrode pops or movement artifacts.') + w(f' drift Range of 10 s rolling mean — slow DC wander from drying gel or loose contact.') + w(f' %>200 % of samples exceeding {BIG_SAMPLE_UV:.0f} µV — mainly useful comparing recordings, not channels.') + w('') + w('Interpreting the table: look for channels with std or drift significantly higher than') + w('the others. A single outlier = bad electrode contact. 
All channels inflated = loose reference.') + w('') + + header = (f' {"ch":>4} {"std":>8} {"std×":>5} {"p99|x|":>8} ' + f'{"max":>8} {"min":>8} {"drift":>8} {"drift×":>6} ' + f'{"%>{:.0f}".format(BIG_SAMPLE_UV):>6} {"flag":>4}') + + for rec_id in summary.index: + rec_rows = per_channel[per_channel['rec'] == rec_id] + n_samples = len(rec_rows) and int(rec_rows.iloc[0].get('n_samples', 0)) # placeholder + rec_meta = next((r for r in recs if r['rec'] == rec_id), None) + if rec_meta: + w(f'=== {rec_id} ({rec_meta["minutes"]:.1f} min, ' + f'{rec_meta["n_samples"]} samples, ~{rec_meta["sfreq"]:.0f} Hz) ===') + else: + w(f'=== {rec_id} ===') + w(header) + for _, m in rec_rows.iterrows(): + flag_str = '⚑' if m['flagged'] else ' ' + w(f' {m["ch"]:>4} {m["std"]:>8.1f} {m["std_x"]:>5.2f} {m["p99"]:>8.1f} ' + f'{m["max"]:>8.1f} {m["min"]:>8.1f} ' + f'{m["drift"]:>8.1f} {m["drift_x"]:>6.2f} {m["pct_big"]:>6.2f} {flag_str:>4}') + + s = summary.loc[rec_id] + w(f' (median std={s.med_std:.1f} µV median drift={s.med_drift:.1f} µV ' + f'flag threshold: ×>{NOISE_FACTOR_FLAG})') + if s.n_flagged >= s.n_channels / 2: + w(f' ⚑ SHARED REFERENCE SUSPECT — {int(s.n_flagged)}/{int(s.n_channels)} channels flagged ' + f'(all-channel inflation → M1/SRB loose)') + elif s.n_flagged: + w(f' ⚑ {int(s.n_flagged)} channel(s) flagged — isolated contact issue(s)') + w('') + + w('=== PER-RECORDING SUMMARY (median across EEG channels) ===') + w(f' {"rec":<22} {"med_std":>8} {"med_p99":>8} {"med_drift":>10}') + for rec_id in summary.index: + s = summary.loc[rec_id] + w(f' {rec_id:<22} {s.med_std:>8.1f} {s.med_p99:>8.1f} {s.med_drift:>10.1f}') + + w('') + w('=== EXCLUSION CANDIDATES ===') + for rec_id in summary.index: + s = summary.loc[rec_id] + w(f' {rec_id}: median_std={s.med_std:.1f} ' + f'({s.factor_x:.2f}x) [{"EXCLUDE?" 
if s.exclude else "ok"}]') + + w('') + if summary.exclude.any(): + w('⚑ One or more recordings flagged as substantially noisier.') + + return '\n'.join(lines) + + +def check_session(session_dir: pathlib.Path) -> dict: + """Return structured quality metrics for a session directory. + + Reads every ``recording_*.csv`` (excluding ``*_timing.csv``) and computes + per-channel std / p99 / drift metrics. + + Returns a dict with keys: + per_channel : DataFrame — one row per (recording, channel) with + std, std×, p99, max/min, drift, drift×, + pct_big, flagged + summary : DataFrame — one row per recording with med_std, + med_p99, med_drift, n_flagged, + n_channels, factor_x, exclude + report : str — formatted text report for console use + flagged_channels : list — channel names flagged in any recording + (std or drift > 1.5× group median) + shared_ref_suspect : bool — True when ≥ half of all unique channels + are flagged across the session, + indicating M1/SRB loose rather than + isolated electrode contacts + """ + session_dir = pathlib.Path(session_dir).expanduser().resolve() + rec_paths = sorted( + p for p in session_dir.glob('recording_*.csv') + if not p.stem.endswith('_timing') and not p.name.endswith('.excluded') + ) + + if not rec_paths: + empty = pd.DataFrame() + return { + 'per_channel': empty, + 'summary': empty, + 'report': f'No recording_*.csv files found in {session_dir}', + 'flagged_channels': [], + 'shared_ref_suspect': False, + } + + rows = [] + recs_meta = [] + + for p in rec_paths: + df = pd.read_csv(p) + eeg_chs = [c for c in df.columns if c in EEG_CHANNEL_CANDIDATES] + sfreq = _detect_sfreq(df['timestamps'].values) if 'timestamps' in df else 250.0 + rec_id = p.stem.split('recording_')[-1] + recs_meta.append({ + 'rec': rec_id, 'minutes': len(df) / sfreq / 60.0, + 'n_samples': len(df), 'sfreq': sfreq, + }) + for ch in eeg_chs: + rows.append({'rec': rec_id, 'ch': ch, + **_channel_metrics(df[ch].values.astype(float), sfreq)}) + + per_channel = 
_build_per_channel_df(rows) + summary = _build_summary_df(per_channel) + report = _format_report(session_dir, recs_meta, per_channel, summary) + + flagged_channels = sorted(per_channel.loc[per_channel.flagged, 'ch'].unique().tolist()) + all_ch_names = list(dict.fromkeys(per_channel['ch'])) + shared_ref_suspect = len(flagged_channels) >= len(all_ch_names) / 2 + + return { + 'per_channel': per_channel, + 'summary': summary, + 'report': report, + 'flagged_channels': flagged_channels, + 'shared_ref_suspect': shared_ref_suspect, + } + + +def report_session(session_dir: pathlib.Path) -> str: + """Return a formatted quality report string for a session directory.""" + return check_session(session_dir)['report'] diff --git a/eegnb/analysis/utils.py b/eegnb/analysis/utils.py index d9450981d..e7233ea76 100644 --- a/eegnb/analysis/utils.py +++ b/eegnb/analysis/utils.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import copy from copy import deepcopy import math @@ -5,10 +7,12 @@ import sys from collections import OrderedDict from glob import glob -from typing import Union, List -from time import sleep, time -import os +from pathlib import Path +from typing import TYPE_CHECKING, Union, List +if TYPE_CHECKING: + from eegnb.devices.eeg import EEG +from time import sleep, time import pandas as pd import numpy as np import matplotlib.pyplot as plt @@ -23,9 +27,11 @@ from scipy.signal import lfilter, lfilter_zi from eegnb import _get_recording_dir -from eegnb.devices.eeg import EEG from eegnb.devices.utils import EEG_INDICES, SAMPLE_FREQS -from pynput import keyboard +try: + from pynput import keyboard +except ImportError: + keyboard = None # this should probably not be done here sns.set_context("talk") @@ -181,9 +187,12 @@ def load_data( session_str = "*" if session == "all" else f"session{session_int:03}" recdir = _get_recording_dir(device_name, experiment, subject_str, session_str, site, data_dir) - data_path = os.path.join(data_dir, recdir, "*.csv") + data_path = 
recdir / "*.csv" - fnames = glob(str(data_path)) + # Primary recordings are named recording_.csv (one underscore). + # Sidecar files follow BIDS convention with an additional suffix, e.g. recording__timing.csv. + # Filter to only primary recordings. + fnames = [f for f in glob(str(data_path)) if Path(f).stem.count('_') == 1] if len(fnames) == 0: raise Exception("No filenames found in folder: %s" %data_path) diff --git a/eegnb/analysis/vep_utils.py b/eegnb/analysis/vep_utils.py new file mode 100644 index 000000000..264f6dc80 --- /dev/null +++ b/eegnb/analysis/vep_utils.py @@ -0,0 +1,532 @@ +import numpy as np +from mne import Evoked, EvokedArray +from scipy.stats import trim_mean + +ISCEV_CHECK_DEG_LARGE = 1.0 +ISCEV_CHECK_DEG_SMALL = 0.25 + +# Default thresholds used by clinical PR-VEP biomarkers. Override per-call +# only if you have a paradigm-specific reason to. +IOLD_FLAG_MS = 8.0 # |L − R P100 latency| above this flags suspected unilateral demyelination +LOG2_AMP_FLAG = 1.0 # |log2(L/R P100 amplitude)| above this flags inter-ocular drive imbalance + +# Minimum amplitude-to-pre-stim-noise ratio for a peak to be reported. Below +# this, the "peak" returned by argmax in the search window is dominated by +# noise rather than a real waveform feature, and its latency is unreliable. +# Particularly relevant for small-check / demyelinated-pathway conditions +# where P100 amplitude can drop below the per-channel noise floor. +PEAK_MIN_SNR = 2.0 + + +def print_latency(peak_name, peak_latency, peak_channel, uv): + peak_latency = round(peak_latency * 1e3, 2) # convert to milliseconds + uv = round(uv * 1e6, 2) # convert to µV + print('{} Peak of {} µV at {} ms in peak_channel {}'.format(peak_name, uv, peak_latency, peak_channel)) + + +def get_peak(erp_name, evoked_potential, peak_time_min, peak_time_max, mode, + min_snr=PEAK_MIN_SNR): + """Find peak latency with sub-sample precision using parabolic interpolation. 
+ + MNE's get_peak returns the sample with the largest value, limiting + resolution to the sample interval (4 ms at 250 Hz). A parabolic fit + through the peak sample and its two neighbours recovers the true peak + location between samples, giving ~0.5 ms precision at 250 Hz. + + This implementation avoids MNE's strict positive/negative threshold + requirements to support waveforms with large baseline shifts. + + Pre-stim baseline std is computed per channel and the peak amplitude is + only reported when |amp| / noise_std >= ``min_snr``. Below that, the + "peak" returned by argmax in the search window is dominated by noise + rather than a real waveform feature; returning ``None`` lets callers + skip downstream analysis instead of chasing a spurious latency. + Set ``min_snr=None`` to disable the gate (returns the peak unconditionally). + """ + # Step 1: find the sample-level peak + time_mask = (evoked_potential.times >= peak_time_min) & (evoked_potential.times <= peak_time_max) + if not np.any(time_mask): + print(f'{erp_name}: no samples in window {peak_time_min}-{peak_time_max}') + return None + + data_win = evoked_potential.data[:, time_mask] + if mode == 'pos': + ch_idx, t_idx_win = np.unravel_index(np.argmax(data_win), data_win.shape) + elif mode == 'neg': + ch_idx, t_idx_win = np.unravel_index(np.argmin(data_win), data_win.shape) + else: + ch_idx, t_idx_win = np.unravel_index(np.argmax(np.abs(data_win)), data_win.shape) + + peak_channel = evoked_potential.ch_names[ch_idx] + peak_sample = np.where(time_mask)[0][t_idx_win] + sample_latency = evoked_potential.times[peak_sample] + + # Step 2: parabolic interpolation around the peak sample + times = evoked_potential.times + data = evoked_potential.data[ch_idx] + + # Need at least one sample on each side for the fit + if 0 < peak_sample < len(times) - 1: + y_prev = data[peak_sample - 1] + y_peak = data[peak_sample] + y_next = data[peak_sample + 1] + + # Parabolic interpolation: offset from centre sample + denom = 
y_prev - 2 * y_peak + y_next + if abs(denom) > 1e-30: + offset = 0.5 * (y_prev - y_next) / denom + dt = times[peak_sample] - times[peak_sample - 1] + interp_latency = times[peak_sample] + offset * dt + interp_uv = y_peak - 0.25 * (y_prev - y_next) * offset + else: + interp_latency = sample_latency + interp_uv = y_peak + else: + interp_latency = sample_latency + interp_uv = data[peak_sample] + + # Step 3: SNR check — pre-stim baseline std as the noise reference + pre_mask = evoked_potential.times < 0 + if pre_mask.any(): + noise_uv = float(evoked_potential.data[ch_idx, pre_mask].std()) + else: + noise_uv = 0.0 + snr = abs(interp_uv) / max(noise_uv, 1e-12) + + if min_snr is not None and snr < min_snr: + print(f'{erp_name}: SNR={snr:.2f} < {min_snr} ' + f'(amp={interp_uv*1e6:.2f}µV, noise={noise_uv*1e6:.2f}µV) — peak unreliable') + return None + + return { + 'name': erp_name, + 'latency': interp_latency, + 'channel': peak_channel, + 'amplitude': interp_uv, + 'noise_uv': noise_uv, + 'snr': snr, + } + + +def get_pr_vep_latencies(evoked_occipital: Evoked, min_snr=PEAK_MIN_SNR): + """Detect canonical PR-VEP peaks with SNR gating. + + P100 is measured **peak-to-trough** against the preceding N75. Clinical + PR-VEP convention: P100 amplitude = P100_peak − N75_trough. This cancels + any DC pedestal (slow drift, reference offset, residual baseline bias) + that would otherwise suppress the apparent P100 absolute amplitude. + The peak-to-trough value is used for SNR gating, so a P100 sitting + on top of a deep N75 is correctly recognised as a strong response + even when its absolute µV value looks small. + + Each peak is gated by ``min_snr`` (default ``PEAK_MIN_SNR`` = 2.0). A peak + whose amplitude (peak-to-trough where applicable) doesn't exceed the + per-channel pre-stim noise floor by that factor is returned as ``None`` + rather than reported as a spurious latency. 
Particularly important for + small-check / demyelinated conditions where P100 absolute amplitude can + drop below the noise floor. + + Pass ``min_snr=None`` to disable gating and report whichever sample is + largest in each search window (legacy behaviour). + """ + # N75 and N145 are gated by absolute amplitude (no obvious anchor for + # peak-to-trough; their clinical role is mostly as landmarks anyway). + n75 = get_peak(erp_name='N75', evoked_potential=evoked_occipital, + peak_time_min=0.060, peak_time_max=0.090, mode='neg', + min_snr=min_snr) + # P100 is detected raw first (no SNR gate) so we can compute the proper + # peak-to-trough metric before deciding whether to keep it. + p100 = get_peak(erp_name='P100', evoked_potential=evoked_occipital, + peak_time_min=0.080, peak_time_max=0.130, mode='pos', + min_snr=None) + if p100 is not None: + if n75 is not None: + peak_to_trough = p100['amplitude'] - n75['amplitude'] + else: + # Fall back to absolute amplitude if N75 wasn't detectable. + peak_to_trough = p100['amplitude'] + ptt_snr = abs(peak_to_trough) / max(p100['noise_uv'], 1e-12) + p100['peak_to_trough'] = peak_to_trough + p100['peak_to_trough_snr'] = ptt_snr + if min_snr is not None and ptt_snr < min_snr: + print(f'P100: peak-to-trough SNR={ptt_snr:.2f} < {min_snr} ' + f'(ptt={peak_to_trough*1e6:.2f}µV, noise={p100["noise_uv"]*1e6:.2f}µV) ' + '— peak unreliable') + p100 = None + n145 = get_peak(erp_name='N145', evoked_potential=evoked_occipital, + peak_time_min=0.120, peak_time_max=0.170, mode='neg', + min_snr=min_snr) + + return n75, p100, n145 + + +# --------------------------------------------------------------------------- +# Robust averaging + JSON helper +# --------------------------------------------------------------------------- + +def trimmed_average(epochs, proportiontocut=0.1): + """Per-sample trimmed-mean evoked across trials. + + Standard ``epochs.average()`` is biased by occasional single-trial outliers + that survive amplitude-based rejection. 
Dropping the top and bottom + ``proportiontocut`` fraction at each timepoint produces a more + representative waveform without raising the rejection threshold or + discarding whole epochs. + """ + data = epochs.get_data() # (n_trials, n_channels, n_times) + if data.shape[0] == 0: + return epochs.average() + avg = trim_mean(data, proportiontocut=proportiontocut, axis=0) + nave = int(round(data.shape[0] * (1 - 2 * proportiontocut))) + return EvokedArray(avg, epochs.info, tmin=epochs.tmin, nave=nave, + comment=f'trimmed-mean ({int(proportiontocut * 100)}%)') + + +def json_safe_float(x): + """Coerce numpy scalars / None / NaN / Inf to JSON-safe Python floats.""" + if x is None: + return None + try: + v = float(x) + except (TypeError, ValueError): + return None + if np.isnan(v) or np.isinf(v): + return None + return v + + +# --------------------------------------------------------------------------- +# PR-VEP biomarkers (pure computation; printing/plotting stays in callers) +# --------------------------------------------------------------------------- + +def compute_iold(p100_left, p100_right, flag_threshold_ms=IOLD_FLAG_MS): + """Inter-ocular latency difference at P100 (signed L − R, ms). + + Returns ``None`` if either eye is missing a P100 detection. Otherwise a + dict with per-eye latency/amplitude, the signed difference, and a + threshold-crossing flag suitable for clinical screening. + """ + if p100_left is None or p100_right is None: + return None + p100_left_ms = p100_left['latency'] * 1000.0 + p100_right_ms = p100_right['latency'] * 1000.0 + iold_ms = p100_left_ms - p100_right_ms + # Prefer peak-to-trough amplitude when available (cancels DC pedestal / + # baseline drift); fall back to absolute peak value if not. 
+ amp_l = p100_left.get('peak_to_trough', p100_left['amplitude']) + amp_r = p100_right.get('peak_to_trough', p100_right['amplitude']) + return { + 'p100_left_ms': json_safe_float(p100_left_ms), + 'p100_right_ms': json_safe_float(p100_right_ms), + 'p100_left_uv': json_safe_float(amp_l * 1e6), + 'p100_right_uv': json_safe_float(amp_r * 1e6), + 'p100_left_snr': json_safe_float(p100_left.get('peak_to_trough_snr', + p100_left.get('snr'))), + 'p100_right_snr': json_safe_float(p100_right.get('peak_to_trough_snr', + p100_right.get('snr'))), + 'iold_ms': json_safe_float(iold_ms), + 'flag': bool(abs(iold_ms) > flag_threshold_ms), + } + + +def compute_iold_per_size(epochs, event_id, ch_name, + sizes=('large', 'small'), + eye_prefixes=('left_eye', 'right_eye'), + flag_threshold_ms=IOLD_FLAG_MS): + """Per-check-size IOLD. + + Demyelination preferentially delays the high-spatial-frequency (small- + check) response, so the per-size IOLD often surfaces lateralised + dysfunction that the size-pooled IOLD averages out. + + For each size in ``sizes``, trims-mean per-eye epochs at ``ch_name``, + locates P100, then runs :func:`compute_iold` on the pair. Returns + ``{size: iold_dict_or_None}``. + """ + out = {} + for size in sizes: + peaks = {} + for eye in eye_prefixes: + cond_key = f'{eye}/{size}' + if cond_key not in event_id or len(epochs[cond_key]) == 0: + peaks[eye] = None + continue + ev = trimmed_average(epochs[cond_key]).copy().pick([ch_name]) + _, p100, _ = get_pr_vep_latencies(ev) + peaks[eye] = p100 + iold = compute_iold(peaks.get('left_eye'), peaks.get('right_eye'), + flag_threshold_ms=flag_threshold_ms) + recovery = compute_p100_recovery(peaks.get('left_eye'), + peaks.get('right_eye')) + out[size] = {**(iold or {}), 'recovery': recovery} if iold else \ + {'recovery': recovery} + return out + + +def compute_p100_recovery(p100_left, p100_right, min_snr=PEAK_MIN_SNR): + """Detectable / not-detectable status for longitudinal P100 recovery tracking. 
+ + Distinct from latency: tracks whether the P100 was *measurable at all* + against the noise floor. In demyelinating disease the parvocellular + (small-check) P100 may sit below noise at baseline and transition to + detectable as remyelination progresses. The state transition itself is + a clinical biomarker — independent of any latency value. + + Returns per-eye ``{detectable, snr, amp_uv, latency_ms}`` plus a + session-level summary that can be plotted longitudinally without + losing the "not-detectable" sessions to NaN-holes: + - ``eyes_detectable``: 0/1/2 — count of eyes above the SNR gate. + - ``mean_snr``: average SNR across detectable eyes (0 if none). + - ``recovery_score``: composite score in [0, 1+] suitable for trend + plots. Floor 0 = "neither eye detectable"; 1 = "both detectable at + threshold SNR"; >1 = "both detectable, with margin above threshold". + """ + def _eye_summary(p): + if p is None: + return {'detectable': False, + 'snr': None, + 'amp_uv': None, + 'latency_ms': None} + snr = p.get('peak_to_trough_snr', p.get('snr')) + amp = p.get('peak_to_trough', p['amplitude']) + return { + 'detectable': bool(snr is not None and snr >= min_snr), + 'snr': json_safe_float(snr), + 'amp_uv': json_safe_float(amp * 1e6), + 'latency_ms': json_safe_float(p['latency'] * 1000.0), + } + + left = _eye_summary(p100_left) + right = _eye_summary(p100_right) + detected_snrs = [s for s, d in [(left['snr'], left['detectable']), + (right['snr'], right['detectable'])] + if d and s is not None] + mean_snr = float(np.mean(detected_snrs)) if detected_snrs else 0.0 + # Composite score: average SNR / threshold across both eyes. + # 0 if neither detectable, 1 = both at threshold, >1 = above threshold. 
+ eye_scores = [ + (left['snr'] / min_snr) if (left['detectable'] and left['snr'] is not None) else 0.0, + (right['snr'] / min_snr) if (right['detectable'] and right['snr'] is not None) else 0.0, + ] + recovery_score = float(np.mean(eye_scores)) + return { + 'left': left, + 'right': right, + 'eyes_detectable': int(left['detectable']) + int(right['detectable']), + 'mean_snr': json_safe_float(mean_snr), + 'recovery_score': json_safe_float(recovery_score), + 'min_snr_threshold': float(min_snr), + } + + +def compute_amplitude_ratio(p100_left, p100_right, log2_flag=LOG2_AMP_FLAG): + """P100 amplitude ratio L/R (rectified) and log2 ratio. + + Returns ``None`` if either eye lacks a P100 or right-eye amplitude is zero + (ratio undefined). + """ + if p100_left is None or p100_right is None: + return None + # Peak-to-trough where available (stable against DC pedestal); abs() so + # the ratio is sign-invariant in case one trace flipped polarity. + amp_l_uv = abs(p100_left.get('peak_to_trough', p100_left['amplitude'])) * 1e6 + amp_r_uv = abs(p100_right.get('peak_to_trough', p100_right['amplitude'])) * 1e6 + if amp_r_uv <= 0: + return None + ratio = amp_l_uv / amp_r_uv + log2_ratio = float(np.log2(ratio)) + return { + 'amp_left_uv': json_safe_float(amp_l_uv), + 'amp_right_uv': json_safe_float(amp_r_uv), + 'ratio': json_safe_float(ratio), + 'log2_ratio': json_safe_float(log2_ratio), + 'flag': bool(abs(log2_ratio) > log2_flag), + } + + +def compute_check_size_slope(epochs, event_id, ch_name, check_size_arcmin, + eye_prefixes=('left_eye', 'right_eye'), + min_trials=5): + """Per-eye P100 latency slope vs. check size (ms / arcmin). + + For each (eye × size) condition present in ``event_id`` (keys of the form + ``"{eye_prefix}/{size_label}"``), trims-mean across trials, locates the + P100 in the canonical search window, and fits a line of P100 latency vs. + check size in arcmin. The L − R slope difference amplifies asymmetric + spatial-frequency-dependent demyelination. 
+ + Returns a dict with per-condition P100 latencies, per-eye slopes (or None + if fewer than two sizes were detected), and the L − R slope difference (or + None if either eye is missing a slope). + """ + out = { + 'per_condition_p100_ms': {}, + 'slope_left_ms_per_arcmin': None, + 'slope_right_ms_per_arcmin': None, + 'slope_diff': None, + } + rows = [] + for eye in eye_prefixes: + for size_label, arcmin in check_size_arcmin.items(): + cond_key = f'{eye}/{size_label}' + if cond_key not in event_id: + continue + ep = epochs[cond_key] + if len(ep) < min_trials: + continue + ev = trimmed_average(ep) + _, p100_c, _ = get_pr_vep_latencies(ev.copy().pick([ch_name])) + if p100_c is None: + continue + lat_ms = p100_c['latency'] * 1000.0 + rows.append({'eye': eye, 'size_label': size_label, + 'size_arcmin': arcmin, 'p100_ms': lat_ms}) + out['per_condition_p100_ms'][cond_key] = json_safe_float(lat_ms) + + slopes = {} + for eye in eye_prefixes: + eye_rows = [r for r in rows if r['eye'] == eye] + if len(eye_rows) >= 2: + xs = np.array([r['size_arcmin'] for r in eye_rows]) + ys = np.array([r['p100_ms'] for r in eye_rows]) + slopes[eye] = float(np.polyfit(xs, ys, 1)[0]) + if 'left_eye' in slopes: + out['slope_left_ms_per_arcmin'] = json_safe_float(slopes['left_eye']) + if 'right_eye' in slopes: + out['slope_right_ms_per_arcmin'] = json_safe_float(slopes['right_eye']) + if 'left_eye' in slopes and 'right_eye' in slopes: + out['slope_diff'] = json_safe_float(slopes['left_eye'] - slopes['right_eye']) + return out + + +def bootstrap_p100_latency(epochs, event_id, ch_name, eye_prefix, + win_ms=(60, 160), n_boot=1000, seed=0, + proportiontocut=0.1, min_trials=10): + """Bootstrap distribution of P100 latency for one eye. + + Trial-resamples with replacement, recomputes the trimmed-mean evoked at + ``ch_name``, and locates the positive max in ``win_ms``. Returns a + length-``n_boot`` array of latencies (ms), or ``None`` if there aren't + enough trials. 
Pair two calls (one per eye) to derive an IOLD CI from the + pairwise differences. + """ + keys = [k for k in event_id if k.startswith(eye_prefix)] + if not keys: + return None + ep = epochs[keys].copy().pick([ch_name]) + data = ep.get_data()[:, 0, :] + if data.shape[0] < min_trials: + return None + times_ms = ep.times * 1000.0 + win_mask = (times_ms >= win_ms[0]) & (times_ms <= win_ms[1]) + win_times = times_ms[win_mask] + n_trials = data.shape[0] + rng = np.random.default_rng(seed) + times_all = ep.times * 1000.0 # full time axis in ms + dt = float(times_all[1] - times_all[0]) # sample period (ms) + win_indices = np.where(win_mask)[0] # absolute indices of window samples + latencies = np.empty(n_boot) + for b in range(n_boot): + idx = rng.integers(0, n_trials, n_trials) + boot_avg = trim_mean(data[idx], proportiontocut=proportiontocut, axis=0) + + # Grid-level peak within the search window + local_peak = np.argmax(boot_avg[win_mask]) + abs_peak = win_indices[local_peak] + + # Parabolic interpolation: recovers sub-sample peak location, + # reducing CI quantisation from ±dt to roughly ±0.3 dt. + if 0 < abs_peak < len(boot_avg) - 1: + y_l = boot_avg[abs_peak - 1] + y_c = boot_avg[abs_peak] + y_r = boot_avg[abs_peak + 1] + denom = y_l - 2 * y_c + y_r + if abs(denom) > 1e-30: + offset = 0.5 * (y_l - y_r) / denom # fractional sample offset + latencies[b] = times_all[abs_peak] + offset * dt + else: + latencies[b] = times_all[abs_peak] + else: + latencies[b] = times_all[abs_peak] + + return latencies + + +def compute_hemi_asymmetry(evoked_avg, ch_left, ch_right, + lat_flag_ms=IOLD_FLAG_MS, log2_flag=LOG2_AMP_FLAG): + """P100 latency/amplitude asymmetry between two hemispheric channels. + + Works for any homologous pair (O1/O2, P7/P8, ...). Returns ``None`` if + either channel fails P100 detection. Sign convention: ``lat_diff = + ch_left − ch_right``, so positive means the left-named channel is later. 
+ """ + ev_l = evoked_avg.copy().pick([ch_left]) + ev_r = evoked_avg.copy().pick([ch_right]) + _, p100_l, _ = get_pr_vep_latencies(ev_l) + _, p100_r, _ = get_pr_vep_latencies(ev_r) + if p100_l is None or p100_r is None: + return None + lat_l = p100_l['latency'] * 1000.0 + lat_r = p100_r['latency'] * 1000.0 + # Peak-to-trough where available; abs() to keep ratio sign-invariant. + amp_l = abs(p100_l.get('peak_to_trough', p100_l['amplitude'])) * 1e6 + amp_r = abs(p100_r.get('peak_to_trough', p100_r['amplitude'])) * 1e6 + lat_diff = lat_l - lat_r + amp_ratio = amp_l / amp_r if amp_r > 0 else float('inf') + log2_ratio = float(np.log2(amp_ratio)) if amp_r > 0 else float('nan') + return { + f'lat_{ch_left.lower()}': json_safe_float(lat_l), + f'lat_{ch_right.lower()}': json_safe_float(lat_r), + f'amp_{ch_left.lower()}': json_safe_float(amp_l), + f'amp_{ch_right.lower()}': json_safe_float(amp_r), + f'snr_{ch_left.lower()}': json_safe_float(p100_l.get('peak_to_trough_snr', + p100_l.get('snr'))), + f'snr_{ch_right.lower()}': json_safe_float(p100_r.get('peak_to_trough_snr', + p100_r.get('snr'))), + 'lat_diff_ms': json_safe_float(lat_diff), + 'amp_ratio': json_safe_float(amp_ratio), + 'log2_ratio': json_safe_float(log2_ratio), + 'lat_flag': bool(abs(lat_diff) > lat_flag_ms), + 'amp_flag': bool(amp_r > 0 and abs(log2_ratio) > log2_flag), + } + + +def compute_hemi_delta_asymmetry(left_eye_hemi, right_eye_hemi, + ch_left, ch_right): + """Inter-ocular contrast of hemispheric asymmetry. 
+ + Δlat = (chL − chR)|left_eye − (chL − chR)|right_eye + Δlog2 = log2(chL/chR)|left_eye − log2(chL/chR)|right_eye + + A purely anatomical chL 0 else float('nan')) + log2_asym_R = (np.log2(R[amp_key_l] / R[amp_key_r]) + if R[amp_key_r] > 0 else float('nan')) + d_log2 = log2_asym_L - log2_asym_R + return { + 'lat_asym_left': json_safe_float(lat_asym_L), + 'lat_asym_right': json_safe_float(lat_asym_R), + 'd_lat': json_safe_float(d_lat), + 'log2_asym_left': json_safe_float(log2_asym_L), + 'log2_asym_right': json_safe_float(log2_asym_R), + 'd_log2': json_safe_float(d_log2), + } diff --git a/eegnb/cli/introprompt.py b/eegnb/cli/introprompt.py index ea96b3685..9e16162a0 100644 --- a/eegnb/cli/introprompt.py +++ b/eegnb/cli/introprompt.py @@ -4,7 +4,7 @@ from eegnb import generate_save_fn, DATA_DIR from eegnb.devices.eeg import EEG -from .utils import run_experiment, get_exp_desc, experiments +from .utils import run_experiment, get_exp_desc, get_experiments eegnb_sites = ['eegnb_examples', 'grifflab_dev', 'jadinlab_home'] @@ -87,6 +87,7 @@ def device_prompt() -> EEG: def exp_prompt(runorzip:str='run') -> str: + experiments = get_experiments() print("\nPlease select which experiment you would like to %s: \n" %runorzip) print( "\n".join( diff --git a/eegnb/cli/utils.py b/eegnb/cli/utils.py index fad282cf0..057335c41 100644 --- a/eegnb/cli/utils.py +++ b/eegnb/cli/utils.py @@ -1,38 +1,42 @@ -#change the pref libraty to PTB and set the latency mode to high precision -from psychopy import prefs -prefs.hardware['audioLib'] = 'PTB' -prefs.hardware['audioLatencyMode'] = 3 +try: + #change the pref libraty to PTB and set the latency mode to high precision + from psychopy import prefs + prefs.hardware['audioLib'] = 'PTB' + prefs.hardware['audioLatencyMode'] = 3 +except ImportError: + pass from eegnb.devices.eeg import EEG - -from eegnb.experiments import VisualN170, Experiment -from eegnb.experiments import VisualP300 -from eegnb.experiments import VisualSSVEP -from 
eegnb.experiments import AuditoryOddball -from eegnb.experiments.visual_cueing import cueing -from eegnb.experiments.visual_codeprose import codeprose -from eegnb.experiments.auditory_oddball import diaconescu -from eegnb.experiments.auditory_ssaep import ssaep, ssaep_onefreq from typing import Optional +def get_experiments(): + from eegnb.experiments import VisualN170, Experiment + from eegnb.experiments import VisualP300 + from eegnb.experiments import VisualSSVEP + from eegnb.experiments import AuditoryOddball + from eegnb.experiments.visual_cueing import cueing + from eegnb.experiments.visual_codeprose import codeprose + from eegnb.experiments.auditory_oddball import diaconescu + from eegnb.experiments.auditory_ssaep import ssaep, ssaep_onefreq -# New Experiment Class structure has a different initilization, to be noted -experiments = { - "visual-N170": VisualN170(), - "visual-P300": VisualP300(), - "visual-SSVEP": VisualSSVEP(), - "visual-cue": cueing, - "visual-codeprose": codeprose, - "auditory-SSAEP orig": ssaep, - "auditory-SSAEP onefreq": ssaep_onefreq, - "auditory-oddball orig": AuditoryOddball(), - "auditory-oddball diaconescu": diaconescu, -} + # New Experiment Class structure has a different initilization, to be noted + return { + "visual-N170": VisualN170, + "visual-P300": VisualP300, + "visual-SSVEP": VisualSSVEP, + "visual-cue": cueing, + "visual-codeprose": codeprose, + "auditory-SSAEP orig": ssaep, + "auditory-SSAEP onefreq": ssaep_onefreq, + "auditory-oddball orig": AuditoryOddball, + "auditory-oddball diaconescu": diaconescu, + } def get_exp_desc(exp: str): + experiments = get_experiments() if exp in experiments: module = experiments[exp] if hasattr(module, "__title__"): @@ -43,17 +47,24 @@ def get_exp_desc(exp: str): def run_experiment( experiment: str, eeg_device: EEG, record_duration: Optional[float] = None, save_fn=None ): + experiments = get_experiments() if experiment in experiments: - module = experiments[experiment] + exp_item = 
experiments[experiment] + + from eegnb.experiments import Experiment # Condition added for different run types of old and new experiment class structure - if isinstance(module, Experiment.BaseExperiment): - module.duration = record_duration - module.eeg = eeg_device - module.save_fn = save_fn - module.run() + # If it's a class (BaseExperiment subclass), instantiate it + if isinstance(exp_item, type) and issubclass(exp_item, Experiment.BaseExperiment): + # Concrete subclasses supply defaults for BaseExperiment's required args; mypy can't see which subclass. + exp_instance = exp_item() # type: ignore[call-arg] + exp_instance.duration = record_duration + exp_instance.eeg = eeg_device + exp_instance.save_fn = save_fn + exp_instance.run() else: - module.present(duration=record_duration, eeg=eeg_device, save_fn=save_fn) # type: ignore + # Otherwise it's an old-style module + exp_item.present(duration=record_duration, eeg=eeg_device, save_fn=save_fn) # type: ignore else: print("\nError: Unknown experiment '{}'".format(experiment)) print("\nExperiment can be one of:") diff --git a/eegnb/datasets/datasets.py b/eegnb/datasets/datasets.py index 21add031c..61a7fa429 100644 --- a/eegnb/datasets/datasets.py +++ b/eegnb/datasets/datasets.py @@ -54,10 +54,12 @@ def fetch_dataset( "visual-P300", "visual-spatialfreq", "visual-SSVEP", + "visual-PRVEP", ] # List gdrive extensions for various experiments gdrive_locs = { + "visual-PRVEP": "1qn-0OSxlO6-stL6EXh9VT4pMSFghCXBG", "visual-SSVEP": "1zj9Wx-YEMJo7GugUUu7Sshcybfsr-Fze", "visual-spatialfreq": "1ggBt7CNvMgddxji-FvxcZoP-IF-PmESX", "visual-P300": "1OLcj-zSjqdNrsBSUAsGBXOwWDnGWTVFC", @@ -84,6 +86,12 @@ def fetch_dataset( download_it = True if download_it: + if gdrive_locs.get(experiment) is None: + raise ValueError( + f"No example dataset available for '{experiment}' yet. " + "Upload a zip to Google Drive and add the file ID to gdrive_locs in datasets.py." + ) + # check if data directory exits. 
If not, create it if os.path.exists(data_dir) is not True: os.makedirs(data_dir) diff --git a/eegnb/devices/__init__.py b/eegnb/devices/__init__.py index e69de29bb..a95062678 100644 --- a/eegnb/devices/__init__.py +++ b/eegnb/devices/__init__.py @@ -0,0 +1,9 @@ +from eegnb.devices.utils import ( # noqa: F401 + CYTON_CONFIG_GAIN_1X, + CYTON_CONFIG_GAIN_2X, + CYTON_CONFIG_GAIN_4X, + CYTON_CONFIG_GAIN_6X, + CYTON_CONFIG_GAIN_8X, + CYTON_CONFIG_GAIN_12X, + CYTON_CONFIG_GAIN_24X, +) diff --git a/eegnb/devices/eeg.py b/eegnb/devices/eeg.py index ba85f2e25..8c057e3ef 100644 --- a/eegnb/devices/eeg.py +++ b/eegnb/devices/eeg.py @@ -20,11 +20,21 @@ from serial import Serial, EIGHTBITS, PARITY_NONE, STOPBITS_ONE -import pyxid2 +from eegnb.utils.missing import missing_module + +try: + import pyxid2 +except (ImportError, OSError): + pyxid2 = missing_module( + "pyxid2", + "The Cedrus XID backend (NIRSport2 and other Cedrus stimulus-marker devices)", + "xid", + ) from eegnb.devices.utils import ( get_openbci_usb, create_stim_array, + assert_ftdi_latency_1ms, SAMPLE_FREQS, EEG_INDICES, EEG_CHANNELS, @@ -78,6 +88,7 @@ def __init__( ip_addr=None, ch_names=None, config=None, + analog_mode=False, make_logfile=False): """The initialization function takes the name of the EEG device and determines whether or not the device belongs to the Muse or Brainflow families and initializes the appropriate backend. @@ -97,6 +108,12 @@ def __init__( self.ip_addr = ip_addr self.other = other self.config = config + # Cyton-only: switch the board into analog read mode (firmware cmd /2) + # so the AUX header pins (A5-A7) stream alongside the EEG channels. + # Tradeoff: analog mode replaces the on-board accelerometer data with + # the analog reads. Use when you have a photodiode / external sensor + # wired to AUX and don't need accel. 
+ self.analog_mode = analog_mode self.make_logfile = make_logfile # currently only used for kf self.backend = self._get_backend(self.device_name) self.initialize_backend() @@ -323,6 +340,8 @@ def _init_brainflow(self): if self.serial_port: serial_port = str(self.serial_port) self.brainflow_params.serial_port = serial_port + if self.device_name in ('cyton', 'cyton_daisy'): + assert_ftdi_latency_1ms(serial_port) # Initialize board_shim self.sfreq = BoardShim.get_sampling_rate(self.brainflow_id) @@ -333,11 +352,20 @@ def _init_brainflow(self): if self.config: # For Cyton boards, split config string by 'X' delimiter and apply each setting if 'cyton' in self.device_name: - config_settings = self.config.split('X') + config_settings = [s for s in self.config.split('X') if s] for setting in config_settings: - self.board.config_board(setting + 'X') + cmd = setting + 'X' + response = self.board.config_board(cmd) + print(f"[cyton config] {cmd} -> {response!r}") else: - self.board.config_board(self.config) + response = self.board.config_board(self.config) + print(f"[config_board] {self.config!r} -> {response!r}") + + # Cyton: enable analog read mode so AUX header pins (A5-A7) stream + # alongside the EEG channels. Replaces accelerometer data. + if self.analog_mode and 'cyton' in (self.device_name or ''): + response = self.board.config_board('/2') + print(f"[cyton analog mode] /2 -> {response!r}") def _start_brainflow(self): # only start stream if non exists @@ -412,6 +440,30 @@ def _brainflow_extract(self, data): eeg_data = data[:, BoardShim.get_eeg_channels(self.brainflow_id)] timestamps = data[:, BoardShim.get_timestamp_channel(self.brainflow_id)] + # BrainFlow scales Cyton data assuming 24× gain. If a different gain was + # configured via config_board, correct the scaling here. 
+ # Config string format: x{ch}0{gain_code}0110X (gain_code: 0=1×,1=2×,2=4×,3=6×,4=8×,5=12×,6=24×) + if self.config and 'cyton' in self.device_name: + gain_multipliers = {0: 1, 1: 2, 2: 4, 3: 6, 4: 8, 5: 12, 6: 24} + brainflow_assumed_gain = 24 + gain_code = int(self.config[3]) # 4th char of first command "x{ch}0{G}..." + actual_gain = gain_multipliers.get(gain_code, brainflow_assumed_gain) + if actual_gain != brainflow_assumed_gain: + eeg_data = eeg_data * (brainflow_assumed_gain / actual_gain) + + # Append analog (AUX) channels when Cyton is in analog mode. Useful + # for photodiode triggers and other external sensors that need to be + # sampled in lockstep with the EEG. + if self.analog_mode: + try: + analog_idx = BoardShim.get_analog_channels(self.brainflow_id) + if len(analog_idx): + aux_data = data[:, analog_idx] + eeg_data = np.append(eeg_data, aux_data, axis=1) + ch_names = list(ch_names) + [f"AUX{i}" for i in range(len(analog_idx))] + except Exception as e: + logger.warning("could not read analog channels: %s", e) + return ch_names, eeg_data, timestamps def _brainflow_push_sample(self, marker): @@ -670,14 +722,21 @@ def start(self, fn, duration=None): - def push_sample(self, marker, timestamp, marker_name=None): + def push_sample(self, marker, timestamp=None, marker_name=None): """ Universal method for pushing a marker and its timestamp to store alongside the EEG data. Parameters: marker (int): marker number for the stimuli being presented. - timestamp (float): timestamp of stimulus onset from time.time() function. + timestamp (float, optional): timestamp of stimulus onset from time.time() function. + Not used by the BrainFlow backend (which records the board's current sample + timestamp instead). Required by muselsl and kernelflow backends. + + Returns: + bool: True if the marker was recorded, False if the stream is no longer active. 
""" + if not self.stream_started: + return False if self.backend == "brainflow": self._brainflow_push_sample(marker=marker) elif self.backend == "muselsl": @@ -688,8 +747,10 @@ def push_sample(self, marker, timestamp, marker_name=None): self._serial_push_sample(marker=marker) elif self.backend == "xidport": self._xid_push_sample(marker=marker) + return True def stop(self): + self.stream_started = False if self.backend == "brainflow": self._stop_brainflow() elif self.backend == "muselsl": diff --git a/eegnb/devices/utils.py b/eegnb/devices/utils.py index c804befd0..9b60d95b3 100644 --- a/eegnb/devices/utils.py +++ b/eegnb/devices/utils.py @@ -1,5 +1,6 @@ import numpy as np import socket +import sys import platform import serial @@ -90,6 +91,44 @@ } + +# --------------------------------------------------------------------------- +# Cyton board channel configuration presets +# --------------------------------------------------------------------------- +# Each channel command has the format: x N P G I B S1 S2 X +# N = channel number (1-8) +# P = power (0=ON, 1=OFF) +# G = gain (0=1×, 1=2×, 2=4×, 3=6×, 4=8×, 5=12×, 6=24×) +# I = input type (0=normal EEG, 1=shorted, ...) +# B = include in BIAS derivation (1=yes) +# S2 = SRB2 connection (1=connected) +# S1 = SRB1 connection (0=disconnected) +# +# Build a config string by joining per-channel strings — applied with +# EEG(device='cyton', config=CYTON_CONFIG_GAIN_4X). + +def _cyton_ch_config(gain_code: int, n_channels: int = 8) -> str: + """Build a Cyton channel-settings string for all channels. + + Args: + gain_code: ADS1299 gain code (0=1×, 1=2×, 2=4×, 3=6×, 4=8×, 5=12×, 6=24×). + n_channels: Number of channels to configure (default 8 for standard Cyton). + + Returns: + Config string ready to pass to ``EEG(config=...)``. + """ + return "".join(f"x{ch}0{gain_code}0110X" for ch in range(1, n_channels + 1)) + +# Standard gain presets — normal EEG input, bias enabled, SRB2 on, SRB1 off. 
+CYTON_CONFIG_GAIN_1X = _cyton_ch_config(0) # 1× (for strong signals / testing) +CYTON_CONFIG_GAIN_2X = _cyton_ch_config(1) # 2× +CYTON_CONFIG_GAIN_4X = _cyton_ch_config(2) # 4× - for Thinkpulse electrodes +CYTON_CONFIG_GAIN_6X = _cyton_ch_config(3) # 6× +CYTON_CONFIG_GAIN_8X = _cyton_ch_config(4) # 8× +CYTON_CONFIG_GAIN_12X = _cyton_ch_config(5) # 12× — good general-purpose EEG config +CYTON_CONFIG_GAIN_24X = _cyton_ch_config(6) # 24× — default gain + + def create_stim_array(timestamps, markers): """Creates a stim array which is the lenmgth of the EEG data where the stimuli are lined up with their corresponding EEG sample. @@ -123,3 +162,80 @@ def get_openbci_usb(): def serial_ports(): return serial.tools.list_ports.comports() + + +def get_ftdi_latency_ms(com_port: str): + """Read the FTDI VCP LatencyTimer (ms) for a COM port from the Windows registry. + + Returns the latency in ms, or None on non-Windows platforms or if the port + is not found under HKLM\\SYSTEM\\CurrentControlSet\\Enum\\FTDIBUS. 
+ """ + if sys.platform != 'win32': + return None + import winreg + ftdi_root = r"SYSTEM\CurrentControlSet\Enum\FTDIBUS" + target = com_port.upper() + try: + root_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, ftdi_root) + except OSError: + return None + try: + for i in range(1024): + try: + device_name = winreg.EnumKey(root_key, i) + except OSError: + break + device_path = f"{ftdi_root}\\{device_name}" + try: + device_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, device_path) + except OSError: + continue + try: + for j in range(64): + try: + instance = winreg.EnumKey(device_key, j) + except OSError: + break + params_path = f"{device_path}\\{instance}\\Device Parameters" + try: + params_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, params_path) + except OSError: + continue + try: + port_name, _ = winreg.QueryValueEx(params_key, "PortName") + if str(port_name).upper() == target: + latency, _ = winreg.QueryValueEx(params_key, "LatencyTimer") + return int(latency) + except OSError: + pass + finally: + params_key.Close() + finally: + device_key.Close() + finally: + root_key.Close() + return None + + +def assert_ftdi_latency_1ms(com_port: str) -> None: + """Assert the FTDI LatencyTimer for a COM port is 1 ms (Windows only). + + The OpenBCI Cyton dongle ships with the Windows default of 16 ms, which + adds ~15 ms of USB buffering jitter to every marker push and corrupts + stimulus/EEG alignment for VEP-class experiments. No-op on non-Windows. + + Fix in: Device Manager -> Ports -> USB Serial Port -> Properties -> + Port Settings -> Advanced -> Latency Timer (ms) = 1. + """ + if sys.platform != 'win32': + return + latency = get_ftdi_latency_ms(com_port) + assert latency is not None, ( + f"Could not read FTDI LatencyTimer for {com_port}. " + f"Verify it is an FTDI device in Device Manager." + ) + assert latency == 1, ( + f"FTDI LatencyTimer for {com_port} is {latency} ms; required 1 ms. 
" + f"Device Manager -> Ports -> USB Serial Port ({com_port}) -> " + f"Properties -> Port Settings -> Advanced -> Latency Timer (ms) = 1." + ) diff --git a/eegnb/devices/vr.py b/eegnb/devices/vr.py new file mode 100644 index 000000000..983ecbd83 --- /dev/null +++ b/eegnb/devices/vr.py @@ -0,0 +1,223 @@ +import logging +import csv +import numpy as np +from time import time +from psychopy import core, monitors +from psychopy.visual.rift import Rift + + +# Frame-rate deviation above this percentage triggers a user-facing warning. +# Set high (50%) because VR jitter is recoverable per-trial via the timing +# sidecar (app_motion_to_photon_latency_s). The warning catches fundamentally +# broken setups (wrong GPU, no acceleration), not borderline cases. +DISPLAY_DEVIATION_FLAG_PCT = 50.0 + + +def _build_placeholder_monitor(): + """Programmatic Monitor passed to the Rift constructor. + Display geometry comes from the HMD runtime, not this object. + Logger is silenced during construction because Monitor.__init__ emits an + unconditional 'Monitor specification not found' warning for any name not + present in the user's saved calibration database.""" + from psychopy import logging as psy_logging + prev_level = psy_logging.console.level + psy_logging.console.setLevel(psy_logging.ERROR) + try: + mon = monitors.Monitor('eegnb_vr_placeholder', autoLog=False) + mon.setDistance(60) + mon.setSizePix([1920, 1080]) + finally: + psy_logging.console.setLevel(prev_level) + return mon + +class VR(Rift): + """ + Extended VR class for HMDs, providing built-in methods for + stereoscopic rendering math, precise hardware clock synchronization, + and per-trial compositor telemetry buffering. + """ + def __init__(self, *args, **kwargs): + # Provide a placeholder monitor so PsychoPy doesn't emit + # 'Monitor specification not found' on first flip. 
The values are not + # used by the Rift compositor — display geometry comes from the HMD + # runtime via libovr — but PsychoPy's Window base class still queries + # the monitor object during initialization. + kwargs.setdefault('monitor', _build_placeholder_monitor()) + super().__init__(*args, **kwargs) + self.libovr_to_wallclock_offset = None + self.libovr_to_wallclock_bracket = None + self.timing_data = [] + + def validate_frame_rate(self, draw_blank, n_frames=60, warmup=10): + """Measure actual frame delivery rate and compare to the runtime target. + + Specific to VR because Quest Link's encoded transport pipeline can + silently degrade (wrong GPU, encode bottleneck) even though the runtime + still advertises its nominal refresh rate. + + Args: + draw_blank: callable that draws a frame and flips the window. + Caller decides eye-buffer logic. + n_frames: measurement window. 60 frames ≈ 0.5s at 120 Hz. + warmup: discarded frames so the compositor reaches steady state. + + Returns: + Dict with ``target_hz``, ``actual_hz``, ``deviation_pct``, ``ok`` + (True iff deviation ≤ ``DISPLAY_DEVIATION_FLAG_PCT``). + """ + target_hz = float(self.displayRefreshRate) + + for _ in range(warmup): + draw_blank() + + t0 = core.getTime() + for _ in range(n_frames): + draw_blank() + elapsed = core.getTime() - t0 + + actual_hz = n_frames / elapsed + deviation = abs(actual_hz - target_hz) / target_hz * 100.0 + + result = { + 'target_hz': round(target_hz, 1), + 'actual_hz': round(actual_hz, 1), + 'deviation_pct': round(deviation, 1), + 'ok': deviation <= DISPLAY_DEVIATION_FLAG_PCT, + } + + status = 'OK' if result['ok'] else 'DEGRADED — check GPU assignment' + print(f"[vr] Target: {target_hz:.0f} Hz Measured: {actual_hz:.1f} Hz " + f"Deviation: {deviation:.1f}% [{status}]") + return result + + def compute_optical_axis_offsets(self): + """ + Computes the Normalized Device Coordinates (NDC) horizontal (x) offset + needed to center content directly in front of each eye's physical lens. 
+ + Because VR headsets use asymmetric lenses (the screen extends further + to the outside of the eye than the inside), the mathematical center of + the screen (NDC 0,0) does not align with the user's optical axis. + """ + try: + import psychxr.drivers.libovr as libovr + # fov = [UpTan, DownTan, LeftTan, RightTan] + left_fov, _, _ = libovr.getEyeRenderFov(libovr.EYE_LEFT) + right_fov, _, _ = libovr.getEyeRenderFov(libovr.EYE_RIGHT) + left_L, left_R = float(left_fov[2]), float(left_fov[3]) + right_L, right_R = float(right_fov[2]), float(right_fov[3]) + return ((left_L - left_R) / (left_L + left_R), + (right_L - right_R) / (right_L + right_R)) + except Exception as e: + logging.warning(f"[VR] Failed to compute optical axis offsets: {e}") + return (0.0, 0.0) + + def sync_vr_clock(self): + """ + Calculates Wall-clock <-> LibOVR clock offset. LibOVR timestamps are on a QPC-based + clock with arbitrary zero; time.time() is Unix epoch. Sample paired calls in a + tight bracket and keep the tightest, so analysis can convert LibOVR times to wall-clock. 
+ """ + if self.libovr_to_wallclock_offset is not None: + return self.libovr_to_wallclock_offset + + try: + from psychxr.drivers.libovr import timeInSeconds + best_bracket = None + best_offset = None + for _ in range(21): + t0 = time() + lovr = timeInSeconds() + t1 = time() + bracket = t1 - t0 + offset = 0.5 * (t0 + t1) - lovr + if best_bracket is None or bracket < best_bracket: + best_bracket = bracket + best_offset = offset + + logging.info( + f"[VR] clock offset (wall - libovr) = " + f"{best_offset:.6f}s (tightest bracket = {best_bracket*1e3:.3f}ms)" + ) + + self.libovr_to_wallclock_offset = best_offset + self.libovr_to_wallclock_bracket = best_bracket + return best_offset + except Exception as e: + logging.warning(f"[VR] LibOVR clock sync failed: {e}") + return None + + def log_display_info(self): + """ + Reads IPD, PPD, and display resolution from the LibOVR session and logs + them to the telemetry sidecar as header comment rows. + Returns (ppd, ipd_mm) for use in stimulus sizing. + """ + try: + ppta = self.pixelsPerTanAngleAtCenter + ppd_h = np.mean([p[0] for p in ppta]) * (np.pi / 180.0) + ppd_v = np.mean([p[1] for p in ppta]) * (np.pi / 180.0) + ppd = int(round(min(ppd_h, ppd_v))) + + eye_to_nose = self.eyeToNoseDistance + ipd_mm = (eye_to_nose[0] + eye_to_nose[1]) * 1000.0 + + logging.info( + f"[VR] IPD={ipd_mm:.1f}mm ppd={ppd} (h={ppd_h:.1f} v={ppd_v:.1f}) " + f"res={self.displayResolution} eye_buf={self.size}" + ) + + self.timing_data.insert(0, ['# ipd_mm', ipd_mm, 'ppd', ppd, f'ppd_h={ppd_h:.1f} ppd_v={ppd_v:.1f}']) + + return ppd, ipd_mm + except Exception as e: + logging.warning(f"[VR] Failed to read display info: {e}") + return None, None + + def log_telemetry(self, trial_idx, software_time): + """Extracts native LibOVR performance stats and buffers them in memory.""" + submitted_frame_index = None + app_frame_index = None + app_m2p_s = None + comp_latency_s = None + time_to_vsync_s = None + + try: + submitted_frame_index = self._frameIndex - 1 + 
perf = getattr(self, '_perfStats', None) + if perf is not None and perf.frameStatsCount > 0: + stat = perf.frameStats[0] + app_frame_index = stat.appFrameIndex + app_m2p_s = stat.appMotionToPhotonLatency + comp_latency_s = stat.compositorLatency + time_to_vsync_s = stat.timeToVsync + except Exception: + pass + + self.timing_data.append([ + trial_idx, software_time, + submitted_frame_index, app_frame_index, + app_m2p_s, comp_latency_s, time_to_vsync_s + ]) + + def save_telemetry(self, save_fn): + """Saves memory-buffered VR timing telemetry to a CSV sidecar. + """ + if save_fn is None: + return + + timing_path = save_fn.with_name(save_fn.stem + '_timing.csv') + with open(timing_path, 'w', newline='') as f: + writer = csv.writer(f) + writer.writerow([ + 'trial_idx', 'software_time', + 'submitted_frame_index', 'app_frame_index', + 'app_motion_to_photon_latency_s', 'compositor_latency_s', + 'time_to_vsync_s' + ]) + + if self.libovr_to_wallclock_offset is not None: + writer.writerow(['# libovr_to_wallclock_offset_s', self.libovr_to_wallclock_offset, 'bracket_ms', self.libovr_to_wallclock_bracket * 1000]) + + writer.writerows(self.timing_data) + print(f" Saved VR timing telemetry to {timing_path}") diff --git a/eegnb/experiments/BlockExperiment.py b/eegnb/experiments/BlockExperiment.py new file mode 100644 index 000000000..a728a21a5 --- /dev/null +++ b/eegnb/experiments/BlockExperiment.py @@ -0,0 +1,151 @@ +""" +BlockExperiment Class - Extends BaseExperiment with block-based functionality + +This class provides block-based experiment capabilities by inheriting from BaseExperiment +and overriding the run method to handle multiple blocks. It loads stimulus only once +and reuses it across blocks, while allowing block-specific instructions. + +Experiments that need block-based execution should inherit from this class instead of BaseExperiment. 
+""" +import gc +from abc import ABC +from time import time + +from .Experiment import BaseExperiment +from psychopy import core + + +class BlockExperiment(BaseExperiment, ABC): + """ + Inherits from BaseExperiment to provide block-based functionality. + + This class is designed for experiments that need to run as multiple blocks. + Each block has its own instructions and duration. It loads all stimuli at once, then re/uses it across blocks. + """ + + def __init__(self, exp_name, block_duration, eeg, save_fn, block_trial_size, n_blocks, iti: float, soa: float, jitter: float, + use_vr=False, use_fullscr=True, stereoscopic=False): + """ Initializer for the BlockExperiment Class + + Args: + exp_name (str): Name of the experiment + block_duration (float): Duration of each block in seconds + eeg: EEG device object for recording + save_fn (str): Save filename for data + block_trial_size (int): Number of trials per block + n_blocks (int): Number of blocks to run + iti (float): Inter-trial interval + soa (float): Stimulus on arrival + jitter (float): Random delay between stimulus + use_vr (bool): Use VR for displaying stimulus + use_fullscr (bool): Use fullscreen mode + """ + # Calculate total trials for the base class + total_trials = block_trial_size * n_blocks + + # Initialize BaseExperiment with total trials + # Pass None for duration if block_duration is None to ignore time spent in instructions + super().__init__(exp_name, block_duration, eeg, save_fn, total_trials, iti, soa, jitter, use_vr, use_fullscr, stereoscopic=stereoscopic) + + # Block-specific parameters + self.block_duration = block_duration + self.block_trial_size = block_trial_size + self.n_blocks = n_blocks + + # Current block index + self.current_block_index = 0 + + def present_block_instructions(self, current_block): + """ + Display instructions for the current block to the user. + + This method is meant to be overridden by child classes to provide + experiment-specific instructions before each block. 
The base implementation + simply flips the window without adding any text. + + This method is called by __show_block_instructions in a loop until the user + provides input to continue or cancel the experiment. + + Args: + current_block (int): The current block number (0-indexed), used to customize + instructions for specific blocks if needed. + """ + self.window.flip() + + def _show_block_instructions(self, block_number): + """ + Show instructions for a specific block + + Args: + block_number (int): Current block number (0-indexed) + + Returns: + tuple: (continue_experiment, instruction_end_time) + - continue_experiment (bool): Whether to continue the experiment + """ + + # Clear any previous input + self._clear_user_input() + + # Wait for user input to continue + while True: + # Display the instruction text + super()._draw(lambda: self.present_block_instructions(block_number)) + + if self._user_input('start'): + return True + elif self._user_input('cancel'): + return False + + def run(self, instructions=True): + """ + Run the experiment as a series of blocks + + This method overrides BaseExperiment.run() to handle multiple blocks. 
+ + Args: + instructions (bool): Whether to show the initial experiment instructions + """ + # Setup the experiment (creates window, loads stimulus once) + if not self.setup(instructions): + return False + + # Start EEG Stream once for all blocks + if self.eeg: + print("Wait for the EEG-stream to start...") + self.eeg.start(self.save_fn) + print("EEG Stream started") + + self._enable_frame_tracking() + + # Run each block + for block_index in range(self.n_blocks): + self.current_block_index = block_index + print(f"Starting block {block_index + 1} of {self.n_blocks}") + + # Show block-specific instructions + if not self._show_block_instructions(block_index): + break + + core.rush(True) + gc.disable() + try: + if self.use_vr: + self.vr.sync_vr_clock() + if not self._run_trial_loop(start_time=time(), duration=self.block_duration): + break + finally: + gc.enable() + core.rush(False) + + self._report_frame_stats() + + # Stop EEG Stream after all blocks + if self.eeg: + self.eeg.stop() + + if self.use_vr: + self.vr.save_telemetry(self.save_fn) + + # Close window at the end of all blocks + self.window.close() diff --git a/eegnb/experiments/Experiment.py b/eegnb/experiments/Experiment.py index 4d7cab119..2802e7440 100644 --- a/eegnb/experiments/Experiment.py +++ b/eegnb/experiments/Experiment.py @@ -11,23 +11,30 @@ from abc import abstractmethod, ABC from typing import Callable from eegnb.devices.eeg import EEG -from psychopy import prefs -from psychopy.visual.rift import Rift +from eegnb.devices.vr import VR +from psychopy import prefs, visual, event, core +import gc +import logging from time import time import random +import json +import csv import numpy as np from pandas import DataFrame -from psychopy import visual, event from eegnb import generate_save_fn +from eegnb.experiments import diagnostics +from eegnb.utils.display import snap_refresh_rate + +logger = logging.getLogger(__name__) class BaseExperiment(ABC): def __init__(self, exp_name, duration, eeg, save_fn, 
n_trials: int, iti: float, soa: float, jitter: float, - use_vr=False, use_fullscr = True, screen_num=0, stereoscopic = False, devices = list): + use_vr=False, use_fullscr = True, screen_num=0, stereoscopic = False, devices=None): """ Initializer for the Base Experiment Class Args: @@ -50,7 +57,7 @@ def __init__(self, exp_name, duration, eeg, save_fn, n_trials: int, iti: float, Press spacebar to continue. \n""".format(self.exp_name) self.duration = duration self.eeg: EEG = eeg - self.devices = devices + self.devices = devices if devices is not None else [] self.save_fn = save_fn self.n_trials = n_trials self.iti = iti @@ -61,11 +68,11 @@ def __init__(self, exp_name, duration, eeg, save_fn, n_trials: int, iti: float, self.stereoscopic = stereoscopic if use_vr: # VR interface accessible by specific experiment classes for customizing and using controllers. - self.rift: Rift = visual.Rift(monoscopic=not stereoscopic, headLocked=True) - # eye for presentation - if stereoscopic: - self.left_eye_x_pos = 0.2 - self.right_eye_x_pos = -0.2 + self.vr: VR = VR(monoscopic=not stereoscopic, headLocked=True) + + # Shift the display so it aligns perfectly with the center of each eye. + if use_vr and stereoscopic: + self.left_eye_x_pos, self.right_eye_x_pos = self.vr.compute_optical_axis_offsets() else: self.left_eye_x_pos = 0 self.right_eye_x_pos = 0 @@ -73,6 +80,26 @@ def __init__(self, exp_name, duration, eeg, save_fn, n_trials: int, iti: float, self.use_fullscr = use_fullscr self.window_size = [1600,800] + # Diagnostic results — populated by run()/setup(), read by show_diagnostics() + self.signal_check = None + self.display_check = None + + # Marker observers: callables (trial_idx, timestamp) invoked on every + # push_marker(). Used by integrations that want timing context but + # don't emit a hardware/software marker themselves (e.g. VR compositor + # telemetry, eyetracker fixation logs, photodiode metadata sidecars). 
+ # Hardware/software *emitters* (Cyton, XID, kernelflow, etc.) live in + # self.devices and are dispatched via push_sample, not this list. + self.marker_listeners: list = [] + self.monitor_timing_data: list = [] + + if not self.use_vr: + def _record_monitor_timing(trial_idx, timestamp, flip_time=None): + # timestamp is software_time (when marker is pushed) + # flip_time is when the frame actually appeared + self.monitor_timing_data.append([trial_idx, timestamp, flip_time]) + self.marker_listeners.append(_record_monitor_timing) + # Initializing the marker names self.markernames = [1, 2] @@ -80,6 +107,7 @@ def __init__(self, exp_name, duration, eeg, save_fn, n_trials: int, iti: float, self.parameter = np.random.binomial(1, 0.5, self.n_trials) self.trials = DataFrame(dict(parameter=self.parameter, timestamp=np.zeros(self.n_trials))) + @abstractmethod def load_stimulus(self): """ @@ -113,18 +141,59 @@ def present_iti(self): """ self.window.flip() + def present_soa(self, idx: int): + """ + Method called each frame during the SOA wait (stimulus-on period between trial transitions). + + Recommended for VR: override this to redraw the stimulus for trial `idx`. VR compositors + prefer a freshly drawn frame each submission; submitting only a flip leads + the compositor to treat frames as stale, which can drop to half-rate + reprojection and increase dropped/late frames. Overriding gives smoother + presentation and more accurate frame timing. + + idx : Trial index of the most recently presented stimulus — same value that was + passed to the preceding present_stimulus call. 
+ """ + raise NotImplementedError + + def _draw_blank_frame(self): + """Draw a blank frame and flip — used for display rate measurement.""" + self._draw(self.window.flip) + def setup(self, instructions=True): # Setting up Graphics - self.window = ( - self.rift if self.use_vr - else visual.Window(self.window_size, monitor="testMonitor", units="deg", - screen = self.screen_num, fullscr=self.use_fullscr)) - + if self.use_vr: + self.window = self.vr + self.display_check = self.vr.validate_frame_rate(self._draw_blank_frame) + # Capture per-marker compositor stats alongside each EEG trigger. + self.marker_listeners.append(self.vr.log_telemetry) + else: + self.window = visual.Window(self.window_size, + monitor="testMonitor", + units="deg", + screen=self.screen_num, + fullscr=self.use_fullscr) + actual_hz = self.window.getActualFrameRate() + self.display_check = { + 'target_hz': round(actual_hz, 1) if actual_hz else None, + 'actual_hz': round(actual_hz, 1) if actual_hz else None, + 'deviation_pct': 0.0, + 'ok': actual_hz is not None, + } + self.window.mouseVisible = False + + # Snap the target rate to the nearest standard panel rate so + # downstream stimulus code can rely on a clean integer Hz. + target = self.display_check.get('target_hz') + self.refresh_rate = snap_refresh_rate(target) if target else None + # Loading the stimulus from the specific experiment, throws an error if not overwritten in the specific experiment self.stim = self.load_stimulus() - - # Show Instruction Screen if not skipped by the user + + # Show diagnostics screen first if anything's wrong, then instructions. 
if instructions: + if not self.show_diagnostics(): + return False if not self.show_instructions(): return False @@ -151,10 +220,8 @@ def show_instructions(self): """ # Splitting instruction text into lines - self.instruction_text = self.instruction_text % self.duration - - # Disabling the cursor during display of instructions - self.window.mouseVisible = False + if '%s' in self.instruction_text: + self.instruction_text = self.instruction_text % self.duration # clear/reset any old key/controller events self._clear_user_input() @@ -165,13 +232,97 @@ def show_instructions(self): text = visual.TextStim(win=self.window, text=self.instruction_text, color=[-1, -1, -1]) self._draw(lambda: self.__draw_instructions(text)) - # Enabling the cursor again - self.window.mouseVisible = True + if self._user_input('cancel'): + return False + return True + + def show_diagnostics(self): + """Display a pre-experiment diagnostics screen — only when flagged. + + Shows synthetic-device warning, degraded-framerate warning, and + noisy electrode warning. If none are flagged the screen is skipped + entirely so the experimenter goes straight to the instructions. + Returns True to continue, False if the user cancels. + """ + warnings = diagnostics.format_diagnostic_warnings( + device_name=self.eeg.device_name if self.eeg else None, + display=self.display_check, + signal_check=self.signal_check, + ) + if not warnings: + return True + + body = "\n\n".join(warnings) + body += "\n\nFix the issues above, or press spacebar / index trigger to continue anyway." 
+ + self._clear_user_input() + while not self._user_input('start'): + text = visual.TextStim( + win=self.window, text=body, + color=[1, 1, 1], font='Courier New', + height=0.04, wrapWidth=1.8, units='norm', + anchorHoriz='center', anchorVert='center', + ) + self._draw(lambda: self.__draw_instructions(text)) if self._user_input('cancel'): return False return True + def show_results(self, text): + """Display a results / summary screen after the experiment. + + Mirrors ``show_instructions``: loops the render loop until user input. Uses a + monospaced font so tabular output (e.g. recording quality reports) + aligns correctly. + + Args: + text (str): Text to display. Long lines are wrapped automatically. + """ + self._clear_user_input() + + stim = visual.TextStim( + win=self.window, text=text, + color=[1, 1, 1], # white on black background + font='Courier New', + height=0.04, + wrapWidth=1.8, + units='norm', + anchorHoriz='center', anchorVert='center', + ) + + while not self._user_input('start'): + self._draw(lambda: self.__draw_results(stim)) + if self._user_input('cancel'): + break + + def __draw_results(self, stim): + if self.use_vr and self.stereoscopic: + for eye, x_pos in [("left", self.left_eye_x_pos), + ("right", self.right_eye_x_pos)]: + self.window.setBuffer(eye) + stim.pos = (x_pos, 0) + stim.draw() + else: + stim.draw() + self.window.flip() + + def post_run(self): + """Called after the trial loop ends, before the window closes. + + Default: display a recording quality report so the experimenter can + decide whether to re-record before removing the electrodes. Override in a + subclass or reassign on the instance to replace this behaviour. + """ + if not self.save_fn: + return + try: + text = diagnostics.post_session_report(self.save_fn) + text += "\n\nPress spacebar or index trigger to close." 
+ self.show_results(text) + except Exception as e: + print(f"[post_run] Could not display quality report: {e}") + def _user_input(self, input_type): if input_type == 'start': key_input = 'spacebar' @@ -215,13 +366,13 @@ def get_vr_input(self, vr_controller, button=None, trigger=False): """ trigger_squeezed = False if trigger: - for x in self.rift.getIndexTriggerValues(vr_controller): + for x in self.vr.getIndexTriggerValues(vr_controller): if x > 0.0: trigger_squeezed = True button_pressed = False if button is not None: - button_pressed, tsec = self.rift.getButtons([button], vr_controller, 'released') + button_pressed, tsec = self.vr.getButtons([button], vr_controller, 'released') if trigger_squeezed or button_pressed: return True @@ -247,6 +398,7 @@ def _draw(self, present_stimulus: Callable): tracking_state = self.window.getTrackingState() self.window.calcEyePoses(tracking_state.headPose.thePose) self.window.setDefaultView() + present_stimulus() def _clear_user_input(self): @@ -258,7 +410,7 @@ def clear_vr_input(self): Clears/resets input events from vr controllers """ if self.use_vr: - self.rift.updateInputState() + self.vr.updateInputState() def _run_trial_loop(self, start_time, duration): """ @@ -272,6 +424,14 @@ def _run_trial_loop(self, start_time, duration): """ + if self.use_vr and type(self).present_soa is BaseExperiment.present_soa: + raise NotImplementedError( + f"{type(self).__name__} uses VR but does not override present_soa(idx). " + "psychxr does not honor setAutoDraw, and the VR compositor requires per-frame " + "redraws during the SOA wait; the default flip-only implementation will blank " + "the stimulus after one frame. Override present_soa(idx) to redraw your stimulus." 
+ ) + def iti_with_jitter(): return self.iti + np.random.rand() * self.jitter @@ -279,6 +439,7 @@ def iti_with_jitter(): current_trial = trial_end_time = -1 trial_start_time = None rendering_trial = -1 + has_soa_override = type(self).present_soa is not BaseExperiment.present_soa # Clear/reset user input buffer self._clear_user_input() @@ -302,6 +463,10 @@ def iti_with_jitter(): # Stimulus presentation overwritten by specific experiment self._draw(lambda: self.present_stimulus(current_trial)) rendering_trial = current_trial + elif has_soa_override: + # Keep submitting frames during SOA wait — VR compositor + # drops to lower framerate if we stall between reversals. + self._draw(lambda: self.present_soa(current_trial)) else: self._draw(lambda: self.present_iti()) @@ -310,8 +475,61 @@ def iti_with_jitter(): return True + def _enable_frame_tracking(self): + """Enable per-frame interval recording for dropped frame diagnostics.""" + self.window.recordFrameIntervals = True + rate = self.window.displayRefreshRate if self.use_vr else self.window.getActualFrameRate() + self.display_refresh_rate = int(np.round(rate)) if rate else None + # Threshold for counting a frame as "dropped" — 50% over expected duration + expected_frame_dur = 1.0 / (rate or 60) + self.window.refreshThreshold = expected_frame_dur * 1.5 + + def _report_frame_stats(self): + """Print frame timing summary and save intervals alongside recording.""" + intervals = self.window.frameIntervals + if not intervals: + return + + intervals_ms = [i * 1000 for i in intervals] + dropped = self.window.nDroppedFrames + total = len(intervals) + mean_ms = np.mean(intervals_ms) + std_ms = np.std(intervals_ms) + max_ms = max(intervals_ms) + + print(f"\nFrame timing: {total} frames, {dropped} dropped ({dropped/total*100:.1f}%)") + print(f" Refresh rate: {self.display_refresh_rate} Hz") + print(f" Mean: {mean_ms:.2f}ms Std: {std_ms:.2f}ms Max: {max_ms:.2f}ms") + + if self.save_fn: + stats_path = 
self.save_fn.with_name(self.save_fn.stem + '_frame_stats.json') + with open(stats_path, 'w') as f: + json.dump({ + 'display_refresh_rate_hz': self.display_refresh_rate, + 'total_frames': total, + 'dropped_frames': dropped, + 'mean_ms': round(mean_ms, 3), + 'std_ms': round(std_ms, 3), + 'max_ms': round(max_ms, 3), + 'intervals_ms': [round(i, 3) for i in intervals_ms] + }, f, indent=2) + print(f" Saved to {stats_path}") + + def _save_monitor_telemetry(self): + """Saves memory-buffered monitor timing telemetry to a CSV sidecar.""" + if self.save_fn is None or not self.monitor_timing_data: + return + + timing_path = self.save_fn.with_name(self.save_fn.stem + '_timing.csv') + with open(timing_path, 'w', newline='') as f: + writer = csv.writer(f) + writer.writerow(['trial_idx', 'software_time', 'flip_time']) + writer.writerows(self.monitor_timing_data) + print(f" Saved monitor timing telemetry to {timing_path}") + def run(self, instructions=True): """ Run the experiment """ + self.signal_check = diagnostics.check_signal_quality(self.eeg) # Setup the experiment self.setup(instructions) @@ -323,11 +541,22 @@ def run(self, instructions=True): self.eeg.start(self.save_fn, duration=self.duration + 5) print("EEG Stream started") + self._enable_frame_tracking() + # Record experiment until a key is pressed or duration has expired. record_start_time = time() - # Run the trial loop - self._run_trial_loop(record_start_time, self.duration) + core.rush(True) + gc.disable() + try: + if self.use_vr: + self.vr.sync_vr_clock() + self._run_trial_loop(record_start_time, self.duration) + finally: + gc.enable() + core.rush(False) + + self._report_frame_stats() # Clearing the screen for the next trial event.clearEvents() @@ -336,20 +565,43 @@ def run(self, instructions=True): if self.eeg: self.eeg.stop() + if self.use_vr: + self.vr.save_telemetry(self.save_fn) + else: + self._save_monitor_telemetry() + + # Post-run hook (e.g. 
show a summary / quality report screen) + self.post_run() + # Closing the window self.window.close() - def send_triggers(self, marker): - """Send timing triggers to recording device[s]""" + def push_marker(self, marker, trial_idx): + """Push a trigger to the primary EEG and every additional device in + self.devices, then notify any registered marker_listeners with + (trial_idx, timestamp). + + Emitters (self.eeg, self.devices) record the marker value into their + respective streams — Cyton's marker channel, XID's TTL output, + muselsl's lsl marker stream, etc. + Listeners (self.marker_listeners) receive only the timing context — + they're observers that capture additional state at marker time + (VR compositor telemetry, eyetracker fixation, photodiode metadata). + """ + timestamp = time() + if self.eeg: + self.eeg.push_sample(marker=marker, timestamp=timestamp) for dev in self.devices: - timestamp = time() dev.push_sample(marker=marker, timestamp=timestamp) - + for listener in self.marker_listeners: + try: + listener(trial_idx, timestamp) + except Exception: + logger.exception("marker listener failed: %s", listener) @property def name(self) -> str: """ This experiment's name """ return self.exp_name - diff --git a/eegnb/experiments/__init__.py b/eegnb/experiments/__init__.py index 8532c0f76..2cd3097d6 100644 --- a/eegnb/experiments/__init__.py +++ b/eegnb/experiments/__init__.py @@ -1,27 +1,66 @@ -from .visual_n170.n170 import VisualN170 -from .visual_p300.p300 import VisualP300 -from .visual_ssvep.ssvep import VisualSSVEP +from typing import TYPE_CHECKING -from psychopy import sound, plugins, prefs -import platform +from eegnb.utils.missing import missing_class -# PTB does not yet support macOS Apple Silicon freely, need to fall back to sounddevice. 
-if platform.system() == 'Darwin' and platform.machine() == 'arm64': - # import psychopy_sounddevice.backend_sounddevice - plugins.scanPlugins() - success = plugins.loadPlugin('psychopy-sounddevice') - print(f"psychopy_sounddevice plugin loaded: {success}") +MissingExperiment = missing_class( + "PsychoPy", + "Stimulus presentation experiments", + "stimpres", +) - # Force reload sound module - import importlib - importlib.reload(sound) - # setting prefs.hardware['audio_device'] still falls back to a default device, need to use setDevice. - audio_device = prefs.hardware.get('audioDevice', 'default') - if audio_device and audio_device != 'default': - sound.setDevice(audio_device) +if TYPE_CHECKING: + from .visual_n170.n170 import VisualN170 + from .visual_p300.p300 import VisualP300 + from .visual_ssvep.ssvep import VisualSSVEP else: - #change the pref library to PTB and set the latency mode to high precision - prefs.hardware['audioLib'] = 'PTB' - prefs.hardware['audioLatencyMode'] = 3 + try: + from .visual_n170.n170 import VisualN170 + from .visual_p300.p300 import VisualP300 + from .visual_ssvep.ssvep import VisualSSVEP + except ImportError: + VisualN170 = MissingExperiment + VisualP300 = MissingExperiment + VisualSSVEP = MissingExperiment -from .auditory_oddball.aob import AuditoryOddball \ No newline at end of file +try: + from psychopy import sound, plugins, prefs + import platform + import logging + + # PTB does not yet support macOS Apple Silicon freely, need to fall back to sounddevice. 
+ if platform.system() == 'Darwin' and platform.machine() == 'arm64': + # import psychopy_sounddevice.backend_sounddevice + plugins.scanPlugins() + success = plugins.loadPlugin('psychopy-sounddevice') + print(f"psychopy_sounddevice plugin loaded: {success}") + + # Force reload sound module + import importlib + importlib.reload(sound) + + # Try to set the audio device if requested and available + audio_device = prefs.hardware.get('audioDevice', 'default') + if audio_device and audio_device != 'default': + if hasattr(sound, 'setDevice'): + try: + sound.setDevice(audio_device) + except Exception as e: + logging.warning(f"Failed to set audio device to '{audio_device}': {e}") + else: + logging.warning(f"sound.setDevice not available, could not set device to '{audio_device}'") + else: + #change the pref library to PTB and set the latency mode to high precision + prefs.hardware['audioLib'] = 'PTB' + prefs.hardware['audioLatencyMode'] = 3 +except ImportError: + import logging + # logging.warning("PsychoPy not found. Stimulus presentation experiments will not be available.") + pass + +if TYPE_CHECKING: + from .auditory_oddball.aob import AuditoryOddball +else: + try: + from .auditory_oddball.aob import AuditoryOddball + except ImportError: + AuditoryOddball = MissingExperiment diff --git a/eegnb/experiments/diagnostics.py b/eegnb/experiments/diagnostics.py new file mode 100644 index 000000000..a56044ef7 --- /dev/null +++ b/eegnb/experiments/diagnostics.py @@ -0,0 +1,184 @@ +"""Pre- and post-experiment diagnostic checks. + +Used by ``eegnb.experiments.Experiment`` to validate the experimental setup +(display frame rate, electrode contact) before the trial loop starts, and to +summarise recording quality afterwards. + +Each function is a pure(-ish) routine that takes runtime objects and returns +a result dict or string. The Experiment class is responsible for calling them +at the right point and rendering the output. 
Keeping these out of +Experiment.py makes that class about *what* the experiment does rather than +*how* the setup is validated. +""" +from time import sleep +import pathlib + +import numpy as np + + +# --------------------------------------------------------------------------- +# Pre-experiment signal quality check +# +# A brief read of the EEG amplifier's incoming signal to catch obviously +# broken channels (loose electrode, dead reference) before a session +# starts. +# +# This pre-flight check catches gross failures +# (no contact, broken wire, badly seated electrode); subtler problems +# like high-but-uniform noise or slow drift are caught by the post-session +# quality report instead. +# --------------------------------------------------------------------------- + +# Flag a channel only when both thresholds are exceeded — keeps the warning +# rate low for normal-but-noisy sessions. +SIGNAL_NOISE_FLAG_UV = 200.0 # absolute floor: nothing usable above this +SIGNAL_NOISE_REL_FACTOR = 3.0 # relative to montage median + + +def check_signal_quality(eeg, n_seconds=3): + """Read a brief EEG buffer and flag clearly broken channels. + + Used before a recording starts to catch hardware problems (loose + electrode, dead reference) before the recording begins. + The function reads ``n_seconds`` of live signal from the amplifier, + detrends each channel (1-second rolling-mean subtraction so DC drift + doesn't dominate), and computes the standard deviation as a baseline- + noise estimate. + + A channel is flagged only when its noise is BOTH: + - above ``SIGNAL_NOISE_FLAG_UV`` µV in absolute terms, AND + - more than ``SIGNAL_NOISE_REL_FACTOR`` × the group median. + + Both conditions are required so that a session where every channel is + a bit noisy doesn't trigger spurious warnings — only individually + broken channels are surfaced. + + Note: this is *not* an impedance check. 
Real impedance measurement + requires putting the amplifier into a dedicated test mode and is not + supported by all BrainFlow backends. Baseline noise is a proxy that + works for the common failure mode (loose / dry electrodes). + + Returns: + Dict with: + ``stds`` per-channel detrended std (µV) + ``median`` group median (µV) + ``flagged`` list of ``(channel, std_uv)`` for broken contacts + ``skipped`` True if not run (non-brainflow backend or error) + """ + result = {'stds': {}, 'median': None, 'flagged': [], 'skipped': True} + + if not eeg or getattr(eeg, 'backend', None) != 'brainflow': + return result + + try: + sfreq = int(getattr(eeg, 'sfreq', 250)) + n_samples = sfreq * n_seconds + + eeg._start_brainflow() + sleep(n_seconds) + raw = eeg.board.get_current_board_data(n_samples) + ch_names, eeg_data, _ = eeg._brainflow_extract(raw) + + # Stop so the main eeg.start() can restart cleanly later + eeg.board.stop_stream() + eeg.board.release_session() + eeg_data = np.array(eeg_data) + win = sfreq + for ch_name, x in zip(ch_names, eeg_data): + if len(x) < win: + std = float(np.std(x)) + else: + rolling = np.convolve(x, np.ones(win) / win, mode='same') + std = float(np.std(x - rolling)) + result['stds'][ch_name] = round(std, 1) + + if result['stds']: + med = float(np.median(list(result['stds'].values()))) + result['median'] = round(med, 1) + + # If the median itself is huge, the reference/ground is likely bad, + # so the relative check (3x median) will hide the noise. In this case, + # just flag anything over the absolute threshold. 
+ bad_ref_mode = med > SIGNAL_NOISE_FLAG_UV + + for ch, s in result['stds'].items(): + if bad_ref_mode: + if s > SIGNAL_NOISE_FLAG_UV: + result['flagged'].append((ch, s)) + else: + if s > SIGNAL_NOISE_FLAG_UV and s > SIGNAL_NOISE_REL_FACTOR * med: + result['flagged'].append((ch, s)) + + result['skipped'] = False + print(f"[signal-check] {len(ch_names)} ch over {n_seconds}s — " + f"median noise = {result['median']} µV, " + f"flagged: {[c for c, _ in result['flagged']] or 'none'}") + return result + except Exception as e: + print(f"[signal-check] skipped — {e}") + return result + + +# --------------------------------------------------------------------------- +# Post-session report +# --------------------------------------------------------------------------- + +def post_session_report(save_fn): + """Build the recording quality report string for display after a session.""" + from eegnb.analysis.recording_quality import report_session + return report_session(pathlib.Path(save_fn).parent) + + +# --------------------------------------------------------------------------- +# Diagnostics screen formatting +# --------------------------------------------------------------------------- + +def format_diagnostic_warnings(*, device_name=None, display=None, signal_check=None): + """Build warnings for the pre-experiment diagnostics screen. + + Returns a list of strings — empty if everything's fine. + """ + warnings = [] + + if device_name and 'synthetic' in device_name.lower(): + warnings.append( + "[!] SYNTHETIC EEG DEVICE — NO REAL DATA WILL BE RECORDED\n" + " Set eeg_device = EEG(device, ...) in your run script before re-running." + ) + + if display and not display.get('ok', True): + warnings.append( + f"[!] 
DISPLAY WARNING — frame delivery severely degraded\n" + f" Target: {display['target_hz']:.0f} Hz - " + f"Measured: {display['actual_hz']:.1f} Hz " + f"({display['deviation_pct']:.1f}% off)\n" + f" Likely cause: wrong GPU selected, or GPU acceleration disabled.\n" + f" Fix: set python.exe to NVIDIA in NVIDIA Control Panel and\n" + f" Windows Graphics Settings, then restart." + ) + + if signal_check and signal_check.get('flagged'): + n_flagged = len(signal_check['flagged']) + n_total = len(signal_check.get('stds', {})) + ch_info = ", ".join(f"{ch} ({std:.0f} µV)" for ch, std in signal_check['flagged']) + + msg = ( + f"[!] SIGNAL QUALITY WARNING\n" + f" Channels with abnormally high noise: {ch_info}\n" + f" (group median is {signal_check['median']} µV)\n" + ) + + if n_total > 0 and n_flagged >= (n_total / 2.0): + msg += ( + f" Likely cause: Bad Reference (M1) or Ground (A2) connection.\n" + f" When noise is universally high across the head, the shared\n" + f" reference is usually loose or dry. Re-seat M1 and A2." + ) + else: + msg += ( + f" Likely cause: loose electrode or dry paste. Re-seat the\n" + f" listed contact(s) before continuing." 
+ ) + warnings.append(msg) + + return warnings diff --git a/eegnb/experiments/rest/eoec.py b/eegnb/experiments/rest/eoec.py index 38ed467db..6087459a7 100644 --- a/eegnb/experiments/rest/eoec.py +++ b/eegnb/experiments/rest/eoec.py @@ -113,17 +113,9 @@ def present_stimulus(self, idx: int): timestamp = time() self.trials.at[idx, "timestamp"] = timestamp self.outlet.push_sample([self.markernames[label]], timestamp) - - if self.eeg: - if self.eeg.backend == "muselsl": - marker = [self.markernames[label]] - else: - marker = self.markernames[label] - self.eeg.push_sample(marker=marker, timestamp=timestamp) - - if self.devices: - marker = self.markernames[label] - self.send_triggers(marker) + + if self.eeg or self.devices: + self.push_marker(self.markernames[label], idx) if self.serial: try: @@ -136,12 +128,19 @@ def present_stimulus(self, idx: int): else: self.close_sound.play() + self._draw_block_cue(label) + + def _draw_block_cue(self, label): if label == 0: self.fixation.draw() else: self.close_text.draw() self.window.flip() + def present_soa(self, idx: int): + label = self.trials["parameter"].iloc[idx] + self._draw_block_cue(label) + def run(self, instructions: bool = True): try: super().run(instructions) diff --git a/eegnb/experiments/visual_n170/n170.py b/eegnb/experiments/visual_n170/n170.py index 231381ff7..723e8d251 100644 --- a/eegnb/experiments/visual_n170/n170.py +++ b/eegnb/experiments/visual_n170/n170.py @@ -36,32 +36,20 @@ def load_stimulus(self): return [self.houses, self.faces] def present_stimulus(self, idx: int): - # Get the label of the trial label = self.trials["parameter"].iloc[idx] # Get the image to be presented - image = choice(self.faces if label == 1 else self.houses) + self._current_image = choice(self.faces if label == 1 else self.houses) # Draw the image - image.draw() - - - # Pushing the sample to the EEG - if self.eeg: - timestamp = time() + self._current_image.draw() - if self.eeg.backend == "muselsl": - marker = 
[self.markernames[label]] - else: - marker = self.markernames[label] - - self.eeg.push_sample(marker=marker, timestamp=timestamp) - - - if self.devices: - marker = self.markernames[label] - self.send_triggers(marker) + if self.eeg or self.devices: + self.push_marker(self.markernames[label], idx) + self.window.flip() + def present_soa(self, idx: int): + self._current_image.draw() self.window.flip() diff --git a/eegnb/experiments/visual_p300/p300.py b/eegnb/experiments/visual_p300/p300.py index d6a8a1df3..8e6974241 100644 --- a/eegnb/experiments/visual_p300/p300.py +++ b/eegnb/experiments/visual_p300/p300.py @@ -38,8 +38,8 @@ def load_stimulus(self): def present_stimulus(self, idx: int): label = self.trials["parameter"].iloc[idx] - image = choice(self.targets if label == 1 else self.nontargets) - image.draw() + self._current_image = choice(self.targets if label == 1 else self.nontargets) + self._current_image.draw() # Push sample if self.eeg: @@ -50,4 +50,8 @@ def present_stimulus(self, idx: int): marker = self.markernames[label] self.eeg.push_sample(marker=marker, timestamp=timestamp) + self.window.flip() + + def present_soa(self, idx: int): + self._current_image.draw() self.window.flip() \ No newline at end of file diff --git a/eegnb/experiments/visual_vep/__init__.py b/eegnb/experiments/visual_vep/__init__.py index e69de29bb..514a3492b 100644 --- a/eegnb/experiments/visual_vep/__init__.py +++ b/eegnb/experiments/visual_vep/__init__.py @@ -0,0 +1,10 @@ +"""Visual Evoked Potential (VEP) experiments module. + +This module contains experiments for measuring visual evoked potentials, +including pattern reversal VEP for assessing the P100 component. 
+""" + +from .grating_vep import VisualGratingVEP +from .pattern_reversal_vep import VisualPatternReversalVEP + +__all__ = ['VisualGratingVEP', 'VisualPatternReversalVEP'] diff --git a/eegnb/experiments/visual_vep/vep.py b/eegnb/experiments/visual_vep/grating_vep.py similarity index 97% rename from eegnb/experiments/visual_vep/vep.py rename to eegnb/experiments/visual_vep/grating_vep.py index 11d076cc4..c661b94c0 100644 --- a/eegnb/experiments/visual_vep/vep.py +++ b/eegnb/experiments/visual_vep/grating_vep.py @@ -5,7 +5,7 @@ from eegnb.devices.eeg import EEG -class VisualVEP(Experiment.BaseExperiment): +class VisualGratingVEP(Experiment.BaseExperiment): def __init__(self, duration=120, eeg: Optional[EEG]=None, save_fn=None, diff --git a/eegnb/experiments/visual_vep/pattern_reversal_vep.py b/eegnb/experiments/visual_vep/pattern_reversal_vep.py new file mode 100644 index 000000000..3bfca6ce9 --- /dev/null +++ b/eegnb/experiments/visual_vep/pattern_reversal_vep.py @@ -0,0 +1,350 @@ +import logging +import random +import numpy as np + +from psychopy import visual +from typing import Optional, Dict, Any +from eegnb.devices.eeg import EEG +from eegnb.experiments.BlockExperiment import BlockExperiment +from eegnb.analysis.vep_utils import ISCEV_CHECK_DEG_LARGE, ISCEV_CHECK_DEG_SMALL +from stimupy.stimuli.checkerboards import contrast_contrast + +# ISCEV PR-VEP standard +ISCEV_FIELD_DEG = 16.0 +ISCEV_MEAN_LUM = 0.0 + +VR_DIODE_BRIGHT = +1.0 +VR_DIODE_DARK = 0.0 + +# Block conditions: 4 possible combinations of (eye, size) +CONDITIONS = [ + {'eye': 'left', 'size_name': 'large', 'size_idx': 0, 'check_deg': ISCEV_CHECK_DEG_LARGE}, + {'eye': 'right', 'size_name': 'large', 'size_idx': 0, 'check_deg': ISCEV_CHECK_DEG_LARGE}, + {'eye': 'left', 'size_name': 'small', 'size_idx': 1, 'check_deg': ISCEV_CHECK_DEG_SMALL}, + {'eye': 'right', 'size_name': 'small', 'size_idx': 1, 'check_deg': ISCEV_CHECK_DEG_SMALL}, +] + +# Hierarchical event tags → integer marker codes. 
The slash-delimited tags let + # MNE epoch by partial match (e.g. event_id key 'rev/left' selects both sizes). + # Kept stable across recordings so analysis can hard-code this dict. + EVENTS = { + f"rev/{c['eye']}/{c['size_name']}": 1 + i for i, c in enumerate(CONDITIONS) + } + + + class VisualPatternReversalVEP(BlockExperiment): + + def __init__(self, eeg: Optional[EEG] = None, save_fn=None, + block_duration_seconds: int = 50, + block_trial_size: int = 100, + reps_per_condition: int = 2, + use_vr: bool = False, + use_fullscr: bool = True): + """ + Pattern Reversal VEP with two check sizes, counterbalanced across blocks. + + Block schedule: 4 shuffled conditions (left/right eye, large/small check) × + ``reps_per_condition`` blocks. Each reversal is marked with a code 1–4 + identifying the condition; the condition is fully recoverable from the + reversal marker stream alone. + """ + n_conditions = 4 + n_blocks = n_conditions * reps_per_condition + soa = 0.5 + + super().__init__( + "Visual Pattern Reversal VEP", + block_duration_seconds, eeg, save_fn, + block_trial_size, n_blocks, + iti=0, soa=soa, jitter=0, + use_vr=use_vr, use_fullscr=use_fullscr, stereoscopic=True, + ) + + self.instruction_text = ( + f"Welcome to the Pattern Reversal VEP experiment!\n\n" + f"{n_blocks} blocks of {block_duration_seconds} s each.\n" + f"(left/right eye × large/small checks)\n\n" + f"Press spacebar or trigger to continue." + ) + + # Build block schedule grouped by eye. 
+ left_eye_blocks = [i for i, c in enumerate(CONDITIONS) if c['eye'] == 'left'] * reps_per_condition + right_eye_blocks = [i for i, c in enumerate(CONDITIONS) if c['eye'] == 'right'] * reps_per_condition + + random.shuffle(left_eye_blocks) + random.shuffle(right_eye_blocks) + + # Randomize which eye goes first + if random.random() < 0.5: + self.block_labels = left_eye_blocks + right_eye_blocks + else: + self.block_labels = right_eye_blocks + left_eye_blocks + + logging.info("[PRVEP] block schedule: %s", self.block_labels) + + # Expand into a per-trial parameter array. + self.parameter = np.array( + [lbl for lbl in self.block_labels for _ in range(block_trial_size)] + ) + + + # ------------------------------------------------------------------ + # Stimulus creation helpers + # ------------------------------------------------------------------ + + @staticmethod + def make_checker_image(intensity_checks, check_deg, field_deg=ISCEV_FIELD_DEG, ppd=72): + cpd = 1.0 / (2.0 * check_deg) + return contrast_contrast( + visual_size=(field_deg, field_deg), + ppd=ppd, + frequency=(cpd, cpd), + intensity_checks=intensity_checks, + target_shape=(0, 0), + alpha=0, + tau=0, + )['img'] + + def load_stimulus(self) -> Dict[str, Any]: + refresh_rate = self.refresh_rate + + reversals_per_sec = 1 / self.soa + assert refresh_rate % reversals_per_sec == 0, ( + f"Frame rate {refresh_rate} Hz must be an integer multiple of " + f"{reversals_per_sec} Hz reversal rate" + ) + + if self.use_vr: + ppd, _ = self.vr.log_display_info() + logging.info( + "[PRVEP-HMD] optical_axis_ndc=L%+.3f/R%+.3f", + self.left_eye_x_pos, self.right_eye_x_pos, + ) + tex_px = int(round(ISCEV_FIELD_DEG * ppd)) + stim_size_px = (tex_px, tex_px) + else: + ppd = 72 + stim_size_px = (self.window_size[1], self.window_size[1]) + + self.grey_background = visual.Rect( + self.window, + width=self.window.size[0], height=self.window.size[1], + fillColor=[ISCEV_MEAN_LUM] * 3, + ) + self.black_background = visual.Rect( + self.window, 
+ width=self.window.size[0], height=self.window.size[1], + fillColor='black', + ) + + self._flash_size_px = 100 # flat-monitor corner patch (px) + self._vr_patch_size_px = 1000 # VR centred patch (px square) + + if self.use_vr: + patch_size_px = self._vr_patch_size_px + bright_color = (VR_DIODE_BRIGHT,) * 3 + dark_color = (VR_DIODE_DARK,) * 3 + patch_pos = (0, 0) + else: + patch_size_px = self._flash_size_px + bright_color = (1, 1, 1) + dark_color = (-1, -1, -1) + bw, bh = self.window.size[0], self.window.size[1] + patch_pos = ( + (bw - patch_size_px) / 2, + -(bh - patch_size_px) / 2, + ) + + self.photodiode_patch_bright = visual.Rect( + self.window, + width=patch_size_px, height=patch_size_px, + units='pix', + fillColor=bright_color, + pos=patch_pos, + ) + self.photodiode_patch_dark = visual.Rect( + self.window, + width=patch_size_px, height=patch_size_px, + units='pix', + fillColor=dark_color, + pos=patch_pos, + ) + + def make_checker_stim(intensity_checks, check_deg, pos): + return visual.ImageStim( + self.window, + image=self.make_checker_image(intensity_checks, check_deg, ppd=ppd), + units='pix', size=stim_size_px, color='white', pos=pos, + ) + + # Fixation cross: explicit '+' polygon so arm length and arm width are + # independent. At low VR ppd (~20) the old single-size ShapeStim + # rendered as a ~5 px blob that looked like a diamond and connected + # visually with nearby checkerboard corners (scintillating-grid effect). + # Arm half-length 0.3° keeps the cross inside one check cell at the + # small-check size (0.5°); arm width 0.12° gives a clearly legible + # stroke at VR ppd without occluding foveal stimulation. 
+ FIX_ARM_DEG = 0.30 # half-length from centre to each arm tip + FIX_W_DEG = 0.12 # arm width (stroke thickness) + arm_px = max(6, int(round(FIX_ARM_DEG * ppd))) + w_px = max(2, int(round(FIX_W_DEG * ppd))) + + def _cross_verts(a, w): + h = w / 2 + return [(-h, a), (h, a), (h, h), (a, h), (a, -h), + (h, -h), (h, -a), (-h, -a), (-h, -h), (-a, -h), + (-a, h), (-h, h)] + + def make_fixation(pos_px): + return visual.ShapeStim( + win=self.window, pos=pos_px, + vertices=_cross_verts(arm_px, w_px), + units='pix', closeShape=True, + fillColor=[1, -1, -1], lineColor=[-1, -1, -1], lineWidth=1, + ) + + self.instruction_stim = visual.TextStim( + win=self.window, + text="", + color=[-1, -1, -1], + pos=(0, 0), + height=0.08, + units='norm', + wrapWidth=1.8, + ) + + def make_eye_stimuli(eye_x_pix): + # Two check-size variants per eye: index 0 = large, 1 = small. + return { + 'checkerboards': [ + [ + make_checker_stim((1, -1), ISCEV_CHECK_DEG_LARGE, (eye_x_pix, 0)), + make_checker_stim((-1, 1), ISCEV_CHECK_DEG_LARGE, (eye_x_pix, 0)), + ], + [ + make_checker_stim((1, -1), ISCEV_CHECK_DEG_SMALL, (eye_x_pix, 0)), + make_checker_stim((-1, 1), ISCEV_CHECK_DEG_SMALL, (eye_x_pix, 0)), + ], + ], + 'fixation': make_fixation([eye_x_pix, 0]), + } + + if self.use_vr: + w = self.window.size[0] + return { + 'left': make_eye_stimuli(self.left_eye_x_pos * (w / 2)), + 'right': make_eye_stimuli(self.right_eye_x_pos * (w / 2)), + } + else: + return {'monoscopic': make_eye_stimuli(0)} + + # ------------------------------------------------------------------ + # Block instructions + # ------------------------------------------------------------------ + + def block_eye_and_size(self, block_index: int): + c = CONDITIONS[self.block_labels[block_index]] + return c['eye'], c['size_name'] + + def present_block_instructions(self, current_block: int) -> None: + open_eye, size_name = self.block_eye_and_size(current_block) + closed_eye = 'right' if open_eye == 'left' else 'left' + + # Check if the eye just 
switched so we can prompt them to move the patch + is_first_block_for_eye = (current_block == 0) or (self.block_eye_and_size(current_block - 1)[0] != open_eye) + + patch_prompt = f"*** MOVE PHOTODIODE TO {closed_eye.upper()} LENS NOW ***\n" if is_first_block_for_eye else "" + + if self.use_vr: + # Re-assert height each call — VR state changes (calcEyePoses / + # setBuffer projection) can corrupt the cached norm-unit size on + # the shared instruction_stim, causing oversized text from block 2 + # onwards. Setting .height forces PsychoPy to recompute the glyph + # layout before draw, keeping it consistent across all blocks. + self.instruction_stim.height = 0.08 + self.instruction_stim.wrapWidth = 1.8 + for eye in ['left', 'right']: + self.window.setBuffer(eye) + + if eye == closed_eye: + self.black_background.draw() + self.instruction_stim.color = [1, 1, 1] + else: + self.grey_background.draw() + self.instruction_stim.color = [-1, -1, -1] + + self.instruction_stim.text = ( + f"Block {current_block + 1}/{self.n_blocks} — " + f"{open_eye} eye, {size_name} checks\n\n" + f"{patch_prompt}" + f"Please ensure your {closed_eye} eye is physically covered (e.g. tissue/patch),\n" + f"but keep BOTH eyes open underneath to prevent muscle artifacts.\n\n" + "Focus on the red dot.\n" + "Try not to blink while the squares are animating.\n" + "Press spacebar or trigger when ready." + ) + + self.instruction_stim.draw() + + if eye == open_eye: + self.stim[eye]['fixation'].draw() + else: + text = ( + f"Block {current_block + 1}/{self.n_blocks}\n\n" + f"{patch_prompt}" + f"Cover your {closed_eye} eye with a patch (keep both eyes open).\n" + f"Focus on the red dot with your {open_eye} eye.\n" + f"Check size: {size_name} ({ISCEV_CHECK_DEG_LARGE if size_name == 'large' else ISCEV_CHECK_DEG_SMALL}°)\n\n" + "Press spacebar when ready." 
+ ) + visual.TextStim(win=self.window, text=text, color=[-1, -1, -1]).draw() + self.stim['monoscopic']['fixation'].draw() + self.window.flip() + + # ------------------------------------------------------------------ + # Stimulus presentation + # ------------------------------------------------------------------ + + def present_stimulus(self, idx: int): + self.draw_frame(idx) + trial_idx = self.current_block_index * self.block_trial_size + idx + c = CONDITIONS[int(self.parameter[trial_idx])] + self.push_marker(EVENTS[f"rev/{c['eye']}/{c['size_name']}"], trial_idx) + + def draw_frame(self, idx: int): + trial_idx = self.current_block_index * self.block_trial_size + idx + c = CONDITIONS[int(self.parameter[trial_idx])] + eye, size_idx = c['eye'], c['size_idx'] + phase = idx % 2 # alternates 0 / 1 for each reversal + + diode_patch = (self.photodiode_patch_bright if phase == 0 + else self.photodiode_patch_dark) + + if self.use_vr: + closed_eye = 'right' if eye == 'left' else 'left' + self.window.setBuffer(eye) + self.grey_background.draw() + self.stim[eye]['checkerboards'][size_idx][phase].draw() + self.stim[eye]['fixation'].draw() + self.window.setBuffer(closed_eye) + self.black_background.draw() + diode_patch.draw() # centred on closed-eye buffer + else: + self.grey_background.draw() + self.stim['monoscopic']['checkerboards'][size_idx][phase].draw() + self.stim['monoscopic']['fixation'].draw() + diode_patch.draw() + + self.window.flip() + + def present_soa(self, idx: int): + self.draw_frame(idx) + + def present_iti(self): + if self.use_vr: + for eye in ('left', 'right'): + self.window.setBuffer(eye) + self.grey_background.draw() + else: + self.grey_background.draw() + self.window.flip() diff --git a/eegnb/utils/__init__.py b/eegnb/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/eegnb/utils/display.py b/eegnb/utils/display.py new file mode 100644 index 000000000..406c117b4 --- /dev/null +++ b/eegnb/utils/display.py @@ -0,0 +1,30 @@ 
+"""Display-related helpers shared across experiments and example scripts.""" + +import logging + +# Common panel refresh rates seen on consumer monitors and HMDs. +STANDARD_REFRESH_HZ = (60, 72, 75, 90, 100, 120, 144, 165, 240) + + +def snap_refresh_rate(measured_hz: float, tolerance_pct: float = 3.0) -> int: + """Snap a noisy measured refresh rate to the nearest standard panel rate. + + PsychoPy's getActualFrameRate() and libovr's displayRefreshRate both + occasionally report a value 1-2 Hz off the nominal panel rate due to + measurement jitter or runtime quirks. This helper rounds them back to + something physically meaningful. + + Falls back to the rounded measured value (with a warning) when the + measurement is more than tolerance_pct away from any standard rate, so + truly unusual panels (e.g. 85 Hz CRTs) still pass through correctly. + """ + snapped = min(STANDARD_REFRESH_HZ, key=lambda h: abs(h - measured_hz)) + if abs(snapped - measured_hz) / snapped * 100 > tolerance_pct: + rounded = int(round(measured_hz)) + logging.warning( + "[display] measured refresh rate %.2f Hz didn't match any " + "standard rate within %.1f%%; using rounded %d Hz", + measured_hz, tolerance_pct, rounded, + ) + return rounded + return snapped diff --git a/eegnb/utils/missing.py b/eegnb/utils/missing.py new file mode 100644 index 000000000..dc03a5a67 --- /dev/null +++ b/eegnb/utils/missing.py @@ -0,0 +1,56 @@ +"""Sentinel objects for optional dependencies. + +Lets the package import cleanly even when an optional library isn't installed, +while still raising a clear, actionable error if a user tries to use a feature +that depends on it. + +Two flavours: + missing_class(...) — for symbols the user *instantiates* (e.g. an + Experiment subclass). Raises on construction. + missing_module(...) — for symbols the user *accesses attributes on* + (e.g. ``pyxid2.get_xid_devices()``). Raises on + any attribute access. 
+""" + + +from typing import Any + + +def _format_message(name: str, feature: str, extras: str) -> str: + return ( + f"{name} is not installed. {feature} is not available in this " + f"environment. Please install the '{extras}' or 'full' dependencies " + "to use this feature." + ) + + +def missing_class(name: str, feature: str, extras: str) -> Any: + """Sentinel mimicking a class; raises RuntimeError on instantiation. + + Returns ``Any`` so the sentinel can be assigned to a name that would + normally hold a specific class (e.g. ``VisualN170 = missing_class(...)``) + without type-checker complaints at the call site. + """ + message = _format_message(name, feature, extras) + + class _Missing: + def __init__(self, *args, **kwargs): + raise RuntimeError(message) + + return _Missing + + +def missing_module(name: str, feature: str, extras: str) -> Any: + """Sentinel mimicking a module; raises RuntimeError on attribute access. + + Returns ``Any`` so the sentinel can be assigned to a name that would + normally hold a real module (e.g. ``pyxid2 = missing_module(...)``) + without type-checker complaints at the call site. + """ + message = _format_message(name, feature, extras) + + class _Missing: + def __getattr__(self, attr): + raise RuntimeError(message) + + return _Missing() diff --git a/environments/eeg-expy-docsbuild.yml b/environments/eeg-expy-docsbuild.yml index 31eae3a6e..b699bbb17 100644 --- a/environments/eeg-expy-docsbuild.yml +++ b/environments/eeg-expy-docsbuild.yml @@ -3,10 +3,11 @@ channels: - defaults dependencies: # System-level dependencies - - python>=3.8,<=3.13 + - python >=3.10 # higher versions allowed for experimental builds + - setuptools - pytables # install pytables for macOS arm64, so do not need to build from source. 
- rust # used by docsbuild - pip - pip: # Install package with only Analysis requirements - - -e ..[docsbuild] \ No newline at end of file + - -e ..[docsbuild] diff --git a/environments/eeg-expy-full.yml b/environments/eeg-expy-full.yml index d3c160f01..2e096c6dc 100644 --- a/environments/eeg-expy-full.yml +++ b/environments/eeg-expy-full.yml @@ -3,7 +3,8 @@ channels: - defaults dependencies: # System-level dependencies - - python>=3.8,<=3.10 # psychopy <= 3.10 + - python>=3.10,<=3.11 # psychopy 2026.x requires py3.10; higher versions for experimental builds + - setuptools - dukpy==0.2.3 # psychopy dependency, avoid failing due to building wheel on win 3.9. - numpy # fix PsychXR numpy dependency DLL issues on Windows - pytables # install pytables for macOS arm64, so do not need to build from source. diff --git a/environments/eeg-expy-stimpres.yml b/environments/eeg-expy-stimpres.yml index de7ed1178..dd6e36096 100644 --- a/environments/eeg-expy-stimpres.yml +++ b/environments/eeg-expy-stimpres.yml @@ -3,7 +3,8 @@ channels: - defaults dependencies: # System-level dependencies - - python>=3.8,<=3.10 # psychopy <= 3.10 + - python >=3.10 # psychopy 2026.x requires py3.10; higher versions allowed for experimental builds + - setuptools - dukpy==0.2.3 # psychopy dependency, avoid failing due to building wheel on win 3.9. 
- wxpython>=4.0 # install wxpython to prevent error on macOS arm64: "site-packages/wx/_core.cpython-38-darwin.so, 0x0002): symbol not found in flat namespace '__ZN10wxBoxSizer20InformFirstDirectionEiii'" - cffi # Fix sound ffi.callback() issue with sounddevice on macOS: https://github.com/spatialaudio/python-sounddevice/issues/397 diff --git a/environments/eeg-expy-streaming.yml b/environments/eeg-expy-streaming.yml index 2b7f87af7..800ab4cc6 100644 --- a/environments/eeg-expy-streaming.yml +++ b/environments/eeg-expy-streaming.yml @@ -3,7 +3,8 @@ channels: - defaults dependencies: # System-level dependencies - - python>=3.8,<=3.13 + - python >=3.10 # higher versions allowed for experimental builds + - setuptools - liblsl # install liblsl to prevent error on macOS and Ubuntu: "RuntimeError: LSL binary library file was not found." - pip - pip: diff --git a/environments/eeg-expy-streamstim.yml b/environments/eeg-expy-streamstim.yml index 8ed52571f..4942ff41b 100644 --- a/environments/eeg-expy-streamstim.yml +++ b/environments/eeg-expy-streamstim.yml @@ -3,7 +3,8 @@ channels: - defaults dependencies: # System-level dependencies - - python>=3.8,<=3.10 # psychopy <= 3.10 + - python>=3.10,<=3.13 # psychopy <= 3.10; higher versions for experimental builds + - setuptools - dukpy==0.2.3 # psychopy dependency, avoid failing due to building wheel on win 3.9. - liblsl # install liblsl to prevent error on macOS and Ubuntu: "RuntimeError: LSL binary library file was not found." 
- wxpython>=4.0 # install wxpython to prevent error on macOS arm64: "site-packages/wx/_core.cpython-38-darwin.so, 0x0002): symbol not found in flat namespace '__ZN10wxBoxSizer20InformFirstDirectionEiii'" diff --git a/examples/visual_vep/00x__pattern_reversal_run_experiment.py b/examples/visual_vep/00x__pattern_reversal_run_experiment.py new file mode 100644 index 000000000..623559011 --- /dev/null +++ b/examples/visual_vep/00x__pattern_reversal_run_experiment.py @@ -0,0 +1,122 @@ +""" +PRVEP Run Experiment +========================================== + +Pattern Reversal VEP using PsychoPy on a standard monitor or Meta Quest +headset (via psychxr / Meta-Link). The EEG device and save-file are owned +by this script; the stimulus is driven by PsychoPy. + +Block schedule: 4 conditions (left/right eye × large/small check) × +``reps_per_condition`` reps = 8 blocks, shuffled at startup. + +Marker codes: + 1 — block_start, left eye, large check (~60 arcmin / 1 deg) + 2 — block_start, right eye, large check + 3 — block_start, left eye, small check (~30 arcmin / 0.5 deg) + 4 — block_start, right eye, small check +""" + +################################################################################################### +# Setup +# --------------------- +# +# Imports + +import os +import platform +from os import getenv +from dotenv import load_dotenv +load_dotenv() + +from eegnb import generate_save_fn +from eegnb.devices import CYTON_CONFIG_GAIN_4X +from eegnb.devices.eeg import EEG +from eegnb.experiments.visual_vep import VisualPatternReversalVEP +from eegnb.utils.display import snap_refresh_rate + +################################################################################################### +# Configuration +# --------------------- +# +# Set your experiment parameters here before running. +# + +# Display: set use_vr=True for Meta Quest, False for monitor +use_vr = True + +# Device: "cyton", "unicorn", "muse2", etc. 
+device = "cyton" + +# Serial port: "COM3" for Windows, "/dev/ttyUSB0" for Linux +serial_port = "COM3" + +# Per-cap channel names, in Cyton input order (1..8). Personal hardware, +# not part of the shared library. Add a new entry when you set up a new cap. +MONTAGES = { + # 3D-printed mark-iv occipital array. Ground A2, Ref Fz. + "thinkpulse-mark-iv": ["P7", "P8", "PO3", "PO4", "O1", "O2", "POz", "Oz"], + # Standard 10-20 cap (Tencom 20-ch). Ground A2, Ref Fz. + "cap": ["P7", "P8", "P3", "P4", "Pz", "Oz", "O1", "O2"], +} + +# Personal monitor specs — refresh rate is used for the save path and for +# the integer-multiple assertion in load_stimulus(). +MONITORS = { + "acer-34-predator": {"hz": 100}, +} + +# ---- pick montage and monitor for this session -------------------------- +montage_type = "cap" +config = CYTON_CONFIG_GAIN_4X if montage_type == "thinkpulse-mark-iv" else None + +monitor_name = "acer-34-predator" +ch_names = MONTAGES[montage_type] + +# Subject and session identifiers +subject_id = 0 +session_nb = 19 + +################################################################################################### +# Initiate EEG device +# --------------------- +# +# Start EEG device based on configuration above. +eeg_device = EEG(device, serial_port=serial_port, ch_names=ch_names, + config=config, + analog_mode=True) # stream AUX (A5-A7) for photodiode trigger +# eeg_device = EEG(device="synthetic") + +################################################################################################### +# Build experiment object and detect display settings +# --------------------- +# +# The experiment is constructed before the save path so the Rift session is +# already open and we can read the actual refresh rate from the runtime rather +# than hardcoding it. The save path is then built from the real Hz and set on +# the experiment before run() is called. 
+ +pattern_reversal_vep = VisualPatternReversalVEP( + eeg=eeg_device, + use_vr=use_vr, + use_fullscr=True +) + +if use_vr: + refresh_rate = snap_refresh_rate(pattern_reversal_vep.vr.displayRefreshRate) + display = f"quest-2_{refresh_rate}Hz" +else: + refresh_rate = MONITORS[monitor_name]["hz"] + display = f"{monitor_name}_{refresh_rate}Hz" + +site = f"{display}_{montage_type}" +data_dir = getenv("DATA_DIR") +save_fn = generate_save_fn(eeg_device.device_name, + experiment="visual-PRVEP", + site=site, + subject_id=subject_id, + session_nb=session_nb, + data_dir=data_dir) +print(f"Saving data to: {save_fn} (detected {refresh_rate} Hz)") +pattern_reversal_vep.save_fn = save_fn + +pattern_reversal_vep.run() diff --git a/examples/visual_vep/README.txt b/examples/visual_vep/README.txt new file mode 100644 index 000000000..e201880e7 --- /dev/null +++ b/examples/visual_vep/README.txt @@ -0,0 +1 @@ +Visual VEP diff --git a/pyproject.toml b/pyproject.toml index de7bf0dc5..e9fb602e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,6 +9,7 @@ addopts = """ --current-env --ignore-glob 'examples/**.py' --ignore-glob '**/baseline_task.py' + --ignore 'tests/test_run_experiments.py' """ testpaths = [ "eegnb", diff --git a/requirements.txt b/requirements.txt index 001b69229..c5cf6db4a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,8 +3,7 @@ scikit-learn>=0.23.2 pandas>=1.1.4 -numpy>=1.26.0; python_version >= "3.9" -numpy<=1.24.4; python_version == "3.8" +numpy>=1.26.0 mne>=0.20.8 seaborn>=0.11.0 pyriemann>=0.2.7 @@ -15,10 +14,7 @@ pysocks>=1.7.1 pyserial>=3.5 h5py>=3.1.0 pytest-shutil -pyo>=1.0.3; platform_system == "Linux" -#pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to -# a specific version prevents an endless dependency resolution loop. 
-pyobjc==7.3; sys_platform == 'darwin' +pyobjc>=8.0; sys_platform == 'darwin' airium>=0.1.0 attrdict>=2.0.1 attrdict3 @@ -26,6 +22,7 @@ attrdict3 ## ~~ Streaming Requirements ~~ +pyxid2 muselsl>=2.0.2 # Upgrade from 1.10.5 to 1.16.2 so the arm64 lib is available to macOS Apple Silicon for preventing error: # pylsl/liblsl64.dylib' (mach-o file, but is an incompatible architecture (have 'x86_64', need 'arm64e' or 'arm64')) @@ -35,10 +32,7 @@ pysocks>=1.7.1 pyserial>=3.5 h5py>=3.1.0 pytest-shutil -pyo>=1.0.3; platform_system == "Linux" -#pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to -# a specific version prevents an endless dependency resolution loop. -pyobjc==7.3; sys_platform == 'darwin' +pyobjc>=8.0; sys_platform == 'darwin' #Removed keyboard dependency due segmentation fault on Apple Silicon: https://github.com/boppreh/keyboard/issues/507 pynput airium>=0.1.0 @@ -46,50 +40,55 @@ attrdict>=2.0.1 attrdict3 click +# Tests +pytest +pytest-cov +nbval + ## ~~ Stimpres Requirements ~~ -#pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to -# a specific version prevents an endless dependency resolution loop. -pyobjc==7.3; sys_platform == 'darwin' +pyobjc>=8.0; sys_platform == 'darwin' #upgrade psychopy to use newer wxpython dependency which is prebuilt for m1 support. -psychopy==2025.1.1 -#needed for macos arm sound support: https://github.com/psychopy/psychopy-sounddevice/pull/4 --e psychopy-sounddevice +# Forked with a fix for Rift stereo projection matrix crash under strict-ndim psychxr. +psychopy @ git+https://github.com/pellet/psychopy.git@v2026.2.0-rift-fix +#needed for macos arm sound support +psychopy-sounddevice @ git+https://github.com/psychopy/psychopy-sounddevice.git ffpyplayer==4.5.2 # 4.5.3 fails to build as wheel. 
psychtoolbox scikit-learn>=0.23.2 pandas>=1.1.4 -numpy>=1.26.0; python_version >= "3.9" -numpy==1.24.4; python_version == "3.8" +numpy>=1.26.0 mne>=0.20.8 seaborn>=0.11.0 pysocks>=1.7.1 pyserial>=3.5 h5py>=3.1.0 pytest-shutil -pyo>=1.0.3; platform_system == "Linux" airium>=0.1.0 attrdict>=2.0.1 attrdict3 -# pywinhook needs some special treatment since there are only wheels on PyPI for Python 3.7-3.8, and building requires special tools (swig, VS C++ tools) -# See issue: https://github.com/NeuroTechX/eeg-notebooks/issues/29 -pywinhook>=1.6.0 ; platform_system == "Windows" and (python_version == "3.7" or python_version == "3.8") -pywinhook @ https://github.com/ActivityWatch/wheels/raw/master/pywinhook/pyWinhook-1.6.2-cp39-cp39-win_amd64.whl ; platform_system == "Windows" and python_version == "3.9" - # pyglet downgrade to prevent threadmode warning on windows # See issue: https://github.com/psychopy/psychopy/issues/2876 pyglet==1.4.11 ; platform_system == "Windows" -# Oculus/Quest VR support - currently only supported on Windows and -# <= 3.9, otherwise will need Oculus PC SDK to build wheel. -psychxr>=0.2.4rc2; platform_system == "Windows" and python_version <= "3.9" - +# Quest-link VR support - prebuilt Windows wheels from the fork's GitHub +# release (0.2.6rc1 adds Python 3.10 support). +# psychxr/quest-link is Windows-only. +psychxr @ https://github.com/pellet/psychxr/releases/download/v0.2.6rc1/psychxr-0.2.6rc1-cp310-cp310-win_amd64.whl ; platform_system == "Windows" and python_version == "3.10" +# PsychoPy/PsychXR does not yet officially support Python > 3.10; the wheels below are experimental. 
+psychxr @ https://github.com/pellet/psychxr/releases/download/v0.2.6rc1/psychxr-0.2.6rc1-cp311-cp311-win_amd64.whl ; platform_system == "Windows" and python_version == "3.11" +psychxr @ https://github.com/pellet/psychxr/releases/download/v0.2.6rc1/psychxr-0.2.6rc1-cp312-cp312-win_amd64.whl ; platform_system == "Windows" and python_version == "3.12" +psychxr @ https://github.com/pellet/psychxr/releases/download/v0.2.6rc1/psychxr-0.2.6rc1-cp313-cp313-win_amd64.whl ; platform_system == "Windows" and python_version == "3.13" +# Used for generating checkerboard in pattern reversal experiment +stimupy -## ~~ Docsbuild Requirements ~~ +## ~~ Docsbuild Requirements ~~ +setuptools<81 # brainflow imports pkg_resources at runtime; setuptools 81 deprecated it and 82 removed it +python-dotenv recommonmark brainflow numpydoc diff --git a/tests/test_acquisition.py b/tests/test_acquisition.py new file mode 100644 index 000000000..5626f6ac0 --- /dev/null +++ b/tests/test_acquisition.py @@ -0,0 +1,56 @@ +import os +import time +import pandas as pd +import pytest +from eegnb.devices.eeg import EEG + +def test_synthetic_acquisition(tmp_path): + """ + Test the data acquisition pipeline using a synthetic BrainFlow board. + This verifies that we can initialize a device, start a stream, + record data, and save it to a CSV file in a CI-friendly way. 
+ """ + # Use a temporary file for recording + save_fn = tmp_path / "synthetic_data.csv" + + # Initialize EEG with synthetic board + # BrainFlow synthetic board (ID -1) works without hardware + eeg = EEG(device='synthetic') + + # Verify metadata initialization + assert eeg.backend == 'brainflow' + assert eeg.sfreq == 250 # Default for synthetic board + assert len(eeg.channels) > 0 + + # Start stream and capture data + # We specify a short duration for the test + record_duration = 2 + eeg.start(str(save_fn), duration=record_duration + 5) + + # Simulate some experiment time + time.sleep(record_duration) + + # Push a few synthetic markers + eeg.push_sample(marker=1, timestamp=time.time()) + time.sleep(0.1) + eeg.push_sample(marker=2, timestamp=time.time()) + + # Stop recording and release session + eeg.stop() + + # Verify file creation and content + assert save_fn.exists() + + # Read the data back + data = pd.read_csv(save_fn) + + # Basic data validation + assert len(data) > 0 + assert 'timestamps' in data.columns + assert 'stim' in data.columns + + # Check if markers were recorded (may vary slightly based on timing) + # but we should at least see non-zero values in the stim column + assert (data['stim'] != 0).any() + + print(f"Acquired {len(data)} samples with columns: {list(data.columns)}")