|
| 1 | +# -*- coding: utf-8 -*- |
| 2 | +# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> |
| 3 | +# Matti Hamalainen <msh@nmr.mgh.harvard.edu> |
| 4 | +# Teon Brooks <teon.brooks@gmail.com> |
| 5 | +# Stefan Appelhoff <stefan.appelhoff@mailbox.org> |
| 6 | +# Joan Massich <mailsik@gmail.com> |
| 7 | +# |
| 8 | +# License: BSD (3-clause) |
| 9 | + |
| 10 | +import datetime |
| 11 | +import os.path as op |
| 12 | +import re |
| 13 | + |
| 14 | +import numpy as np |
| 15 | + |
| 16 | +from mne.io.constants import FIFF |
| 17 | +from mne.io.tree import dir_tree_find |
| 18 | +from mne.io.tag import read_tag |
| 19 | +from mne.io.write import start_file |
| 20 | +from mne.io.write import end_file |
| 21 | +from mne.io.write import write_dig_points |
| 22 | + |
| 23 | +from mne.transforms import _to_const |
| 24 | +from mne.utils import logger |
| 25 | +from mne.utils import warn |
| 26 | +from mne import __version__ |
| 27 | + |
# NOTE(review): module-level alias of the ``bytes`` builtin. It is not
# referenced anywhere in this chunk and shadows a common one-letter local
# name -- confirm other parts of the file use it before removing.
b = bytes # alias
| 29 | + |
| 30 | + |
def _read_dig_fif(fid, meas_info):
    """Read digitizer data from a FIFF file.

    Parameters
    ----------
    fid : file-like
        Open FIFF file handle to read tags from.
    meas_info : dict
        Measurement-info tree node in which to look for an Isotrak block.

    Returns
    -------
    dig : list of dict | None
        The digitizer points, each with ``coord_frame`` forced to head
        coordinates, or ``None`` when no unique Isotrak block is found.
    """
    blocks = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)
    if len(blocks) == 0:
        logger.info('Isotrak not found')
        return None
    if len(blocks) > 1:
        warn('Multiple Isotrak found')
        return None
    block = blocks[0]
    dig = []
    for k in range(block['nent']):
        entry = block['directory'][k]
        if entry.kind != FIFF.FIFF_DIG_POINT:
            continue
        point = read_tag(fid, entry.pos).data
        point['coord_frame'] = FIFF.FIFFV_COORD_HEAD
        dig.append(point)
    return dig
| 50 | + |
| 51 | + |
def write_dig(fname, pts, coord_frame=None):
    """Write digitization data to a FIF file.

    Parameters
    ----------
    fname : str
        Destination file name.
    pts : iterator of dict
        Iterator through digitizer points. Each point is a dictionary with
        the keys 'kind', 'ident' and 'r'.
    coord_frame : int | str | None
        If all the points have the same coordinate frame, specify the type
        here. Can be None (default) if the points could have varying
        coordinate frames.

    Raises
    ------
    ValueError
        If any point carries a ``coord_frame`` entry that disagrees with the
        ``coord_frame`` argument.
    """
    # Materialize pts up front: when coord_frame is given, the validation
    # below iterates the points once, which would exhaust a generator before
    # write_dig_points ever saw it.
    pts = list(pts)
    if coord_frame is not None:
        coord_frame = _to_const(coord_frame)
        # Points without an explicit frame default to the requested one.
        pts_frames = {pt.get('coord_frame', coord_frame) for pt in pts}
        bad_frames = pts_frames - {coord_frame}
        if len(bad_frames) > 0:
            raise ValueError(
                'Points have coord_frame entries that are incompatible with '
                'coord_frame=%i: %s.' % (coord_frame, str(tuple(bad_frames))))

    with start_file(fname) as fid:
        write_dig_points(fid, pts, block=True, coord_frame=coord_frame)
        end_file(fid)
| 79 | + |
| 80 | + |
def _read_dig_points(fname, comments='%', unit='auto'):
    """Read digitizer data from a text file.

    If fname ends in .hsp or .elp, the function assumes digitizer files in
    [m], otherwise it assumes space-delimited text files in [mm].

    Parameters
    ----------
    fname : str
        The filepath of space delimited file with points, or a .mat file
        (Polhemus FastTrak format).
    comments : str
        The character used to indicate the start of a comment;
        Default: '%'.
    unit : 'auto' | 'm' | 'cm' | 'mm'
        Unit of the digitizer files (hsp and elp). If not 'm', coordinates will
        be rescaled to 'm'. Default is 'auto', which assumes 'm' for *.hsp and
        *.elp files and 'mm' for *.txt files, corresponding to the known
        Polhemus export formats.

    Returns
    -------
    dig_points : np.ndarray, shape (n_points, 3)
        Array of dig points in [m].

    Raises
    ------
    ValueError
        If ``unit`` is invalid or the parsed data is not (n, 3).
    """
    if unit not in ('auto', 'm', 'mm', 'cm'):
        raise ValueError('unit must be one of "auto", "m", "mm", or "cm"')

    _, ext = op.splitext(fname)
    if ext == '.elp' or ext == '.hsp':
        with open(fname) as fid:
            file_str = fid.read()
        # Signed decimal, optionally with an exponent (e.g. -1.5e-2).
        value_pattern = r"\-?\d+\.?\d*e?\-?\d*"
        coord_pattern = r"({0})\s+({0})\s+({0})\s*$".format(value_pattern)
        if ext == '.hsp':
            # .hsp lines are exactly one x y z triple, so anchor at the start.
            coord_pattern = '^' + coord_pattern
        points_str = [m.groups() for m in re.finditer(coord_pattern, file_str,
                                                      re.MULTILINE)]
        dig_points = np.array(points_str, dtype=float)
    elif ext == '.mat':  # like FastScan II
        from scipy.io import loadmat  # local import: scipy is optional
        dig_points = loadmat(fname)['Points'].T
    else:
        dig_points = np.loadtxt(fname, comments=comments, ndmin=2)
        if unit == 'auto':
            unit = 'mm'  # plain text exports are assumed to be in mm
        if dig_points.shape[1] > 3:
            warn('Found %d columns instead of 3, using first 3 for XYZ '
                 'coordinates' % (dig_points.shape[1],))
            dig_points = dig_points[:, :3]

    if dig_points.shape[-1] != 3:
        err = 'Data must be (n, 3) instead of %s' % (dig_points.shape,)
        raise ValueError(err)

    # Rescale to meters where the source unit is known to differ.
    if unit == 'mm':
        dig_points /= 1000.
    elif unit == 'cm':
        dig_points /= 100.

    return dig_points
| 142 | + |
| 143 | + |
def _write_dig_points(fname, dig_points):
    """Write points to text file.

    Parameters
    ----------
    fname : str
        Path to the file to write. The kind of file to write is determined
        based on the extension: '.txt' for tab separated text file.
    dig_points : numpy.ndarray, shape (n_points, 3)
        Points.

    Raises
    ------
    ValueError
        If the points are not (n_points, 3) or the extension is unsupported.
    """
    _, ext = op.splitext(fname)
    dig_points = np.asarray(dig_points)
    if (dig_points.ndim != 2) or (dig_points.shape[1] != 3):
        raise ValueError("Points must be of shape (n_points, 3), "
                         "not %s" % (dig_points.shape,))

    if ext != '.txt':
        raise ValueError("Unrecognized extension: %r. Need '.txt'." % ext)

    with open(fname, 'wb') as fid:
        stamp = datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")
        # Two '%%'-prefixed comment lines, then one x<TAB>y<TAB>z per point.
        fid.write(b'%% Ascii 3D points file created by mne-python version'
                  b' %s at %s\n' % (__version__.encode(), stamp.encode()))
        fid.write(b'%% %d 3D points, x y z per line\n' % len(dig_points))
        np.savetxt(fid, dig_points, delimiter='\t', newline='\n')
| 173 | + |
| 174 | + |
def _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None,
                     extra_points=None, dig_ch_pos=None):
    """Construct digitizer info for the info.

    Parameters
    ----------
    nasion : array-like | numpy.ndarray, shape (3,) | None
        Point designated as the nasion point.
    lpa : array-like | numpy.ndarray, shape (3,) | None
        Point designated as the left auricular point.
    rpa : array-like | numpy.ndarray, shape (3,) | None
        Point designated as the right auricular point.
    hpi : array-like | numpy.ndarray, shape (n_points, 3) | None
        Points designated as head position indicator points.
    extra_points : array-like | numpy.ndarray, shape (n_points, 3)
        Points designed as the headshape points.
    dig_ch_pos : dict
        Dict of EEG channel positions.

    Returns
    -------
    dig : list
        List of digitizer points to be added to the info['dig'].

    Raises
    ------
    ValueError
        If any of the point arrays does not have the required shape.
    """
    dig = []
    if lpa is not None:
        lpa = np.asarray(lpa)
        if lpa.shape != (3,):
            raise ValueError('LPA should have the shape (3,) instead of %s'
                             % (lpa.shape,))
        dig.append({'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA,
                    'kind': FIFF.FIFFV_POINT_CARDINAL,
                    'coord_frame': FIFF.FIFFV_COORD_HEAD})
    if nasion is not None:
        nasion = np.asarray(nasion)
        if nasion.shape != (3,):
            raise ValueError('Nasion should have the shape (3,) instead of %s'
                             % (nasion.shape,))
        dig.append({'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION,
                    'kind': FIFF.FIFFV_POINT_CARDINAL,
                    'coord_frame': FIFF.FIFFV_COORD_HEAD})
    if rpa is not None:
        rpa = np.asarray(rpa)
        if rpa.shape != (3,):
            raise ValueError('RPA should have the shape (3,) instead of %s'
                             % (rpa.shape,))
        dig.append({'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA,
                    'kind': FIFF.FIFFV_POINT_CARDINAL,
                    'coord_frame': FIFF.FIFFV_COORD_HEAD})
    if hpi is not None:
        hpi = np.asarray(hpi)
        if hpi.ndim != 2 or hpi.shape[1] != 3:
            raise ValueError('HPI should have the shape (n_points, 3) instead '
                             'of %s' % (hpi.shape,))
        for idx, point in enumerate(hpi):
            dig.append({'r': point, 'ident': idx + 1,
                        'kind': FIFF.FIFFV_POINT_HPI,
                        'coord_frame': FIFF.FIFFV_COORD_HEAD})
    if extra_points is not None:
        extra_points = np.asarray(extra_points)
        # Check ndim as well (matching the HPI check above) so a 1-D input
        # raises the intended ValueError rather than an IndexError from
        # indexing shape[1] on a 1-D shape tuple.
        if extra_points.ndim != 2 or extra_points.shape[1] != 3:
            raise ValueError('Points should have the shape (n_points, 3) '
                             'instead of %s' % (extra_points.shape,))
        for idx, point in enumerate(extra_points):
            dig.append({'r': point, 'ident': idx + 1,
                        'kind': FIFF.FIFFV_POINT_EXTRA,
                        'coord_frame': FIFF.FIFFV_COORD_HEAD})
    if dig_ch_pos is not None:
        keys = sorted(dig_ch_pos.keys())
        try:  # use the last 3 as int if possible (e.g., EEG001->1)
            idents = [int(key[-3:]) for key in keys]
        except ValueError:  # and if any conversion fails, simply use arange
            idents = np.arange(1, len(keys) + 1)
        for key, ident in zip(keys, idents):
            dig.append({'r': dig_ch_pos[key], 'ident': ident,
                        'kind': FIFF.FIFFV_POINT_EEG,
                        'coord_frame': FIFF.FIFFV_COORD_HEAD})
    return dig
0 commit comments