
Cannot read the Trodes .rec file using spikeinterface.extractors.read_spikegadgets() #1810

@Sachuriga

Description


I'm using an HH128 and the MCU for tethered recording in Trodes. The signal looks good, and I can play back the .rec file within Trodes without issues.
However, when I try to load the .rec file into SpikeInterface using spikeinterface.extractors.read_spikegadgets(), I get an error stating that the file does not contain the channel mapping header.

Trodes version: 2.6.0
SpikeInterface version: 0.103
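For reference, here is the call that triggers the error, reconstructed from the traceback below (the `import spikeinterface.extractors as se` line is an assumption based on the `se` alias used in the failing cell):

```python
import spikeinterface.extractors as se  # alias assumed from "se.read_spikegadgets" in the traceback

# The call from Cell In[13] that raises the ZeroDivisionError
recording = se.read_spikegadgets(
    r"C:\Users\gl_pc\Downloads\20260115_110218\20260115_110218\20260115_195453.rec/20260115_195453.rec"
)
```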

Error traceback:

ZeroDivisionError Traceback (most recent call last)
Cell In[13], line 4
2 from probeinterface import generate_tetrode, ProbeGroup
3 # 1. Load the raw data (it will have NO geometry initially)
----> 4 recording = se.read_spikegadgets(r'C:\Users\gl_pc\Downloads\20260115_110218\20260115_110218\20260115_195453.rec/20260115_195453.rec')
5 recording
File c:\Users\gl_pc\.conda\envs\si_trodes\Lib\site-packages\spikeinterface\extractors\neoextractors\spikegadgets.py:50, in SpikeGadgetsRecordingExtractor.__init__(self, file_path, stream_id, stream_name, all_annotations, use_names_as_ids)
41 def __init__(
42 self,
43 file_path,
(...) 47 use_names_as_ids: bool = False,
48 ):
49 neo_kwargs = self.map_to_neo_kwargs(file_path)
---> 50 NeoBaseRecordingExtractor.__init__(
51 self,
52 stream_id=stream_id,
53 stream_name=stream_name,
54 all_annotations=all_annotations,
55 use_names_as_ids=use_names_as_ids,
56 **neo_kwargs,
57 )
58 self._kwargs.update(dict(file_path=str(Path(file_path).absolute()), stream_id=stream_id))
60 probegroup = None # TODO remove once probeinterface is updated to 0.2.22 in the pyproject.toml
File c:\Users\gl_pc\.conda\envs\si_trodes\Lib\site-packages\spikeinterface\extractors\neoextractors\neobaseextractor.py:188, in NeoBaseRecordingExtractor.__init__(self, stream_id, stream_name, block_index, all_annotations, use_names_as_ids, **neo_kwargs)
158 def __init__(
159 self,
160 stream_id: Optional[str] = None,
(...) 165 **neo_kwargs: Dict[str, Any],
166 ) -> None:
167 """
168 Initialize a NeoBaseRecordingExtractor instance.
169
(...) 185
186 """
--> 188 _NeoBaseExtractor.__init__(self, block_index, **neo_kwargs)
190 kwargs = dict(all_annotations=all_annotations)
191 if block_index is not None:
File c:\Users\gl_pc\.conda\envs\si_trodes\Lib\site-packages\spikeinterface\extractors\neoextractors\neobaseextractor.py:27, in _NeoBaseExtractor.__init__(self, block_index, **neo_kwargs)
23 def __init__(self, block_index, **neo_kwargs):
24
25 # Avoids double initiation of the neo reader if it was already done in the __init__ of the child class
26 if not hasattr(self, "neo_reader"):
---> 27 self.neo_reader = self.get_neo_io_reader(self.NeoRawIOClass, **neo_kwargs)
29 if self.neo_reader.block_count() > 1 and block_index is None:
30 raise Exception(
31 "This dataset is multi-block. Spikeinterface can load one block at a time. "
32 "Use 'block_index' to select the block to be loaded."
33 )
File c:\Users\gl_pc\.conda\envs\si_trodes\Lib\site-packages\spikeinterface\extractors\neoextractors\neobaseextractor.py:66, in _NeoBaseExtractor.get_neo_io_reader(cls, raw_class, **neo_kwargs)
64 neoIOclass = getattr(rawio_module, raw_class)
65 neo_reader = neoIOclass(**neo_kwargs)
---> 66 neo_reader.parse_header()
68 return neo_reader
File c:\Users\gl_pc\.conda\envs\si_trodes\Lib\site-packages\neo\rawio\baserawio.py:211, in BaseRawIO.parse_header(self)
197 """
198 Parses the header of the file(s) to allow for faster computations
199 for all other functions
200
201 """
202 # this must create
203 # self.header['nb_block']
204 # self.header['nb_segment']
(...) 208 # self.header['spike_channels']
209 # self.header['event_channels']
--> 211 self._parse_header()
212 self._check_stream_signal_channel_characteristics()
213 self.is_header_parsed = True
File c:\Users\gl_pc\.conda\envs\si_trodes\Lib\site-packages\neo\rawio\spikegadgetsrawio.py:224, in SpikeGadgetsRawIO._parse_header(self)
220 self._mask_channels_bytes[stream_id] = []
222 # we can only produce these channels for a subset of spikegadgets setup. If this criteria isn't
223 # true then we should just use the raw_channel_ids and let the end user sort everything out
--> 224 if num_ephy_channels % num_chan_per_chip == 0:
225 all_hw_chans = [int(schan.attrib["hwChan"]) for trode in sconf for schan in trode]
226 missing_hw_chans = set(range(num_ephy_channels)) - set(all_hw_chans)
ZeroDivisionError: integer modulo by zero
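The check that fails is `if num_ephy_channels % num_chan_per_chip == 0:` in neo's `SpikeGadgetsRawIO._parse_header`, so `num_chan_per_chip` is evidently 0 for this file, i.e. neo does not find the per-chip channel information it expects in the .rec XML header. Below is a rough diagnostic sketch (not a fix) for dumping that header; it assumes the .rec file begins with an XML block terminated by `</Configuration>` and that the header uses the usual `HardwareConfiguration` / `SpikeConfiguration` element names (only the `hwChan` attribute is confirmed by the traceback above; the other names may differ for this setup):

```python
# Diagnostic sketch (not a fix): dump the .rec file's embedded XML header to see
# whether the hardware / channel-mapping information neo expects is present.
# Assumptions: the XML block ends with "</Configuration>", and the element names
# "HardwareConfiguration" / "SpikeConfiguration" match this recording's header.
from xml.etree import ElementTree

rec_path = r"C:\Users\gl_pc\Downloads\20260115_110218\20260115_110218\20260115_195453.rec/20260115_195453.rec"

with open(rec_path, "rb") as f:
    head = f.read(5_000_000)  # the XML header sits before the binary packet data

end = head.find(b"</Configuration>")
assert end != -1, "no </Configuration> tag found in the first 5 MB"
header_xml = head[: end + len(b"</Configuration>")].decode("utf-8", errors="replace")
root = ElementTree.fromstring(header_xml)

hconf = root.find(".//HardwareConfiguration")
print("HardwareConfiguration:", None if hconf is None else hconf.attrib)
if hconf is not None:
    for device in hconf:
        print("  device:", device.attrib)  # per-device / per-chip channel counts should appear here

sconf = root.find(".//SpikeConfiguration")
if sconf is not None:
    hw_chans = [schan.attrib.get("hwChan") for trode in sconf for schan in trode]
    print("SpikeChannels with hwChan attribute:", len(hw_chans))
else:
    print("No SpikeConfiguration element found")
```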
