Skip to content
Open

NUWE 1 #1269

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
115 changes: 114 additions & 1 deletion Networking-Test-Kit/BrainFlow/brainflow_streamer_receive.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,112 @@
from brainflow.data_filter import DataFilter


@dataclass
class ChannelFeatures:
    """Summary statistics computed for one EEG channel over one data window."""

    # Row index of this channel in the BrainFlow data matrix.
    channel_index: int
    # Number of samples that contributed to the statistics below.
    sample_count: int
    # Arithmetic mean of the channel's samples.
    mean: float
    # Population standard deviation of the channel's samples.
    stddev: float
    # max(samples) - min(samples) over the window.
    peak_to_peak: float
    # Mean absolute difference between consecutive samples.
    mean_abs_delta: float


class NeuralFeedbackTextTransformer:
    """Turns a raw BrainFlow EEG data window into a human-readable text summary."""

    def __init__(self, max_channels=4):
        # Upper bound on how many EEG channels are summarised per window.
        self.max_channels = max_channels

    def transform(self, master_board_id, data):
        """Build a multi-line neural-feedback summary for one board data window.

        Returns an explanatory message instead when the board exposes no EEG
        channels or when no channel has at least two samples.
        """
        eeg_channels = BoardShim.get_eeg_channels(master_board_id)
        if not eeg_channels:
            return "No EEG channels were found for the selected BrainFlow board."

        channel_features = []
        for channel_index in eeg_channels[: self.max_channels]:
            samples = self._as_float_list(data[channel_index])
            # At least two samples are needed for a delta-based statistic.
            if len(samples) >= 2:
                channel_features.append(
                    ChannelFeatures(
                        channel_index=channel_index,
                        sample_count=len(samples),
                        mean=self._mean(samples),
                        stddev=self._stddev(samples),
                        peak_to_peak=max(samples) - min(samples),
                        mean_abs_delta=self._mean_abs_delta(samples),
                    )
                )

        if not channel_features:
            return "The EEG stream did not contain enough samples to generate a text summary."

        sampling_rate = BoardShim.get_sampling_rate(master_board_id)
        # Window length is derived from the first summarised channel.
        sample_count = channel_features[0].sample_count
        window_seconds = sample_count / sampling_rate if sampling_rate else 0.0

        average_stddev = self._mean([f.stddev for f in channel_features])
        average_peak_to_peak = self._mean([f.peak_to_peak for f in channel_features])
        average_abs_delta = self._mean([f.mean_abs_delta for f in channel_features])

        state_label, interpretation = self._infer_state(
            average_stddev, average_peak_to_peak, average_abs_delta
        )

        lines = [
            "Neural feedback text summary",
            f"Window length: {window_seconds:.2f} seconds",
            f"Samples per channel: {sample_count}",
            f"Detected state: {state_label}",
            f"Interpretation: {interpretation}",
            "Channel highlights:",
        ]
        lines.extend(
            " - EEG channel "
            f"{feature.channel_index}: mean={feature.mean:.3f}, "
            f"stddev={feature.stddev:.3f}, "
            f"peak_to_peak={feature.peak_to_peak:.3f}, "
            f"mean_abs_delta={feature.mean_abs_delta:.3f}"
            for feature in channel_features
        )
        return "\n".join(lines)

    def _infer_state(self, average_stddev, average_peak_to_peak, average_abs_delta):
        """Map an activation score onto a (label, interpretation) pair."""
        score = average_stddev + 0.5 * average_peak_to_peak + average_abs_delta

        # Ordered (upper bound, label, interpretation) tiers; first match wins.
        tiers = (
            (
                20,
                "steady / low-activation",
                "Signal changes are relatively small, which usually corresponds to a calm or stable feedback window.",
            ),
            (
                60,
                "balanced / moderate-activation",
                "Signal energy is present without large swings, suggesting a moderately engaged feedback window.",
            ),
        )
        for upper_bound, label, interpretation in tiers:
            if score < upper_bound:
                return (label, interpretation)

        return (
            "active / high-variation",
            "Signal energy and short-term variation are elevated, suggesting an active or strongly changing feedback window.",
        )

    def _as_float_list(self, values):
        """Coerce an arbitrary numeric sequence into a list of floats."""
        return list(map(float, values))

    def _mean(self, values):
        """Arithmetic mean; requires a non-empty sequence."""
        return sum(values) / len(values)

    def _stddev(self, values):
        """Population standard deviation (divides by N, not N-1)."""
        center = self._mean(values)
        return math.sqrt(sum((value - center) ** 2 for value in values) / len(values))

    def _mean_abs_delta(self, values):
        """Mean absolute difference between consecutive samples."""
        return self._mean([abs(right - left) for left, right in zip(values, values[1:])])


def main():
BoardShim.enable_dev_board_logger()

Expand All @@ -21,6 +127,7 @@ def main():
params.ip_port = 6677
params.ip_address = "225.1.1.1"
params.master_board = BoardIds.SYNTHETIC_BOARD
master_board_id = BoardIds.SYNTHETIC_BOARD.value
board_id = BoardIds.STREAMING_BOARD

board = BoardShim(board_id, params)
Expand All @@ -31,7 +138,13 @@ def main():
board.stop_stream()
board.release_session()
DataFilter.write_file(data_default, "default.csv", "w")
print(data_default)

transformer = NeuralFeedbackTextTransformer()
text_summary = transformer.transform(master_board_id, data_default)
with open("neuralfeedback_summary.txt", "w", encoding="ascii") as summary_file:
summary_file.write(text_summary + "\n")

print(text_summary)


if __name__ == "__main__":
Expand Down
106 changes: 106 additions & 0 deletions Networking-Test-Kit/LSL/eeg_lsl_to_afrikaans.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
import os
from collections import deque

import numpy as np
import torch
from pylsl import StreamInlet, resolve_stream

# -----------------------------
# CONFIGURATION
# -----------------------------
# Number of EEG channels consumed from each LSL sample.
CHANNELS = 8
# Samples per inference window (presumably 1 s at a 250 Hz stream — confirm rate).
WINDOW_SIZE = 250
# Samples dropped from the window between inferences; sets the sliding overlap.
STEP_SIZE = 50
# TorchScript (preferred) or pickled model file consumed by load_model().
MODEL_PATH = "eeg_transformer.pt"
# Seconds inlet.pull_sample() may block before returning None.
POLL_TIMEOUT_SECONDS = 1.0

# Simple English → Afrikaans mapping for demo purposes
# NOTE(review): "TOTSIE" is non-standard Afrikaans ("totsiens" is usual) —
# confirm whether the abbreviation is intended before changing the output.
EN_TO_AF = {
    "LEFT": "LINKER",
    "RIGHT": "REGTER",
    "UP": "OP",
    "DOWN": "AF",
    "HELLO": "HALLO",
    "BYE": "TOTSIE",
}


def load_model(model_path, device):
    """Load the EEG decoder model, preferring the safe TorchScript format.

    Args:
        model_path: Filesystem path to the serialized model.
        device: ``torch.device`` (or device string) passed as ``map_location``.

    Returns:
        The loaded model, switched to ``eval()`` mode.

    Raises:
        RuntimeError: If TorchScript loading fails and unsafe pickle loading
            has not been explicitly enabled via ``ALLOW_UNSAFE_TORCH_LOAD=1``.
            The original loading failure is attached as ``__cause__``.
    """
    try:
        # TorchScript archives do not execute arbitrary pickled code on load,
        # so this path is safe for untrusted model files.
        model = torch.jit.load(model_path, map_location=device)
        model.eval()
        return model
    except Exception as err:
        allow_unsafe = os.environ.get("ALLOW_UNSAFE_TORCH_LOAD", "0") == "1"
        if not allow_unsafe:
            # Chain the original failure explicitly so callers can see *why*
            # torch.jit.load failed (the original code dropped it as implicit
            # context only).
            raise RuntimeError(
                "Failed to load TorchScript model safely. "
                "Set ALLOW_UNSAFE_TORCH_LOAD=1 only for trusted model files."
            ) from err
        # SECURITY: torch.load with weights_only=False unpickles arbitrary
        # objects (code execution risk); only reached after explicit opt-in.
        model = torch.load(model_path, map_location=device, weights_only=False)
        model.eval()
        return model


def main():
    """Stream EEG over LSL and print Afrikaans labels decoded in real time."""
    print("Resolving EEG stream...")
    streams = resolve_stream("type", "EEG")
    if not streams:
        raise RuntimeError("No LSL EEG stream found.")
    inlet = StreamInlet(streams[0])
    print("Connected to EEG stream.")

    print("Loading transformer model...")
    device = torch.device(os.environ.get("EEG_DECODER_DEVICE", "cpu"))
    model = load_model(MODEL_PATH, device)
    print("Model loaded.")

    # Sliding sample window; maxlen keeps it bounded at WINDOW_SIZE.
    window = deque(maxlen=WINDOW_SIZE)
    warned_about_channels = False
    print("Starting real-time EEG → Afrikaans decoding...")

    try:
        while True:
            sample, _ = inlet.pull_sample(timeout=POLL_TIMEOUT_SECONDS)
            if sample is None:
                continue

            if len(sample) < CHANNELS:
                # Warn once, then silently drop undersized samples.
                if not warned_about_channels:
                    print(
                        f"Skipping samples with fewer than {CHANNELS} channels. "
                        f"Received {len(sample)} channels."
                    )
                    warned_about_channels = True
                continue
            window.append(sample[:CHANNELS])

            if len(window) < WINDOW_SIZE:
                continue

            # Per-channel z-score normalisation over the current window;
            # the 1e-6 term guards against division by a zero std.
            segment = np.array(window, dtype=np.float32)
            segment = (segment - np.mean(segment, axis=0)) / (
                np.std(segment, axis=0) + 1e-6
            )
            batch = torch.from_numpy(segment).unsqueeze(0).to(device)

            with torch.no_grad():
                logits = model(batch)
                predicted_class = torch.argmax(logits, dim=-1).item()

            # Prefer human-readable labels when the model exposes them;
            # otherwise fall back to the raw class index.
            if hasattr(model, "labels"):
                predicted_label_eng = model.labels[predicted_class]
            else:
                predicted_label_eng = str(predicted_class)
            af_label = EN_TO_AF.get(predicted_label_eng, predicted_label_eng)
            print(f"Afrikaans Prediction: {af_label}")

            # Slide the window forward by STEP_SIZE samples.
            for _ in range(STEP_SIZE):
                window.popleft()

    except KeyboardInterrupt:
        print("Real-time decoding stopped by user.")


if __name__ == "__main__":
main()
1 change: 1 addition & 0 deletions OpenBCI_GUI/W_AnalogRead.pde
Original file line number Diff line number Diff line change
Expand Up @@ -386,6 +386,7 @@ class AnalogReadBar{
for (int i=0; i < nPoints; i++) {
float timey = calcTimeAxis(i);
float value = (float)allData.get(i)[channels[auxValuesPosition]];
value = constrain(value, 0, 1024);
analogReadPoints.set(i, timey, value, "");
}

Expand Down
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -144,3 +144,4 @@ MIT
- [Cyton+Daisy Boards](https://shop.openbci.com/collections/frontpage/products/cyton-daisy-biosensing-boards-16-channel)
- [GUI Widget Tutorial](https://docs.openbci.com/Software/OpenBCISoftware/GUIWidgets/#custom-widget)
- [Run GUI from Processing IDE](https://docs.openbci.com/Software/OpenBCISoftware/GUIDocs/#running-the-openbci-gui-from-the-processing-ide)
- [Ganglion Board](https://shop.openbci.com/collections/frontpage/products/pre-order-ganglion-board)
2 changes: 1 addition & 1 deletion tools/graph_gui_downloads.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@

for asset in assets:
if "mac" in asset["name"].lower():
download_count_mac[release_date] = download_count_mac.get(release_date, 0) + asset["download_count"]
download_count_mac[release_date] = download_count_mac.get(release_date, 0) + asset["download_count"] # type: ignore
elif "linux" in asset["name"].lower():
download_count_linux[release_date] = download_count_linux.get(release_date, 0) + asset["download_count"]
elif "win" in asset["name"].lower():
Expand Down