diff --git a/_images/central_limit_theorem.svg b/_images/central_limit_theorem.svg index 31a3e3f8..b1e4f4be 100644 --- a/_images/central_limit_theorem.svg +++ b/_images/central_limit_theorem.svg @@ -2,20 +2,20 @@ + inkscape:version="1.4 (86a8ad7, 2024-10-11)" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns="http://www.w3.org/2000/svg" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:dc="http://purl.org/dc/elements/1.1/"> @@ -40,13 +40,17 @@ inkscape:window-height="992" id="namedview2198" showgrid="false" - inkscape:zoom="0.64744413" - inkscape:cx="688.7021" - inkscape:cy="387.7892" + inkscape:zoom="0.45781213" + inkscape:cx="556.99703" + inkscape:cy="242.45753" inkscape:window-x="72" - inkscape:window-y="27" + inkscape:window-y="0" inkscape:window-maximized="0" - inkscape:current-layer="svg2196" /> + inkscape:current-layer="svg2196" + inkscape:showpageshadow="2" + inkscape:pagecheckerboard="0" + inkscape:deskcolor="#d1d1d1" + inkscape:document-units="pt" /> @@ -61,7 +65,7 @@ @@ -76,7 +80,7 @@ @@ -89,7 +93,9 @@ helper_size="0" apply_no_weight="true" apply_with_weight="true" - only_selected="false" /> + only_selected="false" + lpeversion="0" + uniform="false" /> - - + - - + + + + + + + + + + + + + + style="stroke-linecap:butt;stroke-linejoin:round" + transform="translate(-10.3,-6.8)"> @@ -2306,7 +2357,7 @@ style="stroke-linecap:butt;stroke-linejoin:round"> @@ -2315,7 +2366,7 @@ style="stroke-linecap:butt;stroke-linejoin:round"> @@ -2324,7 +2375,7 @@ style="stroke-linecap:butt;stroke-linejoin:round"> @@ -2333,14 +2384,15 @@ style="stroke-linecap:butt;stroke-linejoin:round"> + style="stroke-linecap:butt;stroke-linejoin:round" + transform="translate(-10.3,-6.8)"> @@ -3361,7 +3413,7 @@ style="stroke-linecap:butt;stroke-linejoin:round"> @@ -3370,7 +3422,7 @@ 
style="stroke-linecap:butt;stroke-linejoin:round"> @@ -3379,7 +3431,7 @@ style="stroke-linecap:butt;stroke-linejoin:round"> @@ -3388,14 +3440,15 @@ style="stroke-linecap:butt;stroke-linejoin:round"> + style="stroke-linecap:butt;stroke-linejoin:round" + transform="translate(-10.3,-6.8)"> @@ -6416,7 +6469,7 @@ style="stroke-linecap:butt;stroke-linejoin:round"> @@ -6425,7 +6478,7 @@ style="stroke-linecap:butt;stroke-linejoin:round"> @@ -6434,7 +6487,7 @@ style="stroke-linecap:butt;stroke-linejoin:round"> @@ -6443,14 +6496,15 @@ style="stroke-linecap:butt;stroke-linejoin:round"> + style="stroke-linecap:butt;stroke-linejoin:round" + transform="translate(-10.3,-6.8)"> @@ -8471,7 +8525,7 @@ style="stroke-linecap:butt;stroke-linejoin:round"> @@ -8480,7 +8534,7 @@ style="stroke-linecap:butt;stroke-linejoin:round"> @@ -8489,7 +8543,7 @@ style="stroke-linecap:butt;stroke-linejoin:round"> @@ -8498,66 +8552,15 @@ style="stroke-linecap:butt;stroke-linejoin:round"> - - - - - - - - - - - - - - - - + style="stroke-linecap:butt;stroke-linejoin:round" + transform="matrix(1.4778986,0,0,1.4778986,235.14552,335.88139)"> + style="stroke-linecap:butt;stroke-linejoin:round" + transform="matrix(1.4778986,0,0,1.4778986,235.14552,335.88139)"> @@ -9588,7 +9592,7 @@ + + + + + + - - - - - Normal Distribution - + diff --git a/_images/detection_realtime.png b/_images/detection_realtime.png new file mode 100644 index 00000000..fd8541a5 Binary files /dev/null and b/_images/detection_realtime.png differ diff --git a/_images/detection_realtime.svg b/_images/detection_realtime.svg deleted file mode 100644 index cd1c80d9..00000000 --- a/_images/detection_realtime.svg +++ /dev/null @@ -1,95912 +0,0 @@ - - - - - - - - 2026-02-05T20:17:37.262558 - image/svg+xml - - - Matplotlib v3.10.8, https://matplotlib.org/ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/_images/gaussian_IQ.png b/_images/gaussian_IQ.png new file mode 100644 index 
00000000..74147280 Binary files /dev/null and b/_images/gaussian_IQ.png differ diff --git a/_images/gaussian_histogram.png b/_images/gaussian_histogram.png new file mode 100644 index 00000000..9544a26f Binary files /dev/null and b/_images/gaussian_histogram.png differ diff --git a/_images/gaussian_transformed.png b/_images/gaussian_transformed.png new file mode 100644 index 00000000..3b56ceb4 Binary files /dev/null and b/_images/gaussian_transformed.png differ diff --git a/conf.py b/conf.py index 0df64d0f..7c2c60ff 100644 --- a/conf.py +++ b/conf.py @@ -22,6 +22,7 @@ 'sphinx.ext.mathjax', 'sphinx.ext.autosectionlabel', #'sphinxcontrib.tikz', #added for dutch + "sphinxcontrib.mermaid" ] mathjax_path = "mathjax/tex-mml-chtml.js" # so that the textbook can work offline diff --git a/content/detection.rst b/content/detection.rst index 9c12cea6..82249c23 100644 --- a/content/detection.rst +++ b/content/detection.rst @@ -436,40 +436,16 @@ Implementation Our detector will follow this workflow: -.. code-block:: text - - ┌─────────────────────────────────────────────────────────────┐ - │ Continuous IQ Stream from SDR (e.g., 1 MHz sample rate) │ - └────────────────────┬────────────────────────────────────────┘ - │ - ▼ - ┌─────────────────────────────────────────────────────────────┐ - │ Buffer Accumulation (e.g., 100k samples = 0.1 sec) │ - └────────────────────┬────────────────────────────────────────┘ - │ - ▼ - ┌─────────────────────────────────────────────────────────────┐ - │ Cross-Correlation with Known Preamble │ - │ → Produces correlation vs. 
sample index │ - └────────────────────┬────────────────────────────────────────┘ - │ - ▼ - ┌─────────────────────────────────────────────────────────────┐ - │ CFAR Threshold Computation │ - │ → Adaptive threshold that tracks noise floor │ - └────────────────────┬────────────────────────────────────────┘ - │ - ▼ - ┌─────────────────────────────────────────────────────────────┐ - │ Peak Detection (correlation > threshold) │ - │ → List of candidate packet start indices │ - └────────────────────┬────────────────────────────────────────┘ - │ - ▼ - ┌─────────────────────────────────────────────────────────────┐ - │ Packet Extraction & Validation │ - │ → Extract samples, pass to demodulator │ - └─────────────────────────────────────────────────────────────┘ +.. mermaid:: + + flowchart TD + A("Continuous IQ Stream from SDR
(1 MHz sample rate)") + B("Buffer Accumulation
(100k samples = 0.1 sec)") + C("Cross-Correlation with Known Preamble") + D("CFAR Threshold Computation") + E("Peak Detection
(correlation > threshold)") + F("Packet Extraction & Validation") + A --> B --> C --> D --> E --> F To avoid missing packets that straddle buffer boundaries, we use an **overlap-save** approach, where each buffer includes the last ``N_preamble`` samples from the previous buffer. This ensures any packet starting near the end of buffer ``i`` will be fully contained in buffer ``i+1``. This requires a small additional computational overhead but we don't want to miss packets just because they straddle buffer boundaries. @@ -825,7 +801,6 @@ Step 7: Visualize Results axes[2].legend() plt.tight_layout() - plt.savefig('../_images/detection_realtime.svg', bbox_inches='tight') plt.show() The visualization should show: @@ -834,9 +809,9 @@ The visualization should show: 2. **Middle plot**: Correlation output with adaptive CFAR threshold tracking the noise floor 3. **Bottom plot**: Detected packets highlighted as peaks above threshold -.. image:: ../_images/detection_realtime.svg - :align: center - :target: ../_images/detection_realtime.svg +.. image:: ../_images/detection_realtime.png + :align: center + :scale: 50 % :alt: Real-time packet detection results Practical Considerations and Tuning diff --git a/content/noise.rst b/content/noise.rst index 9d9ea1c4..05c3b89e 100644 --- a/content/noise.rst +++ b/content/noise.rst @@ -1,10 +1,10 @@ .. _noise-chapter: -############# -Noise and dB -############# +########################## +Noise and Random Variables +########################## -In this chapter we will discuss noise, including how it is modeled and handled in a wireless communications system. Concepts include AWGN, complex noise, and SNR/SINR. We will also introduce decibels (dB) along the way, as it is widely within wireless communications and SDR. +In this chapter we will discuss noise, including how it is modeled and handled in a wireless communications system. Concepts include AWGN, complex noise, and SNR/SINR. 
We will also introduce decibels (dB) along the way, as it is widely used within wireless communications and SDR. Lastly, we take a deeper dive into the fundamental concepts of random variables and random processes, which are essential for understanding noise, channel effects, and many signal processing techniques in wireless communications. We'll cover probability distributions, expectation, variance, and how random processes evolve over time. These concepts form the mathematical foundation for analyzing noise and many other topics throughout SDR and DSP. ************************ Gaussian Noise @@ -14,7 +14,8 @@ Most people are aware of the concept of noise: unwanted fluctuations that can ob .. image:: ../_images/noise.png :scale: 70 % - :align: center + :align: center + :target: ../_images/noise.png Note how the average value is zero in the time domain graph. If the average value wasn't zero, then we could subtract the average value, call it a bias, and we would be left with an average of zero. Also note that the individual points in the graph are *not* "uniformly random", i.e., larger values are rarer, most of the points are closer to zero. @@ -52,6 +53,7 @@ To further illustrate the problems of scale we encounter in signal processing, c :scale: 70 % :align: center :alt: Depiction of why it's important to understand dB or decibels, showing a spectrogram using linear vs log scale + :target: ../_images/linear_vs_log.png For a given value x, we can represent x in dB using the following formula: @@ -64,7 +66,7 @@ In Python: x_db = 10.0 * np.log10(x) -You may have seen that :code:`10 *` be a :code:`20 *` in other domains. Whenever you are dealing with a power of some sort, you use 10, and you use 20 if you are dealing with a non-power value like voltage or current. In DSP we tend to deal with a power. In fact there is not a single time in this whole textbook we need to use 20 instead of 10. +You may have seen that :code:`10 *` be a :code:`20 *` in other domains. 
Whenever you are dealing with a power of some sort, you use 10, and you use 20 if you are dealing with a non-power value like voltage or current. In DSP we tend to deal with a power. We convert from dB back to linear (normal numbers) using: @@ -88,6 +90,7 @@ Some common errors people will run into when new to dB are: .. image:: ../_images/db.png :scale: 80 % :align: center + :target: ../_images/db.png It is also important to understand that dB is not technically a "unit". A value in dB alone is unit-less, like if something is 2x larger, there are no units until I tell you the units. dB is a relative thing. In audio when they say dB, they really mean dBA which is units for sound level (the A is the units). In wireless we typically use watts to refer to an actual power level. Therefore, you may see dBW as a unit, which is relative to 1 W. You may also see dBmW (often written dBm for short) which is relative to 1 mW. For example, someone can say "our transmitter is set to 3 dBW" (so 2 watts). Sometimes we use dB by itself, meaning it is relative and there are no units. One can say, "our signal was received 20 dB above the noise floor". Here's a little tip: 0 dBm = -30 dBW. @@ -133,6 +136,7 @@ In the :ref:`freq-domain-chapter` chapter we tackled "Fourier pairs", i.e., what :scale: 110 % :align: center :alt: AWGN in the time domain is also Gaussian noise in the frequency domain, although it looks like a flat line when you take the magnitude and perform averaging + :target: ../_images/noise_freq.png We can see that it looks roughly the same across all frequencies and is fairly flat. It turns out that Gaussian noise in the time domain is also Gaussian noise in the frequency domain. So why don't the two plots above look the same? It's because the frequency domain plot is showing the magnitude of the FFT, so there will only be positive numbers. Importantly, it's using a log scale, or showing the magnitude in dB. Otherwise these graphs would look the same. 
We can prove this to ourselves by generating some noise (in the time domain) in Python and then taking the FFT. @@ -157,6 +161,7 @@ Take note that the :code:`randn()` function by default uses mean = 0 and varianc :scale: 100 % :align: center :alt: Example of white noise simulated in Python + :target: ../_images/noise_python.png You can then produce the flat PSD that we had in GNU Radio by taking the log and averaging a bunch together. The signal we generated and took the FFT of was a real signal (versus complex), and the FFT of any real signal will have matching negative and positive portions, so that's why we only saved the positive portion of the FFT output (the 2nd half). But why did we only generate "real" noise, and how do complex signals work into this? @@ -198,6 +203,7 @@ To plot complex noise in the time domain, like any complex signal we need two li :scale: 80 % :align: center :alt: Complex noise simulated in Python + :target: ../_images/noise3.png You can see that the real and imaginary portions are completely independent. @@ -214,6 +220,7 @@ What does complex Gaussian noise look like on an IQ plot? Remember the IQ plot :scale: 60 % :align: center :alt: Complex noise on an IQ or constellation plot, simulated in Python + :target: ../_images/noise_iq.png It looks how we would expect; a random blob centered around 0 + 0j, or the origin. Just for fun, let's try adding noise to a QPSK signal to see what the IQ plot looks like: @@ -221,12 +228,15 @@ It looks how we would expect; a random blob centered around 0 + 0j, or the origi :scale: 60 % :align: center :alt: Noisy QPSK simulated in Python + :target: ../_images/noisey_qpsk.png Now what happens when the noise is stronger? .. image:: ../_images/noisey_qpsk2.png :scale: 50 % :align: center + :alt: Noisy QPSK with stronger noise simulated in Python + :target: ../_images/noisey_qpsk2.png We are starting to get a feel for why transmitting data wirelessly isn't that simple. 
We want to send as many bits per symbol as we can, but if the noise is too high then we will get erroneous bits on the receiving end. @@ -234,7 +244,7 @@ We are starting to get a feel for why transmitting data wirelessly isn't that si AWGN ************************* -Additive White Gaussian Noise (AWGN) is an abbreviation you will hear a lot in the DSP and SDR world. The GN, Gaussian Noise, we already discussed. Additive just means the noise is being added to our received signal. White, in the frequency domain, means the spectrum is flat across our entire observation band. It will almost always be white in practice,or approximately white. In this textbook we will use AWGN as the only form of noise when dealing with communications links and link budgets and such. Non-AWGN noise tends to be a niche topic. +Additive White Gaussian Noise (AWGN) is an abbreviation you will hear a lot in the DSP and SDR world. The GN, Gaussian Noise, we already discussed. Additive just means the noise is being added to our received signal. White, in the frequency domain, means the spectrum is flat across our entire observation band. It will almost always be white in practice, or approximately white. In this textbook we will use AWGN as the only form of noise when dealing with communications links and link budgets and such. Non-AWGN noise tends to be a niche topic. ************************* SNR and SINR @@ -262,26 +272,360 @@ Signal-to-Interference-plus-Noise Ratio (SINR) is essentially the same as SNR ex What constitutes interference is based on the application/situation, but typically it is another signal that is interfering with the signal of interest (SOI), and is either overlapping with the SOI in frequency, and/or cannot be filtered out for some reason. 
-************************* -External Resources -************************* +********************************* +Deeper Dive into Random Variables +********************************* + +So far we have avoided getting too mathematical, but now we are going to take a step back and introduce the concept of random variables and how they are used in the context of wireless communications and SDR. A **random variable** is a mathematical concept that maps outcomes of a random experiment to numerical values. Random variables represent quantities whose values are uncertain until they are observed or measured, like our noise samples. Think of rolling a six-sided die. Before you roll it, you don't know what number will appear. We can define a random variable :math:`X` that represents the outcome of the roll. The value of :math:`X` is one of {1, 2, 3, 4, 5, 6}, but we don't know which one until we actually roll the die. + +In the context of wireless communications and SDR, random variables are everywhere: + +* The thermal noise in a receiver is modeled as a random variable at each instant in time +* The amplitude of a received signal affected by multipath fading is random +* The phase offset introduced by a changing channel can be modeled as a random variable between :math:`0` and :math:`2\pi` +* Even the data bits we transmit can be treated as random variables + +**Single Sample vs. Many Samples** + +This is a crucial distinction that often causes confusion: + +* A **single realization** or **single sample** of a random variable is just one number—one outcome of the random experiment +* To characterize a random variable (find its average, spread, etc.), we need **many realizations**—many outcomes + +For example, if you call ``np.random.randn()`` in Python without any arguments, it returns a single random number drawn from a Gaussian distribution. That single number tells you almost nothing about the distribution itself. 
But if you call ``np.random.randn(10000)`` and generate 10,000 samples, you can now estimate properties of the distribution like its mean and variance. + +.. code-block:: python + + import numpy as np + + # Single sample - just one number + x_single = np.random.randn() + print(x_single) # might be 0.534, -1.23, or any other value + + # Many samples - now we can characterize the distribution + x_many = np.random.randn(10000) + print(np.mean(x_many)) # will be close to 0 + print(np.var(x_many)) # will be close to 1 + +Joint Distributions +#################### + +So far we've focused on single random variables. When dealing with two or more random variables simultaneously, we use a **joint distribution**. + +For continuous variables :math:`X` and :math:`Y`, this is described by the **joint PDF**: + +.. math:: + f_{X,Y}(x,y) + +The joint PDF tells us how likely it is for :math:`X` to take value :math:`x` *and* :math:`Y` to take value :math:`y` at the same time. + +From the joint PDF, we can compute: + +* Marginal PDFs (e.g., :math:`f_X(x)` or :math:`f_Y(y)`) +* Expectations such as :math:`E[XY]` +* Covariance and correlation +* Probabilities involving both variables + +For example, the marginal PDF of :math:`X` is obtained by integrating out :math:`Y`: + +.. math:: + f_X(x) = \int_{-\infty}^{\infty} f_{X,Y}(x,y)\,dy + +Joint distributions are the mathematical foundation for understanding dependence, correlation, and independence between random variables. + + +Probability Distributions +######################### + +A **probability distribution** describes how likely different values of a random variable are. For a continuous random variable, we use a **probability density function (PDF)**, denoted :math:`f_X(x)`. The PDF tells us the relative likelihood of the random variable taking on different values. + +The most important distribution in SDR and communications is the **Gaussian (Normal) distribution**. 
A Gaussian random variable :math:`X` with mean :math:`\mu` and variance :math:`\sigma^2` has the PDF: + +.. math:: + f_X(x) = \frac{1}{\sqrt{2\pi\sigma^2}} e^{-\frac{(x-\mu)^2}{2\sigma^2}} + +This is the famous "bell curve" you've likely seen before. The distribution is completely characterized by two parameters: + +* **Mean** :math:`\mu`: the center of the distribution +* **Variance** :math:`\sigma^2`: how spread out the distribution is (standard deviation :math:`\sigma` is the square root of variance) + +In Python, ``np.random.randn()`` generates samples from a **standard Gaussian** distribution with :math:`\mu = 0` and :math:`\sigma^2 = 1`. We can visualize this: + +.. code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + # Generate 10,000 samples from standard Gaussian + x = np.random.randn(10000) + + # Create histogram to visualize the distribution + plt.hist(x, bins=50, density=True, alpha=0.7, edgecolor='black') + plt.xlabel('Value') + plt.ylabel('Probability Density') + plt.title('Gaussian Distribution (μ=0, σ²=1)') + plt.grid(True) + plt.show() + +.. image:: ../_images/gaussian_histogram.png + :scale: 80% + :align: center + :alt: Histogram of Gaussian distributed samples + :target: ../_images/gaussian_histogram.png + +Expectation (a.k.a. Mean) +######################### + +The **expectation** or **expected value** of a random variable, denoted :math:`E[X]` or :math:`\mu`, represents its average value over many realizations. For a continuous random variable with PDF :math:`f_X(x)`, the expectation is: + +.. math:: + E[X] = \int_{-\infty}^{\infty} x \cdot f_X(x) \, dx + +In practice, when we have :math:`N` samples :math:`x_1, x_2, \ldots, x_N` drawn from the distribution, we estimate the expectation using the **sample mean**: + +.. 
math:: + \hat{\mu} = \frac{1}{N} \sum_{n=1}^{N} x_n + +The expectation is a **linear operator**, which means: + +* :math:`E[aX + b] = aE[X] + b` for constants :math:`a` and :math:`b` +* :math:`E[X + Y] = E[X] + E[Y]` for any two random variables + +This linearity is extremely useful in signal processing! + +Variance and Standard Deviation +############################### + +The **variance** of a random variable, denoted :math:`\text{Var}(X)` or :math:`\sigma^2`, measures how spread out its values are around the mean. It's defined as the expected value of the squared deviation from the mean: + +.. math:: + \text{Var}(X) = E[(X - \mu)^2] = E[X^2] - (E[X])^2 + +When we have :math:`N` samples, we estimate variance using: + +.. math:: + \hat{\sigma}^2 = \frac{1}{N} \sum_{n=1}^{N} (x_n - \hat{\mu})^2 + +The **standard deviation** :math:`\sigma` is simply the square root of variance: :math:`\sigma = \sqrt{\sigma^2}`. + +Note the :math:`\enspace \hat{} \enspace` symbol, known as a "hat", in the above equation at :math:`\sigma` and that for sample mean. The hat symbolizes we're estimating the mean/variance. It's not always exactly equal to the true mean/variance, but it gets closer to the true value as we increase the number of samples. + +**Key Property:** If :math:`X` is a random variable with variance :math:`\sigma^2`, then: + +* Scaling: :math:`\text{Var}(aX) = a^2 \text{Var}(X)` +* Shifting: :math:`\text{Var}(X + b) = \text{Var}(X)` (adding a constant doesn't change the spread) + +And consequently for standard deviation :math:`\sigma`: + +* Scaling: :math:`\sigma(aX) = a\sigma(X)` +* Shifting: :math:`\sigma(X+b) = \sigma(X)` + +.. image:: ../_images/gaussian_transformed.png + :scale: 80% + :align: center + :alt: Scaling and shifting the Gaussian Distribution. (notice the scales on x and y axes) + :target: ../_images/gaussian_transformed.png + +Scaling and shifting the Gaussian Distribution. 
(notice the scales on x and y axes) + +**Variance and Power** -Further resources about AWGN, SNR, and variance: +In signal processing, for a **zero-mean** signal (mean ~ 0), the variance equals the **average power**. This is why we often use the terms interchangeably: -1. https://en.wikipedia.org/wiki/Additive_white_Gaussian_noise -2. https://en.wikipedia.org/wiki/Signal-to-noise_ratio -3. https://en.wikipedia.org/wiki/Variance +.. math:: + P = \text{Var}(X) = E[X^2] \quad \text{(when } E[X] = 0\text{)} + +This relationship is fundamental in analyzing noise power, signal-to-noise ratio (SNR), and link budgets. + +.. code-block:: python + + noise_power = 2.0 + n = np.random.randn(N) * np.sqrt(noise_power) + print(np.var(n)) # will be approximately 2.0 + +Covariance +########## + +The **covariance** between two random variables :math:`X` and :math:`Y` is defined as: + +.. math:: + \text{Cov}(X,Y) = E[(X - E[X])(Y - E[Y])] + +An equivalent and often more convenient form is: + +.. math:: + \text{Cov}(X,Y) = E[XY] - E[X]E[Y] + +Covariance measures how two variables vary together: + +* Positive covariance: they tend to increase and decrease together +* Negative covariance: one tends to increase when the other decreases +* Zero covariance: they are uncorrelated + +If both variables are zero-mean, this simplifies to: + +.. math:: + \text{Cov}(X,Y) = E[XY] + +Covariance has units (it is not normalized), which is why we often use the **correlation coefficient** (or simply correlation) in practice: + +.. math:: + \rho_{XY} = \frac{\text{Cov}(X,Y)}{\sigma_X \sigma_Y} + +This produces a dimensionless value between −1 and +1. + +Variance of a Sum of Variables +############################### + +In signal processing we often deal with sums of random variables, such as a signal plus noise: + +.. math:: + Z = X + Y + +The variance of this sum depends on whether :math:`X` and :math:`Y` are independent (or more generally, correlated). +In full generality: +.. 
math:: + \text{Var}(X + Y) = \text{Var}(X) + \text{Var}(Y) + 2\,\text{Cov}(X,Y) +where :math:`\text{Cov}(X,Y)` is the **covariance** between :math:`X` and :math:`Y`. +**Independent Case** +If :math:`X` and :math:`Y` are independent (or simply uncorrelated), then the expression simplifies to: + +.. math:: + \text{Var}(X + Y) = \text{Var}(X) + \text{Var}(Y) +This result is extremely important in communications. For example, if a received signal is: +.. math:: + R = S + N + +where :math:`S` is the signal and :math:`N` is independent noise, then the total power is just the sum of signal power and noise power. + +This is why SNR calculations are so straightforward. + +************************ +Complex Random Variables +************************ + +In SDR, we work extensively with **complex-valued signals**, which means we also work with complex random variables. A complex random variable has the form: + +.. math:: + Z = X + jY +where :math:`X` and :math:`Y` are both real-valued random variables representing the in-phase (I) and quadrature (Q) components. +**Complex Gaussian Noise** +The most common complex random variable in wireless communications is **complex Gaussian noise**, where both :math:`X` and :math:`Y` are independent Gaussian random variables with the same variance. +For example, if :math:`X \sim \mathcal{N}(\alpha_1, \sigma_1^2)` and :math:`Y \sim \mathcal{N}(\alpha_2, \sigma_2^2)` are independent, then the complex random variable :math:`Z = X + jY` has: +* Mean: :math:`E[Z] = E[X] + jE[Y] = \alpha_1 + j\alpha_2` +* Variance (Power): :math:`\text{Var}(Z) = \text{Var}(X) + \text{Var}(Y) = \sigma_1^2 + \sigma_2^2` +.. image:: ../_images/gaussian_IQ.png + :scale: 80% + :align: center + :alt: Complex Gaussian noise visualized as two independent Gaussian random variables on the I and Q axes + :target: ../_images/gaussian_IQ.png + +This is why when we create complex Gaussian noise with unit power (variance = 1), we use: + +.. 
code-block:: python + + N = 10000 + n = (np.random.randn(N) + 1j*np.random.randn(N)) / np.sqrt(2) + print(np.var(n)) # ~ 1 + +The division by :math:`\sqrt{2}` ensures that the total power (sum of I and Q variances) equals 1. + +.. code-block:: python + + # Without normalization: + n_raw = np.random.randn(N) + 1j*np.random.randn(N) + print(np.var(np.real(n_raw))) # ~ 1 + print(np.var(np.imag(n_raw))) # ~ 1 + print(np.var(n_raw)) # ~ 2 (total power) + + # With normalization: + n_norm = n_raw / np.sqrt(2) + print(np.var(n_norm)) # ~ 1 (unit power) + +**************** +Random Processes +**************** + +So far we've discussed random variables—random values at a single point. A **random process** (also called a **stochastic process**) is a collection of random variables indexed by time: + +.. math:: + X(t) \quad \text{or} \quad X[n] \text{ for discrete time} + +At each time :math:`t`, :math:`X(t)` is a random variable. Think of a random process as a signal that evolves randomly over time. + +Examples in wireless communications: + +* Noise at the receiver: :math:`N(t)` or :math:`N[n]` +* A signal experiencing time-varying fading: :math:`H(t)S(t)` +* Samples from an SDR: each batch is a realization of a random process + +**Stationary Processes** + +A random process is **stationary** if its statistical properties don't change over time. In particular, a **wide-sense stationary (WSS)** process has: + +* Constant mean: :math:`E[X(t)] = \mu` for all :math:`t` +* Autocorrelation that depends only on time difference: :math:`E[X(t)X(t+\tau)]` depends only on :math:`\tau`, not :math:`t` + +Many noise sources in wireless systems are approximately stationary, which simplifies analysis significantly. + +**White Noise** + +**White noise** is a random process where samples at different times are uncorrelated, and the power spectral density is constant across all frequencies. 
Additive White Gaussian Noise (AWGN) is both: + +* **White**: uncorrelated in time, flat power spectrum +* **Gaussian**: each sample is Gaussian distributed + +When we generate noise in Python using ``np.random.randn(N)``, each of the :math:`N` samples is an independent Gaussian random variable, creating a white noise process. + + +Independence and Correlation +############################# + +Two random variables :math:`X` and :math:`Y` are **independent** if knowing the value of one tells you nothing about the other. Mathematically, their joint PDF factors: + +.. math:: + f_{X,Y}(x,y) = f_X(x) \cdot f_Y(y) + +Independence is a strong condition. A weaker condition is **uncorrelated**, which means: + +.. math:: + E[XY] = E[X]E[Y] + +For Gaussian random variables, uncorrelated implies independent (this is a special property of Gaussians). + +In complex Gaussian noise, the I and Q components are independent: + +.. code-block:: python + N = 10000 + I = np.random.randn(N) + Q = np.random.randn(N) + + # Check independence via correlation + correlation = np.corrcoef(I, Q)[0, 1] + print(f"Correlation between I and Q: {correlation:.4f}") # ~ 0 + +*************************** +Further Reading +*************************** + +1. Papoulis, A., & Pillai, S. U. (2002). *Probability, Random Variables, and Stochastic Processes*. McGraw-Hill. +2. Kay, S. M. (2006). *Intuitive Probability and Random Processes using MATLAB®*. Springer. +3. https://en.wikipedia.org/wiki/Random_variable +4. https://en.wikipedia.org/wiki/Normal_distribution +5. https://en.wikipedia.org/wiki/Stochastic_process +6. https://en.wikipedia.org/wiki/Additive_white_Gaussian_noise +7. 
https://en.wikipedia.org/wiki/Signal-to-noise_ratio diff --git a/content/usrp.rst b/content/usrp.rst index 4b278f45..d3540cf9 100644 --- a/content/usrp.rst +++ b/content/usrp.rst @@ -323,7 +323,7 @@ For debugging sake, you can verify the 10 MHz signal is showing up to the USRP b Phase Coherent Sync of Multiple B210s for MIMO ********************************************** -In order to perform operations like direction of arrival (DOA) and phased array digital beamforming, you typically need all receive channels to be phase coherent, meaning the relative phases between the receive channels stay constant and can be calibrated out. The B200 and B210 USRPs are based on the AD9361 RFIC, which generates the LO internally, there is no way to feed it an external LO, so even if you feed the USRP a 10 MHz reference signal and PPS, that will only allow multiple USRPs to synchronized in frequency and sample clock, not phase, because every time the device turns on or changes frequency, there is a new random phase offset due to the dividers in the VCO/PLL chains, for more information see `this page `_. One method to achieve phase sync is to add hardware that involves taking a calibration signal (either generated by the USRP, or wideband noise source, or tone), splitting it, and feeding it into all receive ports, and performing a quick calibration each time the USRPs are turned on or retuned. Note that changing the gain will also lead to phase shifts, but as long as the B210's are kept at the same gain the phase difference shouldn't change significantly. The `Techtile project `_ has additional information on this topic, including custom images that may allow multiple B210s to retune together so that they maintain sync, although it likely still requires calibration with external hardware each time the radios turn on. 
+In order to perform operations like direction of arrival (DOA) and phased array digital beamforming, you typically need all receive channels to be phase coherent, meaning the relative phases between the receive channels stay constant and can be calibrated out. The B200 and B210 USRPs are based on the AD9361 RFIC, which generates the LO internally; there is no way to feed it an external LO, so even if you feed the USRP a 10 MHz reference signal and PPS, that will only allow multiple USRPs to be synchronized in frequency and sample clock, not phase, because every time the device turns on or changes frequency, there is a new random phase offset due to the dividers in the VCO/PLL chains; for more information see `this page `_. One method to achieve phase sync is to add hardware that involves taking a calibration signal (either generated by the USRP, or wideband noise source, or tone), splitting it, and feeding it into all receive ports, and performing a quick calibration each time the USRPs are turned on or retuned. Note that changing the gain will also lead to phase shifts, but as long as the B210s are kept at the same gain the phase difference shouldn't change significantly. The `Techtile project `_ has additional information on this topic, including custom images that may allow multiple B210s to re-tune together so that they maintain sync, although it likely still requires calibration with external hardware each time the radios turn on. 
**** GPIO diff --git a/figure-generating-scripts/detection_realtime.py b/figure-generating-scripts/detection_realtime.py index 2b740301..f90adabb 100644 --- a/figure-generating-scripts/detection_realtime.py +++ b/figure-generating-scripts/detection_realtime.py @@ -149,6 +149,6 @@ def generate_packet_stream(preamble, packet_length, num_packets, sample_rate, sn axes[2].legend(fontsize=10) plt.tight_layout() -plt.savefig('../_images/detection_realtime.svg', bbox_inches='tight', dpi=150) -print("Figure saved to ../_images/detection_realtime.svg") +plt.savefig('../_images/detection_realtime.png', bbox_inches='tight', dpi=150) +print("Figure saved to ../_images/detection_realtime.png") plt.show() \ No newline at end of file diff --git a/figure-generating-scripts/random_variables.py b/figure-generating-scripts/random_variables.py new file mode 100644 index 00000000..7f458918 --- /dev/null +++ b/figure-generating-scripts/random_variables.py @@ -0,0 +1,69 @@ +import numpy as np +import matplotlib.pyplot as plt + +# Generate 10,000 samples from standard Gaussian +x = np.random.randn(10000) + +# Create histogram to visualize the distribution +plt.hist(x, bins=50, density=True, alpha=0.7, edgecolor='black') +plt.xlabel('Value') +plt.ylabel('Probability Density') +plt.title('Gaussian Distribution (μ=0, σ²=1)') +plt.grid(True) +plt.show() + +# Simulation parameters +N = 10000 + +# Generate standard Gaussian random variables (mean=0, var=1) +x = np.random.randn(N) + +# Create different random variables by scaling and shifting +y1 = x # mean=0, var=1 +y2 = 2 * x # mean=0, var=4 +y3 = x + 3 # mean=3, var=1 +y4 = 0.5 * x - 1 # mean=-1, var=0.25 + +# Verify properties +signals = [y1, y2, y3, y4] +labels = ['y1: x', 'y2: 2x', 'y3: x+3', 'y4: 0.5x-1'] + +for i, (sig, label) in enumerate(zip(signals, labels)): + print(f"{label}") + print(f" Sample mean: {np.mean(sig):.3f}") + print(f" Sample variance: {np.var(sig):.3f}") + print() + +# Plot histograms +fig, axes = plt.subplots(2, 2, 
figsize=(10, 8)) +axes = axes.flatten() + +for i, (sig, label, ax) in enumerate(zip(signals, labels, axes)): + ax.hist(sig, bins=50, density=True, alpha=0.7, edgecolor='black') + ax.set_title(label) + ax.set_xlabel('Value') + ax.set_ylabel('Density') + ax.grid(True) + +plt.tight_layout() +plt.show() + +# Complex Gaussian noise demonstration +n_complex = (np.random.randn(N) + 1j*np.random.randn(N)) / np.sqrt(2) + +print("Complex Gaussian Noise (unit power):") +print(f" Real part variance: {np.var(np.real(n_complex)):.3f}") +print(f" Imag part variance: {np.var(np.imag(n_complex)):.3f}") +print(f" Total variance: {np.var(n_complex):.3f}") + +# Plot on IQ plane +plt.figure(figsize=(6, 6)) +plt.plot(np.real(n_complex[:1000]), np.imag(n_complex[:1000]), '.', alpha=0.3) +plt.xlabel('In-phase (I)') +plt.ylabel('Quadrature (Q)') +plt.title('Complex Gaussian Noise on IQ Plane') +plt.grid(True) +plt.axis('equal') +plt.xlim([-3, 3]) +plt.ylim([-3, 3]) +plt.show() \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index f2c9865e..36ebd0be 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,12 +1,13 @@ sphinx==4.4.0 sphinxcontrib-tikz==0.4.20 sphinxcontrib-spelling==8.0.0 -docutils==0.17.1 -patreon==0.5.0 -imageio==2.11.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 -sphinxcontrib-serializinghtml==1.1.5 \ No newline at end of file +sphinxcontrib-serializinghtml==1.1.5 +sphinxcontrib-mermaid==2.0.0 +docutils==0.17.1 +patreon==0.5.0 +imageio==2.11.0 \ No newline at end of file diff --git a/spelling_wordlist.txt b/spelling_wordlist.txt index 77311307..5232eeac 100644 --- a/spelling_wordlist.txt +++ b/spelling_wordlist.txt @@ -311,3 +311,6 @@ Neyman detections amidst IoT +Papoulis +Pillai +Springer