\documentclass[../schrodinger_simulation.tex]{subfiles}
\begin{document}
\section{Methods}\label{sec:methods}
%
\subsection{The Schrödinger equation}\label{ssec:schrodinger}
%
% Add something that takes Planck to Schrödinger
% In classical mechanics, we have Newton laws and conservation of energy. In quantum
% mechanics, we have Schrödinger equation.
The Schrödinger equation has the general form
\begin{align}
    i \hbar \frac{\partial}{\partial t} | \Psi \rangle &= \hat{H} | \Psi \rangle \ , \label{eq:schrodinger_general}
\end{align}
where $i$ is the imaginary unit and $\hbar$ is the reduced Planck constant. $\hat{H}$ is the Hamiltonian operator, which represents the total energy of the system, and $| \Psi \rangle$ is the quantum state. In two-dimensional position space, the quantum state can be expressed using the time-dependent, complex-valued wave function $\Psi (x, y, t)$. According to the Born rule, the square modulus of the wave function gives the probability density of finding the particle at position $(x, y)$ at time $t$. The relation is given by
\begin{align}
    p(x, y \ | \ t) &= |\Psi(x, y, t)|^{2} = \Psi^{*}(x, y, t) \Psi(x, y, t) \ , \label{eq:born_rule}
\end{align}
where $\Psi^{*}$ denotes the complex conjugate of the wave function.

% Add something about kinetic and potential energy, to introduce the potential V
For a single particle of mass $m$ in two-dimensional position space, the Schrödinger equation can be expressed as
\begin{align*}
    i \hbar \frac{\partial}{\partial t} \Psi (x, y, t) &= - \frac{\hbar^{2}}{2m} \bigg( \frac{\partial^{2}}{\partial x^{2}} + \frac{\partial^{2}}{\partial y^{2}} \bigg) \Psi (x, y, t) \\
    & \quad + V(x, y, t) \Psi (x, y, t) \numberthis \ . \label{eq:schrodinger_special}
\end{align*}
The term with the second-order partial derivatives gives the kinetic energy, while the potential $V$ represents the external environment. In this experiment, we only consider time-independent potentials, such that $V = V(x, y)$.

When we scale away the dimensionful quantities in the Schrödinger equation, we are left with a wave function $u$, a potential $v$, and the dimensionless equation
\begin{align}
    i \frac{\partial u}{\partial t} &= - \frac{\partial^{2} u}{\partial x^{2}} - \frac{\partial^{2} u}{\partial y^{2}} + v(x, y) u \ . \label{eq:schrodinger_dimensionless}
\end{align}
%
In the scaled variables, the Born rule becomes
\begin{align}
    p(x, y \ | \ t) &= |u(x, y, t)|^{2} = u^{*}(x, y, t) u(x, y, t) \ . \label{eq:born_rule_scaled}
\end{align}

\subsection{The Crank-Nicolson scheme}\label{ssec:crank_nicolson}
%
When we describe a particle in position space, we have to solve a partial differential equation (PDE). To do so numerically, we have to discretize Equation \eqref{eq:schrodinger_dimensionless}. We use the $\theta$-rule\footnote{With the $\theta$-rule, we recover the Forward Euler method for $\theta = 0$ and the Backward Euler method for $\theta = 1$.} to combine the forward (explicit) and backward (implicit) finite difference methods. The result is a linear combination of the explicit and implicit schemes, given by
\begin{align}
    \frac{u_{\ivec, \jvec}^{n+1} - u_{\ivec, \jvec}^{n}}{\Delta t} &= \theta F_{\ivec, \jvec}^{n+1} + (1 - \theta) F_{\ivec, \jvec}^{n} \ , \label{eq:theta_rule}
\end{align}
%
where $\theta \in [0, 1]$ and $F_{\ivec, \jvec}$ denotes the discretized spatial part (the right-hand side) of Equation \eqref{eq:schrodinger_dimensionless}. To avoid confusing the spatial indices with the imaginary unit $i$, we use the subscripts $\ivec, \jvec$ for the commonly used indices $i, j$ in the $x$- and $y$-direction. In addition, the superscripts $n$ and $n+1$ indicate the position in time.
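With second-order central differences and a uniform grid spacing $h$ (introduced in Section \ref{ssec:implementation}), the spatial term can be written out explicitly as
\begin{align*}
    F_{\ivec, \jvec}^{n} &= \frac{i}{h^{2}} \big[ u_{\ivec+1, \jvec}^{n} - 2u_{\ivec, \jvec}^{n} + u_{\ivec-1, \jvec}^{n} \big] \\
    & \quad + \frac{i}{h^{2}} \big[ u_{\ivec, \jvec+1}^{n} - 2u_{\ivec, \jvec}^{n} + u_{\ivec, \jvec-1}^{n} \big] - i v_{\ivec, \jvec} u_{\ivec, \jvec}^{n} \ ,
\end{align*}
and analogously for $F_{\ivec, \jvec}^{n+1}$, where $v_{\ivec, \jvec}$ is the potential evaluated at the grid points.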
Setting $\theta = 1/2$ gives the Crank-Nicolson (CN) method
\begin{align}
    \frac{u_{\ivec, \jvec}^{n+1} - u_{\ivec, \jvec}^{n}}{\Delta t} &= \frac{1}{2} \bigg[ F_{\ivec, \jvec}^{n+1} + F_{\ivec, \jvec}^{n} \bigg] \ . \label{eq:crank_nicolson_method}
\end{align}
%
Using CN, we derive the discretized Schrödinger equation given by
\begin{align*}
    & u_{\ivec, \jvec}^{n+1} - r \big[ u_{\ivec+1, \jvec}^{n+1} - 2u_{\ivec, \jvec}^{n+1} + u_{\ivec-1, \jvec}^{n+1} \big] \\
    & - r \big[ u_{\ivec, \jvec+1}^{n+1} - 2u_{\ivec, \jvec}^{n+1} + u_{\ivec, \jvec-1}^{n+1} \big] + \frac{i \Delta t}{2} v_{\ivec, \jvec} u_{\ivec, \jvec}^{n+1} \\
    &= u_{\ivec, \jvec}^{n} + r \big[ u_{\ivec+1, \jvec}^{n} - 2u_{\ivec, \jvec}^{n} + u_{\ivec-1, \jvec}^{n} \big] \\
    & \quad + r \big[ u_{\ivec, \jvec+1}^{n} - 2u_{\ivec, \jvec}^{n} + u_{\ivec, \jvec-1}^{n} \big] - \frac{i \Delta t}{2} v_{\ivec, \jvec} u_{\ivec, \jvec}^{n} \numberthis \ , \label{eq:schrodinger_discretized}
\end{align*}
%
where $r$ is defined as
\begin{align*}
    r \equiv \frac{i \Delta t}{2 h^{2}} \ ,
\end{align*}
%
with $h$ being the spatial step size and $\Delta t$ the time step. The full derivation of both Equation \eqref{eq:crank_nicolson_method} and Equation \eqref{eq:schrodinger_discretized} can be found in Appendix \ref{ap:crank_nicolson}.

\subsection{The double-slit experiment}\label{ssec:double_slit}
%
Thomas Young first performed the double-slit experiment in 1801 to demonstrate the principle of interference of light \cite{britannica:2023:young}, supporting the view of light as waves rather than particles. The double-slit experiment results in a diffraction pattern on a detector screen, where constructive interference of light results in bright spots and destructive interference results in dark spots, as shown in Figure \ref{fig:youngs_double_slit}.
\begin{figure}
    \centering
    \includegraphics[width=\linewidth]{images/youngs_double_slit.pdf}
    \caption{The setup of Thomas Young's double-slit experiment, where $S_{0}$ denotes the light source, and $S_{1}$ and $S_{2}$ denote the slits in the wall.}
    \label{fig:youngs_double_slit}
\end{figure}
After the wave passes through the two slits, the observed pattern is determined by the path difference
\begin{align*}
    \delta = d \sin (\theta) \ ,
\end{align*}
where $d$ is the distance between the centers of the two slits and $\theta$ is the angle between the outgoing direction and the normal of the wall, assuming that the distance $L$ between the wall and the detector screen satisfies $L \gg d$ \cite[p. 6]{mit:2004:physics}. In this case, we observe constructive interference when
\begin{align*}
    \delta = m \lambda && m = 0, \pm 1, \pm 2, \dots \ ,
\end{align*}
where $\lambda$ is the wavelength and $m$ is called the order number, and destructive interference when
\begin{align*}
    \delta = \bigg( m + \frac{1}{2} \bigg) \lambda && m = 0, \pm 1, \pm 2, \dots \ .
\end{align*}
% Something about Heisenberg uncertainty principle

\subsection{Implementation}\label{ssec:implementation}
%
% Add tables of parameters used, initial conditions, notation etc. A, B are sparse csc matrix - theory of csc matrix?
We use Dirichlet boundary conditions, as given in Table \ref{tab:boundary_conditions}, which allow us to express Equation \eqref{eq:schrodinger_discretized} as the matrix equation
\begin{align}
    A u^{n+1} = B u^{n} \ .
\end{align}
Here, both $u^{n+1}$ and $u^{n}$ are column vectors containing the internal points of the $xy$ grid at time step $n+1$ and $n$, respectively. Since we have $M$ points in the $x$- and $y$-direction, we have $M-2$ internal points in each direction. Both $u$ vectors have length $(M-2)^{2}$, and the matrices $A$ and $B$ have size $(M-2)^{2} \times (M-2)^{2}$. The matrices can be decomposed into submatrices of size $(M-2) \times (M-2)$, with the following pattern
\begin{align*}
    A, B =
    \begin{bmatrix}
        \begin{matrix}
            \bullet & \bullet & \phantom{\bullet} \\
            \bullet & \bullet & \bullet \\
            \phantom{\bullet} & \bullet & \bullet
        \end{matrix} & \rvline &
        \begin{matrix}
            \bullet & \phantom{\bullet} & \phantom{\bullet} \\
            \phantom{\bullet} & \bullet & \phantom{\bullet} \\
            \phantom{\bullet} & \phantom{\bullet} & \bullet
        \end{matrix} & \rvline &
        \begin{matrix}
            \phantom{\bullet} & \phantom{\bullet} & \phantom{\bullet} \\
            \phantom{\bullet} & \phantom{\bullet} & \phantom{\bullet} \\
            \phantom{\bullet} & \phantom{\bullet} & \phantom{\bullet}
        \end{matrix} \\
        \hline
        \begin{matrix}
            \bullet & \phantom{\bullet} & \phantom{\bullet} \\
            \phantom{\bullet} & \bullet & \phantom{\bullet} \\
            \phantom{\bullet} & \phantom{\bullet} & \bullet
        \end{matrix} & \rvline &
        \begin{matrix}
            \bullet & \bullet & \phantom{\bullet} \\
            \bullet & \bullet & \bullet \\
            \phantom{\bullet} & \bullet & \bullet
        \end{matrix} & \rvline &
        \begin{matrix}
            \bullet & \phantom{\bullet} & \phantom{\bullet} \\
            \phantom{\bullet} & \bullet & \phantom{\bullet} \\
            \phantom{\bullet} & \phantom{\bullet} & \bullet
        \end{matrix} \\
        \hline
        \begin{matrix}
            \phantom{\bullet} & \phantom{\bullet} & \phantom{\bullet} \\
            \phantom{\bullet} & \phantom{\bullet} & \phantom{\bullet} \\
            \phantom{\bullet} & \phantom{\bullet} & \phantom{\bullet}
        \end{matrix} & \rvline &
        \begin{matrix}
            \bullet & \phantom{\bullet} & \phantom{\bullet} \\
            \phantom{\bullet} & \bullet & \phantom{\bullet} \\
            \phantom{\bullet} & \phantom{\bullet} & \bullet
        \end{matrix} & \rvline &
        \begin{matrix}
            \bullet & \bullet & \phantom{\bullet} \\
            \bullet & \bullet & \bullet \\
            \phantom{\bullet} & \bullet & \bullet
        \end{matrix}
    \end{bmatrix}
\end{align*}
To fill the diagonals of the matrices $A$ and $B$, we used
\begin{align*}
    a_{k} &= 1 + 4r + \frac{i \Delta t}{2} v_{\ivec, \jvec} \ , \\
    b_{k} &= 1 - 4r - \frac{i \Delta t}{2} v_{\ivec, \jvec} \ ,
\end{align*}
respectively, while the remaining nonzero entries indicated above are $-r$ in $A$ and $r$ in $B$, following Equation \eqref{eq:schrodinger_discretized}. An example of filled matrices can be found in Appendix \ref{ap:matrix_structure}.

Regarding notation, we use an equal step size $h$ in the $x$- and $y$-direction, such that
\begin{align*}
    x \in [0, 1] && x \rightarrow x_{\ivec} = \ivec h && \ivec = 0, 1, \dots, M-1 \\
    y \in [0, 1] && y \rightarrow y_{\jvec} = \jvec h && \jvec = 0, 1, \dots, M-1 \\
    t \in [0, T] && t \rightarrow t_{n} = n \Delta t && n = 0, 1, \dots, N_{t}-1
\end{align*}
and simplify the indices such that
\begin{align*}
    u(x, y, t) &\rightarrow u(\ivec h, \jvec h, n \Delta t) \equiv u_{\ivec, \jvec}^{n} \ , \\
    v(x, y) &\rightarrow v(\ivec h, \jvec h) \equiv v_{\ivec, \jvec} \ ,
\end{align*}
which gives a matrix $U^{n}$ that contains the elements $u_{\ivec, \jvec}^{n}$, and a matrix $V$ that contains the elements $v_{\ivec, \jvec}$.
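To illustrate how the potential matrix $V$ can be constructed for the double-slit barrier, the following C++ sketch assigns the barrier height $v_{0}$ to all grid points inside the wall and zero everywhere else, using the geometry listed in Table \ref{tab:wall_setup}. The function and the details of the slit geometry are illustrative assumptions and do not necessarily match our implementation.
\begin{verbatim}
#include <cmath>
#include <vector>

// Illustrative sketch: fill the (M x M) potential matrix V for a
// double slit. Grid points inside the wall get the barrier height
// v0, all other points get 0. The geometry parameters (thickness,
// wall position, separator length, slit aperture) are fractions of
// the domain [0, 1]; the slit placement below is an assumption.
std::vector<std::vector<double>> make_double_slit(
    int M, double h, double v0,
    double thickness, double x_wall,
    double separator, double aperture)
{
    std::vector<std::vector<double>> V(M, std::vector<double>(M, 0.0));
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < M; ++j) {
            double x = i * h;
            double y = j * h;
            // Inside the wall along x?
            bool in_wall = std::abs(x - x_wall) <= 0.5 * thickness;
            // Two openings placed symmetrically about y = 0.5,
            // separated by a solid piece of length `separator`.
            bool upper = (y > 0.5 + 0.5 * separator) &&
                         (y < 0.5 + 0.5 * separator + aperture);
            bool lower = (y < 0.5 - 0.5 * separator) &&
                         (y > 0.5 - 0.5 * separator - aperture);
            if (in_wall && !upper && !lower) {
                V[i][j] = v0;
            }
        }
    }
    return V;
}
\end{verbatim}
For Simulation 1 in Table \ref{tab:sim_setup}, where $v_{0} = 0$, this simply gives $V = 0$ everywhere, i.e.\ no barrier.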
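The matrices $A$ and $B$ can then be assembled directly from Equation \eqref{eq:schrodinger_discretized}. The following C++ sketch collects their nonzero entries as (row, column, value) triplets, assuming that the internal grid points are flattened column by column; this ordering and the helper names are illustrative and not taken from our implementation, which stores $A$ and $B$ in a sparse format.
\begin{verbatim}
#include <complex>
#include <vector>

// Illustrative sketch: collect the nonzero entries of A and B for
// the system A u^{n+1} = B u^n as (row, column, value) triplets.
// The internal grid points are assumed to be flattened column by
// column; the actual implementation may use a different ordering.
struct Triplet { int row; int col; std::complex<double> value; };

void build_AB(int M, double dt, double h,
              const std::vector<std::vector<double>>& V,  // M x M
              std::vector<Triplet>& A, std::vector<Triplet>& B)
{
    using namespace std::complex_literals;
    const int m = M - 2;  // internal points per direction
    const std::complex<double> r = 1i * dt / (2.0 * h * h);
    // Map internal point (i, j), i, j = 1, ..., M-2, to index k.
    auto idx = [m](int i, int j) { return (j - 1) * m + (i - 1); };

    for (int j = 1; j <= m; ++j) {
        for (int i = 1; i <= m; ++i) {
            const int k = idx(i, j);
            // Diagonal entries a_k and b_k.
            A.push_back({k, k, 1.0 + 4.0 * r + 0.5i * dt * V[i][j]});
            B.push_back({k, k, 1.0 - 4.0 * r - 0.5i * dt * V[i][j]});
            // Couplings to the four nearest neighbours; boundary
            // points are omitted since u = 0 there (Dirichlet).
            const int di[4] = {-1, 1, 0, 0};
            const int dj[4] = {0, 0, -1, 1};
            for (int n = 0; n < 4; ++n) {
                int ii = i + di[n];
                int jj = j + dj[n];
                if (ii >= 1 && ii <= m && jj >= 1 && jj <= m) {
                    A.push_back({k, idx(ii, jj), -r});
                    B.push_back({k, idx(ii, jj),  r});
                }
            }
        }
    }
}
\end{verbatim}
Each time step then amounts to computing the right-hand side $B u^{n}$ and solving the linear system $A u^{n+1} = B u^{n}$ for $u^{n+1}$.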
\begin{table}[H]
    \centering
    \begin{tabular}{l r}
        \hline
        Position & Value \\
        \hline
        $u(x=0, y, t)$ & $0$ \\
        $u(x=1, y, t)$ & $0$ \\
        $u(x, y=0, t)$ & $0$ \\
        $u(x, y=1, t)$ & $0$ \\
        \hline
    \end{tabular}
    \caption{Boundary conditions in the $xy$-plane, also known as Dirichlet boundary conditions.}
    \label{tab:boundary_conditions}
\end{table}

For the general setup of the wall, we used the parameters listed in Table \ref{tab:wall_setup}.
\begin{table}[H]
    \centering
    \begin{tabular}{l r}
        \hline
        Parameter & Value \\
        \hline
        Wall thickness & $0.02$ \\
        Wall position & $0.5$ \\
        Separator length & $0.05$ \\
        Slit aperture & $0.05$ \\
        \hline
    \end{tabular}
    \caption{Setup of the double-slit wall.}
    \label{tab:wall_setup}
\end{table}

The parameters used for the two simulations are listed in Table \ref{tab:sim_setup}.
\begin{table}[H]
    \centering
    \begin{tabular}{l r r}
        \hline
        Simulation & $1$ & $2$ \\
        \hline
        $h$ & $0.005$ & $0.005$ \\
        $\Delta t$ & $2.5 \times 10^{-5}$ & $2.5 \times 10^{-5}$ \\
        $T$ & $0.008$ & $0.002$ \\
        $x_{c}$ & $0.25$ & $0.25$ \\
        $\sigma_{x}$ & $0.05$ & $0.05$ \\
        $p_{x}$ & $200$ & $200$ \\
        $y_{c}$ & $0.5$ & $0.5$ \\
        $\sigma_{y}$ & $0.05$ & $0.20$ \\
        $p_{y}$ & $0$ & $0$ \\
        $v_{0}$ & $0$ & $1 \times 10^{10}$ \\
        \hline
    \end{tabular}
    \caption{Parameters for the two simulations: step sizes $h$ and $\Delta t$, total simulation time $T$, center $(x_{c}, y_{c})$, widths $(\sigma_{x}, \sigma_{y})$, and momenta $(p_{x}, p_{y})$ of the initial wave packet, and barrier height $v_{0}$ of the wall.}
    \label{tab:sim_setup}
\end{table}

\subsection{Tools}\label{ssec:tools}
%
The simulation of the double-slit experiment is implemented in C++. We use the Python library \verb|matplotlib| \cite{hunter:2007:matplotlib} to produce all the plots, and \verb|seaborn| \cite{waskom:2021:seaborn} to set the theme of the figures.

\end{document}