Commit 94174d53 authored by Maxwell Michael Gisborne's avatar Maxwell Michael Gisborne
Browse files

refs still broken

parent 7225937c
......@@ -32,7 +32,7 @@ def plot(ax,data):
)
return ax
fig,ax = plt.subplots()
fig,ax = plt.subplots(figsize=plt.figaspect(0.5))
plt.yscale('log')
ax.grid()
for data in datas:
......
......@@ -4,44 +4,43 @@ import sys
dT = 0.01
LT = 500
#figure 5.12
args = HHNZ(3,0.05,0.5,np.arange(0,LT,dT))
# figure 5.8
args = HHNZ(3,0.05,0,np.arange(0,LT,dT))
peram_name = 'L{L}A{alpha}E{eps}'.format(**args[1])
print(peram_name)
fig = Plot_TDFD(DIFF(N(args)),fig_args = dict(figsize = plt.figaspect(0.5)), max_time = 8, max_freq = 0.6)
fig.suptitle(r'$\alpha = 0.05, \epsilon_0 = 0.5$')
fig.savefig(f'results/choice_cuts/L3_E05_A005_DIFF_N.pdf')
fig = Plot_TDFD(DIFF(Z(args)),fig_args = dict(figsize = plt.figaspect(0.5)), max_time = 10, max_freq = np.inf)
fig.suptitle(r'$\alpha = 0.05$')
fig.savefig(f'results/choice_cuts/L3_E00_A005_DIFF_Z.pdf')
#figure 5.11
args = HHNZ(3,0.05,0.5,np.arange(0,LT,dT))
args = HHNZ(3,0.05,0.5,np.arange(0,LT,dT*0.1))
peram_name = 'L{L}A{alpha}E{eps}'.format(**args[1])
print(peram_name)
fig = Plot_TDFD(DIFF(Z(args)),fig_args = dict(figsize = plt.figaspect(0.5)), max_time = 8, max_freq = 0.6)
fig.suptitle(r'$\alpha = 0.05, \epsilon_0 = 0.5$')
fig.savefig(f'results/choice_cuts/L3_E05_A005_DIFF_Z.pdf')
# figure 5.10
#figure 5.12
args = HHNZ(3,0.05,0.5,np.arange(0,LT,dT))
peram_name = 'L{L}A{alpha}E{eps}'.format(**args[1])
print(peram_name)
fig = Plot_TDFD(STAR(NZ(args)),fig_args = dict(figsize = plt.figaspect(0.5)),max_time = 8)
fig = Plot_TDFD(DIFF(N(args)),fig_args = dict(figsize = plt.figaspect(0.5)), max_time = 8, max_freq = 0.6)
fig.suptitle(r'$\alpha = 0.05, \epsilon_0 = 0.5$')
fig.savefig(f'results/choice_cuts/L3_E05_A005_STAR_NZ.pdf')
fig.savefig(f'results/choice_cuts/L3_E05_A005_DIFF_N.pdf')
# figure 5.8
args = HHNZ(3,0.05,0,np.arange(0,LT,dT))
# figure 5.10
args = HHNZ(3,0.05,0.5,np.arange(0,LT,dT))
peram_name = 'L{L}A{alpha}E{eps}'.format(**args[1])
print(peram_name)
fig = Plot_TDFD(DIFF(Z(args)),fig_args = dict(figsize = plt.figaspect(0.5)), max_time = 10, max_freq = 0.6)
fig.suptitle(r'$\alpha = 0.05$')
fig.savefig(f'results/choice_cuts/L3_E00_A005_DIFF_Z.pdf')
fig = Plot_TDFD(STAR(NZ(args)),fig_args = dict(figsize = plt.figaspect(0.5)),max_time = 8)
fig.suptitle(r'$\alpha = 0.05, \epsilon_0 = 0.5$')
fig.savefig(f'results/choice_cuts/L3_E05_A005_STAR_NZ.pdf')
# figure 5.7
args = HHNZ(3,0.1,0,np.arange(0,LT,dT))
peram_name = 'L{L}A{alpha}E{eps}'.format(**args[1])
fig = Plot_TDFD(STAR(NZ(args)),fig_args = dict(figsize = plt.figaspect(0.5)), max_time = 8, max_freq = 10)
fig.suptitle(r'$\alpha = 0.01$')
fig.suptitle(r'$\alpha = 0.1$')
fig.savefig(f'results/choice_cuts/L3_E00_A010_STAR_NZ.pdf')
# figure 5.6
......@@ -49,7 +48,7 @@ args = HHNZ(3,0.05,0,np.arange(0,LT,dT))
peram_name = 'L{L}A{alpha}E{eps}'.format(**args[1])
print(peram_name)
fig = Plot_TDFD(STAR(NZ(args)),fig_args = dict(figsize = plt.figaspect(0.5)),max_time = 8)
fig.suptitle(r'$\alpha = 0.005$')
fig.suptitle(r'$\alpha = 0.05$')
fig.savefig(f'results/choice_cuts/L3_E00_A005_STAR_NZ.pdf')
......
......@@ -101,6 +101,8 @@ centers = []
normals = []
ns = []
As = []
print('\\begin{tabular}{ c | c c | c c }')
print('Samples & Product & SEM & Norm & SEM\\\\')
for i,S in enumerate(Ss):
if i == 0:
S = S0 = np.array(list(S))
......@@ -118,10 +120,12 @@ for i,S in enumerate(Ss):
av,sd,sem = stats(products)
fitline = mk_normal(av,sd) # normal distribution
self_p_av, self_p_sd, self_p_sem = stats(selfp)
self_a_av, self_a_sd, self_a_sem = stats(selfa)
hist,bin_centers = histogram(products)
print(f'Curve( {i+1} ), Samples( {sampels[i]} ), {av:.4E} pm {sem:.4E}')
print(f'{sampels[i]} & {1-av:.3E} & {sem:.3E} & {1-self_p_av:.3E} & {self_p_sem:.3E}\\\\')
append_into(# Syntactic sugar. x = y -> x.append(y)
avs = av,
sds = sd,
......@@ -133,6 +137,7 @@ for i,S in enumerate(Ss):
ns = selfp,
)
print('\\end{tabular}')
Sdirect = sim.Compute_States( Initial_State_np,
sim.provided_unitary_methods['direct'](H_np,times[0]) )
......@@ -148,7 +153,6 @@ product_direct = [] # the product of the states with the direct U-method
product_comper = [] # comparison of states between the direct and iteration U-methods
for t,direct,iteration in zip(times[0],Sdirect,S0):
print(f'{t:.1f}',end='\r')
append_into(
product_direct = abs(sim.inner(direct,direct)),
product_comper = abs(sim.inner(direct,iteration))
......@@ -160,7 +164,7 @@ av_comper, sd_comper, err_comper = stats(product_comper)
av_direct, sd_direct, err_direct = stats(product_direct)
normal_direct = mk_normal(av_direct,sd_direct)
fig,axs = plt.subplots(2)
fig,axs = plt.subplots(2,figsize = plt.figaspect(0.5))
ax1,ax2 = axs
ax1.grid()
......@@ -188,7 +192,7 @@ fig.tight_layout()
if SAVE: fig.savefig('results/validation/nummerical_stability_0.pdf')
if SHOW: plt.show()
fig,ax = plt.subplots()
fig,ax = plt.subplots(figsize = plt.figaspect(0.5))
ax.errorbar(sampels,[1-av for av in avs],yerr=SEMs,fmt='o-')
ax.grid()
ax.set_xlabel('number or time steps')
......@@ -199,7 +203,7 @@ if SAVE: fig.savefig('results/validation/nummerical_stability_1.pdf')
if SHOW: plt.show()
Plots = 3
fig,axs = plt.subplots(min(Curves,Plots))
fig,axs = plt.subplots(min(Curves,Plots),figsize = plt.figaspect(0.5))
for j,ax in enumerate(axs):
i = j * Curves//Plots
center = centers[i]
......@@ -220,7 +224,7 @@ fig.tight_layout()
if SAVE: fig.savefig('results/validation/nummerical_stability_2.pdf')
if SHOW: plt.show()
fig,ax = plt.subplots()
fig,ax = plt.subplots(figsize = plt.figaspect(0.5))
ax.grid()
for i in range(len(hists)):
center = centers[i]
......
\headcommand {\slideentry {0}{0}{1}{1/1}{}{0}}
\headcommand {\beamer@framepages {1}{1}}
\headcommand {\slideentry {0}{0}{2}{3/6}{}{0}}
\headcommand {\beamer@framepages {3}{6}}
\headcommand {\slideentry {0}{0}{3}{7/7}{}{0}}
\headcommand {\beamer@framepages {7}{7}}
\headcommand {\slideentry {0}{0}{4}{8/8}{}{0}}
\headcommand {\beamer@framepages {8}{8}}
\headcommand {\slideentry {0}{0}{5}{9/9}{}{0}}
\headcommand {\beamer@framepages {9}{9}}
\headcommand {\slideentry {0}{0}{6}{10/10}{}{0}}
\headcommand {\beamer@framepages {10}{10}}
\headcommand {\slideentry {0}{0}{7}{11/11}{}{0}}
\headcommand {\beamer@framepages {11}{11}}
\headcommand {\slideentry {0}{0}{8}{12/12}{}{0}}
\headcommand {\beamer@framepages {12}{12}}
\headcommand {\slideentry {0}{0}{9}{13/13}{}{0}}
\headcommand {\beamer@framepages {13}{13}}
\headcommand {\slideentry {0}{0}{10}{14/14}{}{0}}
\headcommand {\beamer@framepages {14}{14}}
\headcommand {\beamer@partpages {1}{14}}
\headcommand {\beamer@subsectionpages {1}{14}}
\headcommand {\beamer@sectionpages {1}{14}}
\headcommand {\beamer@documentpages {14}}
\headcommand {\gdef \inserttotalframenumber {10}}
\documentclass{beamer}
\usepackage{graphicx,xcolor,animate}% Include figure files
\usepackage{dcolumn}% Align table columns on decimal point
\usepackage{bm,ulem}% bold math
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{verbatim}
%\usepackage{blackboard}
\newtheorem{idea}{Idea}
\newtheorem{observation}{Note}
\newtheorem{question}{Question}
\newtheorem{answer}{Answer}
\newtheorem{challenge}{Challenge}
\newtheorem{Solution}{Solution}
\graphicspath{{figures/}}
\DeclareGraphicsExtensions{{.pdf},{.png}}
\def\diag{\operatorname{diag}}
\def\rank{\operatorname{rank}}
\def\Tr{\operatorname{Tr}}
\def\Ad{\operatorname{Ad}}
\def\norm#1{|| #1 ||}
\def\vec#1{\mathbf{#1}}
\def\ave#1{\langle #1\rangle}
\def\ket#1{| #1 \rangle}
\def\bra#1{\langle #1 |}
\def\ip#1#2{\langle #1 | #2 \rangle}
\def\lket#1{| #1 \rangle\rangle}
\def\lbra#1{\langle\langle #1 |}
\def\lip#1#2{\langle\langle #1 | #2 \rangle\rangle}
\def\d{\partial}
\def\op#1{\hat{#1}}
\def\A{\mathcal{A}}
\def\AA{\mathbf{A}}
\def\C{\mathcal{C}}
\def\D{\mathcal{D}}
\def\J{\mathcal{J}}
\def\L{\mathcal{L}}
\def\M{\mathcal{M}}
\def\HH{\mathcal{H}}
\def\DD{\mathfrak{D}}
\def\CP{\mathbb{CP}}
\def\UU{\mathbf{U}}
\def\SU{\mathbf{SU}}
\def\PSU{\mathbf{PSU}}
\def\U{\mathbb{U}}
\def\F{\mathfrak{F}}
\def\G{\mathfrak{G}}
\def\S{\mathcal{S}}
\def\dd{d}
\def\eps{\epsilon}
\def\PAUSE{\pause}
\mode<presentation>{\usetheme{Warsaw}}
\def\LL{\mathfrak{L}}
\def\CP{\mathbf{CP}}
\def\SU{\mathbf{SU}}
\def\U{\mathbf{U}}
\def\up{{\uparrow}}
\def\down{{\downarrow}}
%% other shortcuts
\def\xs{\vec{x}\cdot\vec{\sigma}}
\def\sx{\op{\sigma}_x}
\def\sy{\op{\sigma}_y}
\def\sz{\op{\sigma}_z}
\def\s#1{\op{{\bm \sigma}}^{#1}}
\def\IN{\textsc{in}}
\def\OUT{\textsc{out}}
\def\oo{\omega}
\def\cemph#1{\textcolor{cyan}{#1}}
\mode<presentation>{\usetheme{Warsaw}}
\def\PAUSE{}
\def\PAUSE{\pause}
\usepackage[english]{babel}
\usepackage[latin1]{inputenc}
\usepackage{times}
\usepackage[T1]{fontenc}
% Or whatever. Note that the encoding and the font should match. If T1
% does not look nice, try deleting the line with the fontenc.
%copied from color theme
\definecolor{swanseablue}{RGB}{0,103,182}
\definecolor{swansealblue}{RGB}{133,204,255}
\definecolor{highlightorange}{RGB}{255,160,35}
\setbeamercolor{normal text}{fg=black}
\setbeamercolor{alerted text}{fg=highlightorange}
\setbeamercolor{example text}{fg=swansealblue}
\setbeamercolor{structure}{fg=swanseablue}
\setbeamercolor{author}{fg=swansealblue}
\setbeamercolor{date}{fg=swansealblue}
\setbeamercolor{framesubtitle}{fg=swansealblue}
\setbeamercolor{institute}{fg=swansealblue}
\setbeamercolor{title}{fg=swanseablue}
\defbeamertemplate*{title page}{swansea}[1][]
{
\vbox{}
\vfill
\begin{flushleft}
\begin{beamercolorbox}[sep=8pt,#1]{title page}
\usebeamerfont{title}\inserttitle\par%
\ifx\insertsubtitle\@empty%
\else%
\vskip0.25em%
{\usebeamerfont{subtitle}\usebeamercolor[fg]{subtitle}\insertsubtitle\par}%
\fi%
\end{beamercolorbox}%
\vskip1em\par
\begin{beamercolorbox}[sep=8pt,#1]{author}
\usebeamerfont{author}\insertauthor
\end{beamercolorbox}
\begin{beamercolorbox}[sep=8pt,#1]{institute}
\usebeamerfont{institute}\insertinstitute
\end{beamercolorbox}
\begin{beamercolorbox}[sep=8pt,#1]{date}
\usebeamerfont{date}\insertdate
\end{beamercolorbox}\vskip0.5em
\begin{beamercolorbox}[sep=8pt,#1]{institute}
\usebeamerfont{institute}\conference
\end{beamercolorbox}\vskip0.5em
{\usebeamercolor[fg]{titlegraphic}\inserttitlegraphic\par}
\vskip0.25\paperheight
\end{flushleft}
\vfill
}
\setbeamertemplate{title}[swansea]
\title[Modelling cold atoms]{Modelling cold atoms in optical lattices: Hubbard vs Heisenberg model}
\author{Max Gisborne}
\institute{Swansea University}
\date{May 25, 2020}
\def\conference{MPhys Presentation}
\begin{document}
{\usebackgroundtemplate{\includegraphics[width=\paperwidth,height=\paperheight]{title.pdf}}
\begin{frame} \titlepage \end{frame}}
\begin{frame}{Motivation}
\textbf{Cold atoms in optical lattices} --- interesting platform for
\begin{itemize}
\item exploring physical phenomena \pause
\item testing new control schemes \pause
\item quantum simulation and technology applications \pause
\end{itemize}
% You could break this slide up into two and add a nice picture of the experimental setup
\vfill
Ideal platform to explore control via \textbf{energy landscape shaping} due to significant scope to shape optical potentials using laser and digital mirror devices
\vfill
\textbf{Optimal} control and \textbf{design} of energy landscape for specific applications requires \textbf{models} that are \textbf{accurate yet efficient} to evaluate.
\end{frame}
\begin{frame}{Aims and objectives}
Hubbard model with spin most accurate description of cold atom dynamics but expensive to evaluate
Perturbative expansion leads to XXZ coupled spin model --- much more computationally efficent but only applicable in perturbative regime
In the context of optimal control it is not always easy to know a-priori if we remain in the perturbative regime
Objective: Implement simulator for both Hubbard and Heisenberg models to allow for direct comparison of the dynamics and validity of approximations
\end{frame}
\begin{frame}{Hubbard model}
\end{frame}
\begin{frame}{Heisenberg model}
\end{frame}
\begin{frame}{Simulation Approach}
\end{frame}
\begin{frame}{Simulation task 1}
Outline task and present results
\end{frame}
\begin{frame}{Simulation task 2}
Outline task and present results
\end{frame}
\begin{frame}{Simulation task 3}
Outline task and present results
\end{frame}
\begin{frame}{Summary and Outlook}
What have we learned?
Where next?
What can we do with the simulator?
\end{frame}
\end{document}
% ************************** Thesis Abstract *****************************
\null\vspace{\fill}
\begin{abstract}
Ultracold atoms held in an optical lattice are currently being used to study how spin degrees of freedom can be controlled.
The full dynamics of ultra cold atoms in an optical lattice is described by the Hubbard model.
This model is expensive to compute so numerical models typically rely on the far simpler effective Heisenberg model,
which has been found to emerge from a perturbative expansion of the full Hubbard model.
Here numerical methods are used to study the limits of this approximation.
The results suggest that the Heisenberg model may not be suitable for many of its contemporary applications.
Ultra cold atoms trapped in an optical lattice are currently being used to study spin.
The Hubbard model, originally developed for the description of electrons in a solid,
is now used to describe these cold atom systems.
Despite its simplicity it is still expensive to solve numerically.
For this reason, recent efforts for control use an effective Heisenberg
model as an approximation to the Hubbard model.
However the limits of this method are not fully understood.
Here we implement a system for numerically simulating and systematically comparing these models.
We find that the Hubbard Hamiltonian picks up a significant frequency shift for $J/U \approx 0.05$.
\end{abstract}
\vspace{\fill}
......@@ -3,7 +3,7 @@
\renewcommand{\abstractname}{\large Acknowledgements}
\begin{abstract}
\vspace{2cm}
I could not have got here with out the support, patients and good humor of my parents, of my friends and especially of my supervisor Dr Sophie Schemer.
I could not have got here without the support, patience and good humor of my parents, of my friends and especially of my supervisor Dr Sophie Shermer.
For sending me down this road I thank, Doc Horn, Carl Sagan and many other brilliant teachers \supperT
\end{abstract}
\vspace{\fill}
......
......@@ -9,14 +9,14 @@ L_z \left | l,m \right > = \hbar m \left | l,m \right >
\]
where $L^2$ is the total angular momentum operator, $L_z$ is the angular momentum operator along the z-axis, and $\left | l,m \right> $ is the state defined by $l,m$.
Angular momentum of composite objects is constructed with the rules of angular momentum addition, from the angular momentum of its constituent parts. Spin being Intrinsic angular momentum is the angular momentum of objects that are not composite. The total spin of a particle is a property of the type of particle. For example the electron is $\frac{1}{2}$-spin, meaning that $l = 1/2$ for all electrons. However $m$ depends on the electron, and constitutes a spin degree of freedom. The allowed values of $m$ in this case is $\frac12, -\frac12$.
Angular momentum of composite objects is constructed with the rules of angular momentum addition, from the angular momentum of its constituent parts. Spin being intrinsic angular momentum is the angular momentum of objects that are not composite. The total spin of a particle is a property of the type of particle. For example the electron is $\frac{1}{2}$-spin, meaning that $l = 1/2$ for all electrons. However $m$ depends on the electron, and constitutes a spin degree of freedom. The allowed values of $m$ in this case is $\frac12, -\frac12$.
Therefore, any electron can be written as a sum of two parts,
\[
\left|\psi\right> = f(\vec x, t) \left| + \right > + g(\vec x,t) \left | - \right>
\]
where $\left| + \right>$ is called {\it spin up} and has $m=\frac12$ and $\left| - \right>$ is called {\it spin down} and has $m = -\frac12 $.
Such a state is sometimes writen in colom vector form
Such a state is sometimes written in column vector form
$$
\psi = \begin{pmatrix}
f\\ g
......@@ -25,19 +25,19 @@ $$
\section{Exchange interaction}
Objects with half integer spin are Fermions, that is there wave function must be anti-symmetric under particle exchange. Objects with integer spin are Bosons, that is their wave function must be symmetric under particle exchange. These conditions give rise to an exchange energy.
Objects with half integer spin are Fermions, that is their wave function must be anti-symmetric under particle exchange. Objects with integer spin are Bosons, that is their wave function must be symmetric under particle exchange. These conditions give rise to an exchange energy.
Consider two electrons $\phi^1,\phi^2$ that occupy 2 orbitals $\phi_a,\phi_b$. The spin of the electrons can be written in bra-ket notation $\ket{\uparrow,\downarrow}$. The total wave function must be anti symmetric under permutations of fermions. We can define symmetric $\chi_+$ and anti-symmetric $\chi_-$ spin parts as such
Consider two electrons $\phi^1,\phi^2$ that occupy 2 orbitals $\phi_a,\phi_b$. The spin of the electrons can be written in bra-ket notation $\ket{\uparrow,\downarrow}$. The total wave function must be anti-symmetric under permutations of fermions. We can define symmetric $\chi_+$ and anti-symmetric $\chi_-$ spin parts as such
\begin{align*}
\chi_- &= \frac{1}{\sqrt{2}} \left( \ket{\uparrow,\downarrow} - \ket{\downarrow,\uparrow} \right)\\
\chi_+ &\in \left\{\frac{1}{\sqrt{2}} \left( \ket{\uparrow,\downarrow} + \ket{\downarrow,\uparrow} \right), \ket{\uparrow,\uparrow}, \ket{\downarrow,\downarrow} \right\}
\end{align*}
and construct the symmetric and antisemitic spacial parts as such
and construct the symmetric and anti-symmetric spacial parts as such
\begin{align*}
\psi_+ &= \frac1{\sqrt{2}} \left( \phi^1_a\phi_b^2 + \phi_a^2\phi_b^1 \right)\\
\psi_- &= \frac1{\sqrt{2}} \left( \phi^1_a\phi_b^2 - \phi_a^2\phi_b^1 \right)
\end{align*}
The total anti semetric wave function can be written as $\Psi =\psi_\pm \chi_\mp$.
The total anti-symmetric wave function can be written as $\Psi =\psi_\pm \chi_\mp$.
Suppose we have a Hamiltonians with two parts $H = H_1 + H_2 + H_{12}$ where $[H_1,\phi_2] = [H_1,\psi_2] = [H_2,\phi_1] = [H_2,\psi_1] = 0$ but $[H_{12},\phi_i] \neq 0 $ and $[H_{12},\phi_i] \neq 0 $.
\begin{align*}
......@@ -75,19 +75,22 @@ However
+ \left< H_{12} \right>_{\phi_a^2\phi_b^1}
\overbrace{\pm 2 \Re\left(\left< \phi_a^1\phi_b^2 |H_{12}|\phi_a^2 \phi_b^1 \right>\right)}^{\mbox{exchange energy}}
\]
The Hamiltonian dose not explicitly couple the spins part of the electrons. Even so, the coupling emerges because the Hamiltonian mixes the spacial part of and there is an over all antisymmetry condition imposed.
The Hamiltonian does not explicitly couple the spin parts of the electrons. Even so, the coupling emerges because the Hamiltonian mixes the spatial parts and there is an overall anti-symmetry condition imposed.
\section{Spintronics}
While spin might seem like an abstract idea there are many current and potential practical applications.
In classical computing the limit on clock cycles is wast heat. You can run a chip faster but you need to to increase its power consumption and heat produced. If too much heat is produced then the chip will be damaged. In the past this problem has been overcome by moving the parts of the CPU closer together by shrinking the size of the chip. This trick has yielded a massive improvement in performance, but now approaches fundamental limits. In 2020 $5 nm$ transistors are planned to enter production. At these length scales, tunneling effects prevent gates for functioning reliable.
In classical computing the limit on clock cycles is waste heat. You can run a chip faster but you need to to increase its power consumption and heat produced. If too much heat is produced then the chip will be damaged. In the past this problem has been overcome by moving the parts of the CPU closer together by shrinking the size of the chip. This trick has yielded a massive improvement in performance, but now approaches fundamental limits. In 2020 $5 nm$ transistors are planned to enter production.
At these length scales, tunneling effects prevent gates from functioning reliably.
Spin dynamics are not driven by the flow of electrons, and can therefor have different physical characteristics. Spin dynamics happen on a much smaller time scale, and do not produce as much wast heat as electron flow. They there for have in principle the potential to over come the fundamental limits to electron flow computing. However they require a different chip architecture, would not approach current classical computing in the near future.
Spin dynamics are not driven by the flow of electrons, and can therefore have different physical characteristics. Spin dynamics happen on a much smaller time scale, and do not produce as much waste heat as electron flow. They therefore have, in principle, the potential to overcome the fundamental limits of electron-flow computing. However they require a different chip architecture and would not approach current classical computing in the near future.
Not all Spintronic devises are exotic. Hard disk drives (HDD) store information in the spin degrees of freedom of magnetic atoms embedded on a non magnetic platter. Modern HDDs have over $1.3 Tb$ per square inch. However, the spin degrees of freedom are isolated, and read wright is achieved electromechanical, making the devises fragile and slow. In comparison Solid State Drives (SSD) store information in the position degrees of freedoms of electrons in semiconductor secrets, but also have limited life times. If one could store information in the spin degrees of freedom of single atoms, but read and write them using traditional electronics, one would have vast reliable and dense long team data storage.
Not all spintronic devices are exotic. Hard Disk Drives (HDD) store information in the spin degrees of freedom of magnetic atoms embedded on a non-magnetic platter. Modern HDDs have over $1.3 Tb$ per square inch. However, the spin degrees of freedom are isolated, and read/write is achieved electromechanically, making the devices fragile and slow. In comparison Solid State Drives (SSD) store information in the position degrees of freedom of electrons in semiconductor circuits, but also have limited lifetimes.
If one could store information in the spin degrees of freedom of single atoms, but read and write them using traditional electronics, one would have vast, reliable and dense long-term data storage.
Spintronics also has applications in Quantum Computers. One of the big obstacles in quantum computing is coherence times. Once coherence is lost no more quantum computation can occurs. Some spin systems can remain in coherent states currently for 100's of seconds, which is several orders of magnitude grater then the degrees coherence time of other quantum technology.
Spintronics also has applications in quantum computers. One of the big obstacles in quantum computing is coherence times. Once coherence is lost no more quantum computation can occur.
Some spin systems can currently remain in coherent states for hundreds of seconds, which is several orders of magnitude greater than the coherence time of other quantum technologies.
......
......@@ -39,7 +39,8 @@ Neutral atoms can be trapped at the lattice sites via the Stark shift. The Star
\caption{Superfluid vs Mott insulator state.}
\end{figure}
If the optical potential is shallow then a condensate is formed, where all the atom is spread out over the same space (see Fig.~\ref{fig:superfluid}) If the optical potential is deep then a Mott insulator is formed, where each atom is confined to a single site (see Fig.~\ref{fig:Mott}). In the Mott state, neighboring particles interact via exchange and super exchange mechanisms. By modifying the optical potential, the overlap integral between neighboring cites can be modified, and thus exchange interactions can be controlled. If all the lattice cites occupied by one atom, a crystal is formed. An optical lattice itself has zero entropy and does not conduct photons, which greatly simplifies the dynamics of the crystal.
If the optical potential is shallow then a condensate is formed, where all the atoms are spread out over the
same space (see Fig.~\ref{fig:superfluid}). If the optical potential is deep then a Mott insulator is formed, where each atom is confined to a single site (see Fig.~\ref{fig:Mott}). In the Mott state, neighboring particles interact via exchange and super-exchange mechanisms. By modifying the optical potential, the overlap integral between neighboring sites can be modified, and thus exchange interactions can be controlled. If all the lattice sites are occupied by one atom, a crystal is formed. An optical lattice itself has zero entropy and does not conduct phonons, which greatly simplifies the dynamics of the crystal.
For the experimental setup in Aarhus the Mott insulating regime is reached around a lattice depth of $V_0 = 20E_R$, i.e., $\zeta=20$. Around this depth there is unit occupancy and we have $J\ll U$. With $k=2\pi/\lambda = \SI{5.9052e+06}{m^{-1}}$ and assuming a $s$-wave scattering length of $a_s \approx 90a_0$, where $a_0=\SI{5.29e-11}{m}$ is the Bohr radius we have
\begin{equation}
......
Many control strategies for quantum systems require to be able to efficiently compute the phenomenological outcome for a Hamiltonian.
However, solving the problem for exact Hamiltonians is usually far too expensive and approximations are needed.
This project investigated one such case, the approximation of cold atom dynamics using effective Heisenberg spin-models to probe the boundary of the domains of applicability.
Many control strategies for quantum systems require the ability to efficiently compute the phenomenological outcome for a Hamiltonian.
However the full Hubbard model Hamiltonian is thought to be too expensive to be used for control applications.
The numerical methods implemented were used to simulate the evolution of various initialisations under both the Hubbard and Heisenberg models with various parameters,
and their differences were analysed.
Luckily, for the systems used in the development of spin control methods, the Heisenberg model can be used as an approximation.
The results broadly suggest that the approximate model captures the essential dynamics of the system in the parameter regime where the approximation is applicable.
However, more work is necessary to establish if the approximate models are sufficient for model-based robust control design.
The simulation framework created enabled this.
We have implemented both of these models and carried out a basic exploration of their domain of validity.
This was achieved by developing a new framework designed to be simple, modular and extensible.
The framework was also used to test the numerical stability of the Padé approximation for computing the matrix exponential.
The early signs of the deviation of the effective Heisenberg model from the full Hubbard model are found in the experimental regime.
These deviations were found to take the form of a frequency shift in the first 3 terms of the Fourier decomposition.
Such errors compound over time, and therefore the effective Heisenberg model should not be used in applications that require accurate characterisation models over extended times.
However it was found that for a small number of atoms the full Hubbard model is tractable, if it is computed for low excitation subspaces.
It is also suggested that for intermediate chain lengths, or for applications where low computation time is required,
the Heisenberg model may be modified to extend its domain of applicability.
To this end, numerical and theoretical work is desirable.
The exploratory work produced results that agreed with theory, and also results that, while not theoretically forbidden,
require further explanation.
Quantitatively, we find that the Hubbard Hamiltonian picks up a significant frequency shift for $J/U \approx 0.05$.
This work has direct practical implications and serves as an excellent foundation for future work.
The work suggests that quantum control spintronics experiments using a small number of atoms
can and should use the Hubbard model, as we have shown non-Heisenberg phenomena emerge
within experimental parameter ranges.
Possibilities for future work that would benefit from the framework developed here include:
A more extensive study of on-site potentials to find the real constraints needed for quantum control.
A study into the effect of initialisation errors, such as double occupation or, more likely, missing atoms,
with the aim of developing diagnostic tools and coping strategies.
An attempt to extend the Heisenberg Hamiltonian's domain of applicability.
Future work will systematically compare the effectiveness and robustness of optimal control solutions based on approximate models in the context of the more accurate spin-Hubbard model.
If significant differences are observed, the Hubbard model simulator can be integrated with the existing optimal control framework to find more effective and robust solutions for the energy landscape.
Limits of the optical potentials that can be generated using the lasers and digital mirror arrays will also have to be considered.
This could be done by integrating a simulator for the optical potential with the Hubbard model dynamics simulator.
Alternatively, if energy-landscapes designed using model-based optimization prove to be ineffective for various reasons,
then adaptive remote control could be explored, which would replace simulation runs with actual experiments.
As observed earlyer, for both the Hubard and Hisenberg Hamiltonions defined over $L$ modes,
the total mode ocupation number $N$ is conserved, allowing us to seperate the subspaces by $N$.
As observed earlier, for both the Hubbard and Heisenberg Hamiltonians defined over $L$ modes,
the total mode occupation number $N$ is conserved, allowing us to separate the subspaces by $N$.
We want to find the dimension of the $N$ subspace
for $L$ modes and a maximum excitation number per mode $M$.
......@@ -10,14 +10,14 @@ as a series of ones and commas:
$$
"1,,,111,,1,"
$$
each inter comer position (including the ends) is interpreted as a mode
each inter-comma position (including the ends) is interpreted as a mode
and each one is interpreted as an atom in that mode.
This encodes that atoms in a single mode are identical
but atoms on different modes are not.