%% LyX 2.3.7 created this file. For more info, see http://www.lyx.org/.
%% Do not edit unless you really know what you are doing.
\documentclass[english]{article}
\usepackage[latin9]{inputenc}
\usepackage{geometry}
\geometry{verbose,tmargin=2cm,bmargin=2cm,lmargin=3cm,rmargin=3cm}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\makeatletter
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Textclass specific LaTeX commands.
\theoremstyle{definition}
\ifx\thechapter\undefined
\newtheorem{xca}{\protect\exercisename}
\else
\newtheorem{xca}{\protect\exercisename}[chapter]
\fi
\makeatother
\usepackage{babel}
\providecommand{\exercisename}{Exercise}
\begin{document}
\title{Exercises for Optimal Control -- Week 1}
\date{}
\author{Choose two problems to solve.}
\maketitle
\begin{xca}[Fundamental lemma of CoV]
Let $f$ be a real valued function defined on open interval $(a,b)$
and $f$ satisfies
\[
\int_{a}^{b}f(x)h(x){\rm d}x=0
\]
for all $h\in C_{c}(a,b)$, i.e., $h$ is continuous on $(a,b)$ and
its support, i.e., the closure of
\[
\{x:h(x)\ne0\}
\]
is contained in $(a,b)$.
1) Show that $f$ is identically zero if $f$ is continuous. If $f$
is only piecewise continuous, show that $f$ is non-zero at only finitely
many points.
\emph{Hint: suppose for example that $f(x_{1})>0$; since $f$ is
continuous, $f$ is positive near $x_{1}$, say on $(x_{1}-\delta,x_{1}+\delta)$.
Next, construct a non-negative continuous function $h\in C_{c}(x_{1}-\delta,x_{1}+\delta)$
which has some positive values on this interval. You'll then arrive
at a contradiction since $\int_{a}^{b}fh{\rm d}x>0$}.
2) Extend to multivariate case: i.e., if $f$ is continuous on an
open set $\Omega$ and
\[
\int_{\Omega}f(x)h(x){\rm d}x=0
\]
for all $h\in C_{c}(\Omega)$, then $f\equiv0$ on $\Omega$.
\emph{Hint: as before, if $f(x_{1})>0$ at some $x_{1}$, then there
exists a neighborhood of $x_{1}$ on which $f$ is positive. Construct
a non-negative function which vanishes outside this neighborhood.
And you get a contradiction.}
\end{xca}
%
\begin{xca}[Naive derivation of the 1st variations]
Derive the first order variations of the optimal control problem
\begin{align*}
\dot{x} & =f(x,u)\\
\min_{u}J(u(\cdot)) & :=\varphi(x(T))+\int_{0}^{T}L(x,u){\rm d}t
\end{align*}
using variation/perturbation
\[
u_{\epsilon}(t)=u_{*}(t)+\epsilon v(t)
\]
for an arbitrary $v(\cdot)$. Assume $f,L$ and $\varphi$ are $C^{2}$
and the initial state is fixed. Compare with the computation
using the $\delta$ operator. \emph{Hint: write $x_{\epsilon}(t)$ as
the solution to the integral equation $x_{\epsilon}(t)=x_{\epsilon}(0)+\int_{0}^{t}f(x_{\epsilon}(s),u_{\epsilon}(s)){\rm d}s$.
}Define $H(x,u,p)=p^{\top}f(x,u)-L(x,u)$ (the Hamiltonian!), and
fix a $C^{1}$ curve $t\mapsto p(t)$, then
\begin{align*}
J(u_{\epsilon}) & =\varphi(x_{\epsilon}(T))+\int_{0}^{T}L(x_{\epsilon},u_{\epsilon}){\rm d}t\\
& =\varphi(x_{\epsilon}(T))+\int_{0}^{T}p^{\top}(t)\dot{x}_{\epsilon}-H(x_{\epsilon},u_{\epsilon},p){\rm d}t
\end{align*}
Apply integration by parts to get rid of $\dot{x}_{\epsilon}$. Then
compute $\frac{\partial J}{\partial\epsilon}|_{\epsilon=0}$.
\end{xca}
%
\begin{xca}[Dido's problem]
Formulate Dido's problem as optimal control with only finite dimensional
constraints. \emph{Hint: define a new variable $\eta(t):=\int_{0}^{t}|\gamma'(s)|{\rm d}s$
and let $\gamma'(t):=u(t)$. }
\end{xca}
%
\begin{xca}[Minimum surface problem]
Let $D$ be an open set on the plane $\mathbb{R}^{2}$. Consider
a surface $D\ni(x,y)\mapsto(x,y,u(x,y))\in\mathbb{R}^{3}$. The area
of the graph of this surface can be calculated as
\[
\mathcal{A}(u)=\int_{D}\sqrt{1+u_{x}^{2}+u_{y}^{2}}{\rm d}x{\rm d}y
\]
where $u_{x}$, $u_{y}$ stands for partial derivatives, with boundary
condition $u|_{\partial D}=g$ for some function $g$ on $\partial D$.
Consider the problem of minimizing $\mathcal{A}(\cdot)$. Derive the
Euler--Lagrange equation of this problem. \emph{Hint: let $v$ be
an arbitrary function whose support lies in $D$. Assume $u_{*}$
is a minimizer, and consider the variation
\[
u_{\epsilon}=u_{*}+\epsilon v.
\]
Calculate $\frac{\partial\mathcal{A}(u_{\epsilon})}{\partial\epsilon}|_{\epsilon=0}$
to get
\[
\int_{D}\frac{u_{x}v_{x}+u_{y}v_{y}}{\sqrt{1+u_{x}^{2}+u_{y}^{2}}}{\rm d}x{\rm d}y=0
\]
Then apply the integration by parts formula for $\varphi\in C_{c}^{\infty}(D)$
(no boundary terms)
\[
\int_{D}v_{x}\varphi{\rm d}x{\rm d}y=-\int_{D}v\varphi_{x}{\rm d}x{\rm d}y.
\]
After eliminating $v_{x}$, $v_{y}$, apply Exercise 1.}
\end{xca}
%
\begin{xca}[Boundary value problem]
Consider the following boundary value problem
\begin{equation}
\ddot{q}+f(q,\dot{q})=0\label{eq:bvp}
\end{equation}
where $q\in\mathbb{R}^{n}$ with boundary condition
\begin{equation}
q(0)=a,\quad q(1)=b.\label{eq:bd}
\end{equation}
Two classical methods exist for numerically solving the BVP: 1) Finite
difference method; 2) Shooting.
1) The first order derivative may be discretized as
\[
\dot{q}(t_{i})\approx\frac{q(t_{i+1})-q(t_{i})}{t_{i+1}-t_{i}}
\]
In particular, for fixed step size $h=t_{i+1}-t_{i}$,
\[
\dot{q}_{i}=\frac{q_{i+1}-q_{i}}{h}.
\]
Propose a discretization scheme for second order derivative and write
the discretized version of (\ref{eq:bvp}), (\ref{eq:bd}) in matrix
form. Use this to find the geodesic on the ellipsoid
\[
x^{2}+y^{2}+\frac{z^{2}}{4}=1.
\]
Try different boundary values and observe the phenomenon of
non-uniqueness of geodesics.
2) The shooting method works like this. Write the second order ODE
as a first order one by defining $x_{1}=q$, $x_{2}=\dot{q}$
\[
\begin{bmatrix}\dot{x}_{1}\\
\dot{x}_{2}
\end{bmatrix}=\begin{bmatrix}x_{2}\\
-f(x_{1},x_{2})
\end{bmatrix}
\]
Then we know $x_{1}(0)=a$, $x_{1}(1)=b$. Suppose that $x_{2}(0)=\lambda$.
Then with the initial condition $x(0)=(a,\lambda)$, the value $x_{1}(1)$
should be uniquely determined, which is a function of $\lambda$,
denoted as $x_{1}(1,\lambda)$. Let
\[
F(\lambda):=x_{1}(1,\lambda)-b.
\]
The idea is to alter $\lambda$ so that $F(\lambda)$ converges to
$0$. This is equivalent to finding the root of $F$ (or $|F(\cdot)|^{2}$).
Solve the problem in 1) using shooting method. You may use gradient
descent or Newton's method. Is the scheme stable? Or, when does the
algorithm converge?
\end{xca}
%
\begin{xca}[Hamiltonian equation]
Recall the Hamiltonian equation:
\begin{align*}
\dot{q} & =\frac{\partial H}{\partial p}(q,p)\\
\dot{p} & =-\frac{\partial H}{\partial q}(q,p)
\end{align*}
where $q,p\in\mathbb{R}^{n}$. Let $\phi_{t}$ be the flow of the
system. That is, $(q(t),p(t))=\phi_{t}(q_{0},p_{0})$ for the initial
condition $(q_{0},p_{0})$.
1) For functions $H_{1},H_{2}:\mathbb{R}^{n}\times\mathbb{R}^{n}\to\mathbb{R}$,
define the Poisson bracket between $H_{1}$ and $H_{2}$ as
\[
\{H_{1},H_{2}\}=\sum_{i=1}^{n}\frac{\partial H_{1}}{\partial q_{i}}\frac{\partial H_{2}}{\partial p_{i}}-\frac{\partial H_{1}}{\partial p_{i}}\frac{\partial H_{2}}{\partial q_{i}}.
\]
Show that any real function $f$ on $\mathbb{R}^{n}\times\mathbb{R}^{n}$
satisfies, along trajectories of the Hamiltonian system, the ordinary
differential equation
\[
\dot{f}=\{f,H\}.
\]
2) Given a bounded set $U\subseteq\mathbb{R}^{n}\times\mathbb{R}^{n}$,
define
\[
U_{t}=\phi_{t}(U)
\]
Show that the volume of $U_{t}$ is constant. \emph{Hint: use the
transport equation: consider a system $\dot{x}=f(x)$, and let $\phi_{t}:\mathbb{R}^{n}\to\mathbb{R}^{n}$
be its flow, then for any bounded set $D\subseteq\mathbb{R}^{n}$,
}
\[
\frac{d}{dt}{\rm vol}(\phi_{t}(D))=\int_{\phi_{t}(D)}{\rm div}\,f\,{\rm d}x.
\]
3) Assume that there exists a bounded forward invariant set $D\subseteq\mathbb{R}^{n}\times\mathbb{R}^{n}$
of the Hamiltonian system. Then for any open set $U\subseteq D$,
and any $s>0$, there exists at least one point $x\in U$ which returns
to $U$ after some time $t\ge s$.
4) The Hamiltonian equation has time-dependent version
\begin{align*}
\dot{q} & =\frac{\partial H}{\partial p}(q,p,t)\\
\dot{p} & =-\frac{\partial H}{\partial q}(q,p,t)
\end{align*}
Show that the energy is in general not preserved.
5) The Hamiltonian equation can be generalized to
\[
\begin{bmatrix}\dot{q}\\
\dot{p}
\end{bmatrix}=\begin{bmatrix}0 & I\\
-I & R(q)
\end{bmatrix}\begin{bmatrix}\frac{\partial H}{\partial q}\\
\frac{\partial H}{\partial p}
\end{bmatrix}+\begin{bmatrix}0\\
G(q)u
\end{bmatrix}
\]
where $u$ is an input and $R$, $G$ are matrices. Assume that $H\ge0$.
Show that the matrix $R(q)$ plays the role of energy damping/injection
-- depending on sign. Show that the system is dissipative if $R$
is semi-positive definite, in the sense that there exists an output
$y$ such that
\[
-\int_{0}^{\infty}y^{\top}(t)u(t){\rm d}t\le H(q(0),p(0)).
\]
That is, the energy that can be extracted from the system via the
input output pair $(u,y)$ is less than the total energy of the system.
\end{xca}
\end{document}