\documentclass[10pt]{article}
\usepackage{amsfonts}
\usepackage{graphicx}
\usepackage{epstopdf}
%\usepackage{epsfig}
\usepackage{mathtools}
\begin{document}
%%%%%---------------------
%--------------
%% preamble.tex
%% this should be included with a command like
%% \input{preamble.tex}
%% \lecture{1}{September 4, 1996 }{Daniel A. Spielman}{name
%% of poor scribe}
\hbadness=10000
\vbadness=10000
%\setlength{\oddsidemargin}{.25in}
%\setlength{\evensidemargin}{.25in}
%\setlength{\textwidth}{6in}
%\setlength{\topmargin}{-0.4in}
%\setlength{\textheight}{8.5in}
\newcommand{\handout}[6]{
\renewcommand{\thepage}{#6-\arabic{page}}
\noindent
\begin{center}
\framebox{
\vbox{
\hbox to 5.78in { {\bf 6.440 Essential Coding Theory }
\hfill #2 }
\vspace{4mm}
\hbox to 5.78in { {\Large \hfill #5 \hfill} }
\vspace{2mm}
\hbox to 5.78in { {\it #3 \hfill #4} }
}
}
\end{center}
\vspace*{4mm}
}
\newcommand{\lecture}[4]{\handout{#1}{#2}{Lecturer:
#3}{Scribe: #4}{Lecture #1}}
\newtheorem{theorem}{Theorem}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{observation}[theorem]{Observation}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{fact}[theorem]{Fact}
\newtheorem{assumption}[theorem]{Assumption}
\newcommand{\qed}{\rule{7pt}{7pt}}
\newcommand{\dis}{\mathop{\mbox{\rm d}}\nolimits}
\newcommand{\per}{\mathop{\mbox{\rm per}}\nolimits}
\newcommand{\area}{\mathop{\mbox{\rm area}}\nolimits}
\newcommand{\cw}{\mathop{\rm cw}\nolimits}
\newcommand{\ccw}{\mathop{\rm ccw}\nolimits}
\newcommand{\DIST}{\mathop{\mbox{\rm DIST}}\nolimits}
\newcommand{\OP}{\mathop{\mbox{\it OP}}\nolimits}
\newcommand{\OPprime}{\mathop{\mbox{\it OP}^{\,\prime}}\nolimits}
\newcommand{\ihat}{\hat{\imath}}
\newcommand{\jhat}{\hat{\jmath}}
\newcommand{\abs}[1]{\mathify{\left| #1 \right|}}
\newenvironment{proof}{\noindent{\bf Proof}\hspace*{1em}}{\qed\bigskip}
\newenvironment{proof-sketch}{\noindent{\bf Sketch of Proof}\hspace*{1em}}{\qed\bigskip}
\newenvironment{proof-idea}{\noindent{\bf Proof Idea}\hspace*{1em}}{\qed\bigskip}
\newenvironment{proof-of-lemma}[1]{\noindent{\bf Proof of Lemma #1}\hspace*{1em}}{\qed\bigskip}
\newenvironment{proof-attempt}{\noindent{\bf Proof Attempt}\hspace*{1em}}{\qed\bigskip}
\newenvironment{proofof}[1]{\noindent{\bf Proof}
of #1:\hspace*{1em}}{\qed\bigskip}
\newenvironment{remark}{\noindent{\bf Remark}\hspace*{1em}}{\bigskip}
% \makeatletter
% \@addtoreset{figure}{section}
% \@addtoreset{table}{section}
% \@addtoreset{equation}{section}
% \makeatother
\newcommand{\FOR}{{\bf for}}
\newcommand{\TO}{{\bf to}}
\newcommand{\DO}{{\bf do}}
\newcommand{\WHILE}{{\bf while}}
\newcommand{\AND}{{\bf and}}
\newcommand{\IF}{{\bf if}}
\newcommand{\THEN}{{\bf then}}
\newcommand{\ELSE}{{\bf else}}
% \renewcommand{\thefigure}{\thesection.\arabic{figure}}
% \renewcommand{\thetable}{\thesection.\arabic{table}}
% \renewcommand{\theequation}{\thesection.\arabic{equation}}
\makeatletter
\def\fnum@figure{{\bf Figure \thefigure}}
\def\fnum@table{{\bf Table \thetable}}
\long\def\@mycaption#1[#2]#3{\addcontentsline{\csname
ext@#1\endcsname}{#1}{\protect\numberline{\csname
the#1\endcsname}{\ignorespaces #2}}\par
\begingroup
\@parboxrestore
\small
\@makecaption{\csname fnum@#1\endcsname}{\ignorespaces #3}\par
\endgroup}
\def\mycaption{\refstepcounter\@captype \@dblarg{\@mycaption\@captype}}
\makeatother
\newcommand{\figcaption}[1]{\mycaption[]{#1}}
\newcommand{\tabcaption}[1]{\mycaption[]{#1}}
\newcommand{\head}[1]{\chapter[Lecture \##1]{}}
\newcommand{\mathify}[1]{\ifmmode{#1}\else\mbox{$#1$}\fi}
%\renewcommand{\Pr}[1]{\mathify{\mbox{Pr}\left[#1\right]}}
%\newcommand{\Exp}[1]{\mathify{\mbox{Exp}\left[#1\right]}}
\newcommand{\bigO}O
\newcommand{\set}[1]{\mathify{\left\{ #1 \right\}}}
\def\half{\frac{1}{2}}
% Coding theory addenda
\newcommand{\enc}{{\sf Enc}}
\newcommand{\dec}{{\sf Dec}}
\newcommand{\E}{{\rm Exp}}
\newcommand{\Var}{{\rm Var}}
\newcommand{\Z}{{\mathbb Z}}
\newcommand{\F}{{\mathbb F}}
\newcommand{\integers}{{\mathbb Z}^{\geq 0}}
\newcommand{\R}{{\mathbb R}}
\newcommand{\Q}{{\cal Q}}
\newcommand{\eqdef}{{\stackrel{\rm def}{=}}}
\newcommand{\from}{{\leftarrow}}
\newcommand{\vol}{{\rm Vol}}
\newcommand{\poly}{{\rm poly}}
\newcommand{\ip}[1]{{\langle #1 \rangle}}
\newcommand{\wt}{{\rm wt}}
\renewcommand{\vec}[1]{{\mathbf #1}}
\newcommand{\mspan}{{\rm span}}
\newcommand{\rs}{{\rm RS}}
\newcommand{\RM}{{\rm RM}}
\newcommand{\Had}{{\rm Had}}
\newcommand{\calc}{{\cal C}}
%\newcommand{\binom}[2]{{#1 \choose #2}}
\newcommand{\fig}[4]{
\begin{figure}
\setlength{\epsfysize}{#2}
\vspace{3mm}
\centerline{\epsfbox{#4}}
\caption{#3} \label{#1}
\end{figure}
}
\newcommand{\ord}{{\rm ord}}
\providecommand{\norm}[1]{\lVert #1 \rVert}
\newcommand{\embed}{{\rm Embed}}
\newcommand{\qembed}{\mbox{$q$-Embed}}
\newcommand{\calh}{{\cal H}}
\newcommand{\lp}{{\rm LP}}
%%%%%---------------------
\lecture{19 - April 22, 2013}{Spring 2013}{Madhu Sudan}{Mohsen Ghaffari}{19}
\section{Announcement}
\textbf{Sign up for the project presentation slots.} Related information is available on the website of the course. The presentation dates are Tuesday 5/7 and Thursday 5/9, 10 am to 1 pm. Each team will have a 20-minute slot for presentation.
\section{Overview: Codes From Other Codes}
So far in the course, we have seen three types of codes, summarized as follows:
\begin{enumerate}
\item \textbf{Algebraic Codes}
\begin{itemize}
\item Work for worst-case errors.
\item Have polynomial time encoding and decoding algorithms
\end{itemize}
\item \textbf{LDPCs}
\begin{itemize}
\item Work for worst-case errors as well, but for fewer errors (smaller distance) compared to the Algebraic Codes.
\item Have linear time encoding and decoding algorithms and are simpler.
\end{itemize}
\item \textbf{Polar Codes}
\begin{itemize}
\item Work for random errors. When viewed in the context of worst-case errors, these codes do not have good distance.
\item Have $O(n\log n)$ encoding and decoding algorithms. Note that this bound sits somewhere in between the time complexities of the Algebraic Codes and LDPCs.
\end{itemize}
\end{enumerate}
In today's lecture, we will look into the question of ``how can we combine codes to get new codes?''. In this regard, we will consider five types of operations:
\begin{itemize}
\item Puncturing
\item Restriction
\item Tensor
\item Concatenation
\item A graph-theoretic method
\end{itemize}
Puncturing and restriction are simple and basic operations, which we will review quickly, and we have already seen concatenation in the past lectures (refer to Lecture 7). The focus in this lecture will be on the \emph{Tensor method}, presented in Section~\ref{sec:tensor}, and the \emph{graph-theoretical method}, presented in Section~\ref{sec:graph}.
\section{Operations on Codes}
\subsection{Puncturing \& Restriction (simple)}
To explain puncturing and restriction, consider an $(n, k, d)_{\Sigma}$ code $C$ and fix a coordinate $i \in \{1, 2, \dots, n\}$.
\paragraph{Puncturing} In the puncturing operation, for each codeword $c_j \in C$, we eliminate the $i^{th}$ coordinate of $c_j$. It is easy to see that this operation produces an $(n-1, k, d-1)_{\Sigma}$ code.
\paragraph{Restriction} Consider the $i^{th}$ coordinate of all the codewords in $C$ and let $a \in \Sigma$ be the symbol that appears in the $i^{th}$ coordinate with the highest frequency. Let $C'$ be the set of all the codewords $c_j\in C$ such that the $i^{th}$ coordinate of $c_j$ is equal to $a$. In the restriction operation, the new code $C_{new}$ is derived from $C'$ by eliminating the $i^{th}$ coordinate of each codeword $c_j \in C'$. It is easy to see that $C_{new}$ is an $(n-1, k-1, d)_{\Sigma}$ code. In particular, to see that $C_{new}$ has dimension $k-1$, note that by definition of $a$, the number of codewords in $C'$ (and thus in $C_{new}$) is at least $|\Sigma|^k/|\Sigma| = |\Sigma|^{k-1}$.
\subsection{Tensor (new)}\label{sec:tensor}
We define the tensor operation for linear codes. For two linear codes $C_1=[n_1,k_1,d_1]_{\Sigma}$, $C_2=[n_2,k_2,d_2]_{\Sigma}$, the tensor code of $C_1$ and $C_2$ is denoted by $C_1 \otimes C_2$. We view the codewords of $C_1 \otimes C_2$ as an $n_1 \times n_2$ matrix\footnote{Note that this can be equivalently viewed as a vector of length $n_1 n_2$.}. A given $n_1 \times n_2$ matrix $A$ with entries in $\Sigma$ is a codeword of code $C_1 \otimes C_2$ iff each column of $A$ is a codeword of $C_1$ and each row of $A$ is a codeword of $C_2$.
\begin{eqnarray}
A_{n_1,n_2} =
\begin{bmatrix}
a_{1,1} & a_{1,2} & \cdots & a_{1,n_2} \\
a_{2,1} & a_{2,2} & \cdots & a_{2,n_2} \\
\vdots & \vdots & \ddots & \vdots \\
a_{n_1,1} & a_{n_1,2} & \cdots & a_{n_1,n_2} \nonumber
\end{bmatrix}
\end{eqnarray}
\begin{theorem} $C_1\otimes C_2$ is an $[n_1n_2,\, k_1k_2,\, d_1d_2 ]_{\Sigma}$ code.
\end{theorem}
\begin{proof}First of all, it is easy to see that $C_1\otimes C_2$ is a linear code. Intuitively, this is because $C_1 \otimes C_2$ can be described as a collection of linear constraints on the entries of the matrix. To formally prove the linearity, one can see that the summation of two matrices (codewords) in $C_1 \otimes C_2$ is in $C_1 \otimes C_2$, and also, that scaling up a matrix in $C_1 \otimes C_2$ by an element of $\Sigma$ is in $C_1 \otimes C_2$, as well.
We next verify that $C_1\otimes C_2$ has distance $d_1 d_2$. First consider a codeword $c_1$ of $C_1$ that has only $d_1$ non-zero elements, and a codeword $c_2$ of $C_2$ that has only $d_2$ non-zero elements. Then, consider the matrix $A$ where the entry of $A$ in the intersection of the $i^{th}$ row and the $j^{th}$ column, i.e., $a_{i,j}$, is equal to the product of the $i^{th}$ element of $c_1$ and the $j^{th}$ element of $c_2$. Note that $A \in C_1\otimes C_2$ because each column of $A$ is a multiple of $c_1$ and is thus a codeword of $C_1$, and each row of $A$ is a multiple of $c_2$ and is thus a codeword of $C_2$. Now the number of nonzero elements of $A$ is exactly $d_1 d_2$. This shows that the distance of $C_1\otimes C_2$ is at most $d_1 d_2$. To see that this distance is also at least $d_1d_2$, consider an arbitrary non-zero matrix $B$ in $C_1 \otimes C_2$ and pick a nonzero entry of $B$; let it be $b_{i,j}$. Since the $i^{th}$ row of $B$ is a non-zero codeword of $C_2$, this row has at least $d_2$ nonzero elements. Consider the $d_2$ columns of $B$ that intersect the $i^{th}$ row of $B$ in these nonzero elements (at least $d_2$ columns). Each of these columns is a nonzero codeword of $C_1$ and thus, has at least $d_1$ nonzero elements. Therefore, matrix $B$ in total has at least $d_1d_2$ nonzero entries. Hence, we conclude that the distance of code $C_1 \otimes C_2$ is $d_1d_2$.
Finally, we verify that $C_1 \otimes C_2$ has dimension $k_1 k_2$. Before going there, note that just using the method described at the start of the previous paragraph, we can find $|\Sigma|^{k_1} \cdot |\Sigma|^{k_2} = |\Sigma|^{k_1+k_2}$ matrices in $C_1\otimes C_2$, which already shows that the dimension of $C_1 \otimes C_2$ is at least $k_1 + k_2$. To prove that the dimension is $k_1 k_2$, we show a method to produce $|\Sigma|^{k_1 k_2}$ matrices in $C_1 \otimes C_2$. Note that we can describe code $C_1$ by an $n_1 \times k_1$ matrix $G_1$ such that for each $k_1 \times 1$ vector $x$ with entries from $\Sigma$, $G_1 x$ is a codeword of $C_1$. Similarly, we can describe code $C_2$ by an $n_2 \times k_2$ matrix $G_2$ such that for each $k_2 \times 1$ vector $y$ with entries from $\Sigma$, $y^{T} G_2^{T}$ is a codeword of $C_2$. Now, for each $k_1 \times k_2$ matrix $A$ with entries from $\Sigma$, $G_1 A G_2^{T}$ is a codeword of $C_1 \otimes C_2$.
\begin{eqnarray}
\begin{bmatrix}
\; & \; & \; \\
\; & \; & \; \\
\; & \; & \; \\
\; & G_1 & \; \\
\; & \; & \; \\
\; & \; & \; \\
\; & \; & \;
\end{bmatrix}_{n_1 \times k_1}
\begin{bmatrix}
\; & \; & \; \\
\; & A & \; \\
\; & \; & \;
\end{bmatrix}_{k_1 \times k_2}
\begin{bmatrix}
\; & \; & \; & \; & \; & \; & \; \\
\; & \; & \; & G_2^{T} & \; & \; & \; \\
\; & \; & \; & \; & \; & \; & \; \nonumber
\end{bmatrix}_{k_2 \times n_2} \in C_1 \otimes C_2
\end{eqnarray}
Moreover, each matrix $A$ produces a distinct matrix $G_1 A G_2^{T}$ of $C_1 \otimes C_2$. Hence, we get $|\Sigma|^{k_1 k_2}$ matrices which shows that $C_1 \otimes C_2$ has dimension $k_1 k_2$.
\end{proof}
\subsection{Concatenation, and Recap}
In the past lectures, we defined concatenation. As a short summary, the operations that we have studied so far can be summarized as follows:
\begin{itemize}
\item Puncturing: $(n,k,d)_{\mathbb{F}_q} \rightarrow (n-1,k,d-1)_{\mathbb{F}_q}$
\item Restriction: $(n,k,d)_{\mathbb{F}_q} \rightarrow (n-1,k-1,d)_{\mathbb{F}_q}$
\item Tensor: $[n_1,k_1,d_1]_{\mathbb{F}_q} \otimes [n_2,k_2,d_2]_{\mathbb{F}_q} \rightarrow [n_1n_2,k_1k_2,d_1d_2]_{\mathbb{F}_q}$
\item Concatenation: $\{n_1,k_1,d_1\}_{\mathbb{F}^{k_2}_q} \circ [n_2,k_2,d_2]_{\mathbb{F}_q} \rightarrow [n_1n_2,k_1k_2,d_1d_2]_{\mathbb{F}_q}$
\end{itemize}
In the final bullet-point, $\{n,k,d\}_{\mathbb{F}^{k'}_q}$ denotes an \emph{Additive Code}, which can be viewed as a weaker variant of linear code, and is defined as follows:
\begin{definition} (\textbf{Additive Code}) $C \subseteq (\mathbb{F}^{k'}_q)^n$ is a $\{n,k,d\}_{\mathbb{F}^{k'}_q}$ code iff (1) for each codeword $(x_1, \dots, x_n) \in C$ and each $\alpha \in \mathbb{F}_{q}$, we have $(\alpha x_1 , \dots, \alpha x_n) \in C$, and (2) for each two codewords $(x_1, \dots, x_n)$ and $(y_1, \dots, y_n)$ in $C$, we have $(x_1+y_1, \dots, x_n+y_n) \in C$.
\end{definition}
Note that each of the above operations loses in `performance' (e.g., when measured by rate plus distance). Also, amongst them, only concatenation has been useful for us thus far. In the next section, we present a graph-theoretic method of deriving new codes from given codes which actually leads to improvements in `performance'.
\subsection{The Graph-Theoretic Method}\label{sec:graph}
The method that we explain in this section was first presented by Alon, Bruck, Naor, Naor and Roth in 1992~\cite{ABNNR}, and later extended by Alon, Edmonds, and Luby in 1995 \cite{AEL}. These two papers focus on the construction of such codes and will be the focus of the rest of this lecture. Guruswami and Indyk~\cite{GI} studied the time complexity of the decoding of codes constructed by this method.
We start with presenting the idea of the ABNNR work~\cite{ABNNR}, which takes a `good' code over alphabet $\mathbb{F}_2$ and builds an `excellent' code over $\mathbb{F}^d_2$. Consider a bipartite $d$-regular graph $H$ with $n$ nodes on each side that is a $(\gamma, \delta)$-expander\footnote{Recall from the previous lectures that $H$ is a $(\gamma, \delta)$-expander if for each set $S$ of nodes on the left such that $|S| \leq \delta n$, we have $|\Gamma(S)|\geq \gamma |S|$, where $\Gamma(S)$ denotes the set of nodes on the right side that have at least one neighbor in $S$.}. Let $C \subseteq \mathbb{F}_2^n$ be a code with distance at least $\delta' n$. For each codeword $c \in C$, we get a codeword $c' \in C_{new}$, as follows: label the $n$ nodes on the left side of $H$ with the bits of the codeword $c$. Then, for each node $v$ on the right side of $H$, label $v$ with the binary string of length $d$ that is made of the labels of the $d$ neighbors of $v$ (ordered consistent with the ordering of nodes of the left side). The codeword $c' \in C_{new}$ consists of the labels of the nodes of the right side, which are $n$ symbols in $\mathbb{F}_{2}^d$. See Figure~\ref{fig:ExpanderCode} for an illustration.
\begin{figure}[t]
\centering
\includegraphics[width=9cm]{ExpanderCode.eps}
\caption{Creating a codeword $c' \in C_{new}$ from codeword $c\in C$}
\label{fig:ExpanderCode}
\end{figure}
\begin{theorem} If $C$ is an $[n,k, \delta'n]_2$ code and $H$ is a bipartite $d$-regular $(\gamma, \delta)$-expander graph with $n$ nodes on each side such that $\delta \geq \delta'$, then the new code $C_{new}$ derived as above is a $\{n, \frac{k}{d}, \gamma\delta'n\}_{2^{d}}$ code.
\end{theorem}
\begin{proof}It is easy to check that $C_{new}$ is an additive code. For the dimension, note that $C_{new}$ has $2^k$ codewords, one for each codeword of $C$. These codewords correspond to $2^k=(2^{d})^{\frac{k}{d}}$ messages in alphabet $\mathbb{F}_{2}^{d}$, each of which can be expressed via $\frac{k}{d}$ symbols in this alphabet. Thus the dimension of the new code is $\frac{k}{d}$.
For the distance, note that each nonzero codeword $c$ of $C$ produces a codeword of $C_{new}$ with at least $\gamma \delta' n$ nonzero symbols\footnote{Here, the latter nonzero is with respect to $\mathbb{F}_{2}^{d}$ and means an element of $\mathbb{F}_2^{d}$ that is not equal to $\underbrace{(0 , 0, \dots, 0)}_{d \text{ times}}$.}. To see this, let $S$ be a set of $\delta' n$ nonzero elements of $c$ (which exists because $C$ has distance $\delta'n$). Then the labels of all nodes in $\Gamma(S)$ are non-zero, and since $|S| = \delta' n \leq \delta n$ and $H$ is a $(\gamma, \delta)$-expander, we have $|\Gamma(S)| \geq \gamma \delta' n$.
\end{proof}
If we choose a `good' expander $H$, then $\gamma$ would be close to $d$ which means that in $C_{new}$, we have increased the distance by (almost) a factor of $d$, when compared to $C$. This however comes at the cost of decreasing the rate by a factor of $d$ and increasing the alphabet size exponentially to $2^d$.
Alon, Edmonds, Luby \cite{AEL} presented a new interpretation of this method. In this interpretation, we combine two codes to get one new code. We explain later that putting the ABNNR work in this framework, one of the initial codes is simply a repetition code. First we present a definition:
\begin{definition}$((d, \epsilon)$-\textbf{Regular Bipartite Graph}): Consider a $d$-regular bipartite graph $H$ with $n$ nodes on each side. $H$ is called a $(d, \epsilon)$-{Regular Bipartite Graph} if for each set $X$ of the left nodes of $H$ and each set $Y$ of the right nodes of $H$, we have $$|E_{H}(X, Y)| \geq \left(\frac{|X|}{n}\frac{|Y|}{n} - \epsilon\right) \cdot d n,$$ where $E_{H}(X, Y)$ denotes the edges of $H$ with one endpoint in $X$ and the other in $Y$.\footnote{Note that on a random $d$-regular bipartite graph $H'$, $\mathbb{E}[|E_{H'}(X, Y)|] = (\frac{|X|}{n}\frac{|Y|}{n}) \cdot d n$.}
\end{definition}
We define the new code $C_{new}$ using a given $(d, \epsilon)$-regular bipartite graph $H$, a given additive code $C=\{n,k, \delta'n\}_{\mathbb{F}_{2}^{R_1 d}}$ and a given linear code $C_{small}=[d, R_1 d, \delta_1 d]_{\mathbb{F}_2}$. For each codeword $c \in C$, we define a codeword $c' \in C_{new}$, as follows: label the $n$ nodes on the left side of $H$ with the $n$ symbols of the codeword $c$. Then, encode each of these symbols using $C_{small}$. Thus, now, each node on the left is labeled with a $d$-bit vector. Each left-side node sends the $d$ bits of its label to its $d$ neighbors on the right side of graph $H$, one bit to each and with the order consistent with the ordering of the bits of the label and the ordering of the right-side nodes. Each node on the right-side receives exactly $d$ bits, one from each of its neighbors. Putting these bits in a $d$-bit vector (ordered consistent with the ordering of the left-side nodes) gives the label of each right-side node. The $n$ labels of the right-side nodes constitute the codeword $c'$ in $C_{new}\subseteq(\mathbb{F}_2^{d})^{n}$. See Figure~\ref{fig:ExpanderCodeAEL} for an illustration.
\begin{figure}[t]
\centering
\includegraphics[width=12cm]{ExpanderCodeAEL.eps}
\caption{New way of creating a codeword $c' \in C_{new}$ from codeword $c\in C$}
\label{fig:ExpanderCodeAEL}
\end{figure}
\begin{theorem}\label{thm:AEL} If $C$ is a $\{n,k, \delta'n\}_{\mathbb{F}_{2}^{R_1 d}}$ code, $C_{small}$ is a $[d, R_1 d, \delta_1 d]_{\mathbb{F}_2}$ code,
and $H$ is a $(d, \epsilon)$-regular bipartite graph, then the new code $C_{new}$ derived as above is a $\{n, R_1 k, (\delta_1 - \epsilon/\delta')n\}_{\mathbb{F}_{2}^{d}}$ code.
\end{theorem}
Note that this theorem shows that the new code will have rate and relative distance almost equal to those of the code $C_{small}$. In the case of ABNNR work, $C_{small}$ was simply a repetition code, i.e., we had $R_1d=1$ and $C_{small}=[d,1,d]_2$.
\bigskip
We will see the proof of Theorem \ref{thm:AEL} in the next lecture. We will also study some algorithms for decoding the codes constructed via this method.
\begin{thebibliography}{9}
\bibitem{GI}
V. Guruswami and P. Indyk, \emph{``Expander-Based Constructions of Efficiently Decodable Codes,''} In the proceedings of the 42nd Annual Symposium on Foundations of Computer Science (FOCS'01), IEEE Computer Society, Las Vegas, Nevada, USA, 658--667.
\bibitem{ABNNR}
N. Alon, J. Bruck, J. Naor, M. Naor, and R.M. Roth, \emph{``Construction of asymptotically good low-rate error-correcting codes through pseudo-random graphs,''} IEEE Transactions on Information Theory, vol.~38, no.~2, pp.~509--516, Mar 1992.
\bibitem{AEL} N. Alon, J. Edmonds, and M. Luby, \emph{``Linear time erasure codes with nearly optimal recovery,''} In Proceedings of the 36th Annual Symposium on Foundations of Computer Science (FOCS'95), IEEE Computer Society, Washington, DC, USA, 512--520.
\end{thebibliography}
\end{document}