% Created 2023-12-11 Mon 19:22
% Intended LaTeX compiler: pdflatex
\documentclass[11pt]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{graphicx}
\usepackage{longtable}
\usepackage{wrapfig}
\usepackage{rotating}
\usepackage[normalem]{ulem}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{capt-of}
\usepackage{hyperref}
\usepackage[a4paper,margin=1in,portrait]{geometry}
\author{Elizabeth Hunt}
\date{\today}
\title{LIZFCM Software Manual (v0.6)}
\hypersetup{
 pdfauthor={Elizabeth Hunt},
 pdftitle={LIZFCM Software Manual (v0.6)},
 pdfkeywords={},
 pdfsubject={},
 pdfcreator={Emacs 28.2 (Org mode 9.7-pre)},
 pdflang={English}}
\begin{document}

\maketitle
\tableofcontents

\setlength\parindent{0pt}
\section{Design}
\label{sec:org63deaf6}
The LIZFCM static library (at \url{https://github.com/Simponic/math-4610}) is a successor to my
attempt at writing codes for the Fundamentals of Computational Mathematics course in Common
Lisp, but the effort required to meet the requirement of creating a static library became
too difficult to integrate outside of the \texttt{ASDF} solution that Common Lisp already brings
to the table.

All of the work established in \texttt{deprecated-cl} has been painstakingly translated into
the C programming language. I have a couple of tenets for its design:

\begin{itemize}
\item Implementations of routines should all be immutable with respect to their arguments.
\item Functional programming is good (it's\ldots{} rough in C though).
\item Routines are separated into "modules" that follow a form of separation of concerns
in files, rather than individual files per function.
\end{itemize}
\section{Compilation}
\label{sec:org7291327}
A \texttt{Makefile} is provided for convenience. It has been tested on an \texttt{arm}-based M1 machine running
MacOS as well as \texttt{x86} Arch Linux.

\begin{enumerate}
\item \texttt{cd} into the root of the repo
\item \texttt{make}
\end{enumerate}

Then, as of homework 5, the testing routines are provided in \texttt{test} and utilize the
\texttt{utest} "micro"library. They compile to a binary in \texttt{./dist/lizfcm.test}.

Execution of the Makefile will perform compilation of the individual routines.

But, should manual intervention be required (should the little alien workers
inside the computer fail to do their job), one can use the following command to
produce an object file:

\begin{verbatim}
gcc -Iinc/ -lm -Wall -c src/<the_routine>.c -o build/<the_routine>.o
\end{verbatim}

The object files are then bundled into a static library in \texttt{lib/lizfcm.a}, which can be linked
in the standard method.
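For instance, a minimal sketch of that standard method (the \texttt{main.c} here is hypothetical, and the \texttt{ar} invocation simply mirrors what the build is expected to do when it produces \texttt{lib/lizfcm.a}):

\begin{verbatim}
# bundle the object files into the static library (if make has not already)
ar rcs lib/lizfcm.a build/*.o

# compile a program against the headers and link the archive
gcc -Iinc/ main.c lib/lizfcm.a -lm -o main
\end{verbatim}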
\section{The LIZFCM API}
\label{sec:org1ebe7fa}
\subsection{Simple Routines}
\label{sec:orgff18c6b}
\subsubsection{\texttt{smaceps}}
\label{sec:org443df5e}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{smaceps}
\item Location: \texttt{src/maceps.c}
\item Input: none
\item Output: a \texttt{float} giving the "machine epsilon" of the machine for single
precision floating point numbers: the value at which it becomes "indistinguishable" when added to one.
\end{itemize}

\begin{verbatim}
float smaceps() {
  float one = 1.0;
  float machine_epsilon = 1.0;
  float one_approx = one + machine_epsilon;

  while (fabsf(one_approx - one) > 0) {
    machine_epsilon /= 2;
    one_approx = one + machine_epsilon;
  }

  return machine_epsilon;
}
\end{verbatim}
\subsubsection{\texttt{dmaceps}}
\label{sec:org5121603}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{dmaceps}
\item Location: \texttt{src/maceps.c}
\item Input: none
\item Output: a \texttt{double} giving the "machine epsilon" of the machine for double
precision floating point numbers: the value at which it becomes "indistinguishable" when added to one.
\end{itemize}

\begin{verbatim}
double dmaceps() {
  double one = 1.0;
  double machine_epsilon = 1.0;
  double one_approx = one + machine_epsilon;

  while (fabs(one_approx - one) > 0) {
    machine_epsilon /= 2;
    one_approx = one + machine_epsilon;
  }

  return machine_epsilon;
}
\end{verbatim}
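As a quick sanity check, both routines can be called directly; a minimal sketch, assuming the library's declarations are reachable through the headers in \texttt{inc/}:

\begin{verbatim}
#include <stdio.h>
// declarations of smaceps() and dmaceps() from the LIZFCM headers in inc/
// are assumed to be in scope here

int main(void) {
  // the printed values approximate the point at which adding to 1.0
  // becomes indistinguishable in each precision
  printf("single precision machine epsilon: %e\n", smaceps());
  printf("double precision machine epsilon: %e\n", dmaceps());
  return 0;
}
\end{verbatim}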
\subsection{Derivative Routines}
\label{sec:org6fd324c}
\subsubsection{\texttt{central\_derivative\_at}}
\label{sec:orge9f0821}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{central\_derivative\_at}
\item Location: \texttt{src/approx\_derivative.c}
\item Input:
\begin{itemize}
\item \texttt{f} is a pointer to a one-ary function that takes a double as input and produces
a double as output
\item \texttt{a} is the domain value at which we approximate \texttt{f'}
\item \texttt{h} is the step size
\end{itemize}
\item Output: a \texttt{double} of the approximate value of \texttt{f'(a)} via the central difference
method.
\end{itemize}

\begin{verbatim}
double central_derivative_at(double (*f)(double), double a, double h) {
  assert(h > 0);

  double x2 = a + h;
  double x1 = a - h;

  double y2 = f(x2);
  double y1 = f(x1);

  return (y2 - y1) / (x2 - x1);
}
\end{verbatim}
\subsubsection{\texttt{forward\_derivative\_at}}
\label{sec:org8720f28}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{forward\_derivative\_at}
\item Location: \texttt{src/approx\_derivative.c}
\item Input:
\begin{itemize}
\item \texttt{f} is a pointer to a one-ary function that takes a double as input and produces
a double as output
\item \texttt{a} is the domain value at which we approximate \texttt{f'}
\item \texttt{h} is the step size
\end{itemize}
\item Output: a \texttt{double} of the approximate value of \texttt{f'(a)} via the forward difference
method.
\end{itemize}

\begin{verbatim}
double forward_derivative_at(double (*f)(double), double a, double h) {
  assert(h > 0);

  double x2 = a + h;
  double x1 = a;

  double y2 = f(x2);
  double y1 = f(x1);

  return (y2 - y1) / (x2 - x1);
}
\end{verbatim}
\subsubsection{\texttt{backward\_derivative\_at}}
\label{sec:org1589b19}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{backward\_derivative\_at}
\item Location: \texttt{src/approx\_derivative.c}
\item Input:
\begin{itemize}
\item \texttt{f} is a pointer to a one-ary function that takes a double as input and produces
a double as output
\item \texttt{a} is the domain value at which we approximate \texttt{f'}
\item \texttt{h} is the step size
\end{itemize}
\item Output: a \texttt{double} of the approximate value of \texttt{f'(a)} via the backward difference
method.
\end{itemize}

\begin{verbatim}
double backward_derivative_at(double (*f)(double), double a, double h) {
  assert(h > 0);

  double x2 = a;
  double x1 = a - h;

  double y2 = f(x2);
  double y1 = f(x1);

  return (y2 - y1) / (x2 - x1);
}
\end{verbatim}
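All three routines share the same call shape; a minimal sketch comparing them against the known derivative of \(f(x) = x^2\) (the helper \texttt{square} is hypothetical, and the LIZFCM declarations are assumed to be on the include path):

\begin{verbatim}
#include <stdio.h>
// declarations of the derivative routines from inc/ are assumed in scope

static double square(double x) { return x * x; }

int main(void) {
  double a = 3.0, h = 1e-6; // exact derivative at a is 2a = 6
  printf("central:  %f\n", central_derivative_at(&square, a, h));
  printf("forward:  %f\n", forward_derivative_at(&square, a, h));
  printf("backward: %f\n", backward_derivative_at(&square, a, h));
  return 0;
}
\end{verbatim}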
\subsection{Vector Routines}
\label{sec:org493841e}
\subsubsection{Vector Arithmetic: \texttt{add\_v, minus\_v}}
\label{sec:org3912c29}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name(s): \texttt{add\_v}, \texttt{minus\_v}
\item Location: \texttt{src/vector.c}
\item Input: two pointers to locations in memory wherein \texttt{Array\_double}'s lie
\item Output: a pointer to a new \texttt{Array\_double} as the result of addition or subtraction
of the two input \texttt{Array\_double}'s
\end{itemize}

\begin{verbatim}
Array_double *add_v(Array_double *v1, Array_double *v2) {
  assert(v1->size == v2->size);

  Array_double *sum = copy_vector(v1);
  for (size_t i = 0; i < v1->size; i++)
    sum->data[i] += v2->data[i];
  return sum;
}

Array_double *minus_v(Array_double *v1, Array_double *v2) {
  assert(v1->size == v2->size);

  Array_double *sub = InitArrayWithSize(double, v1->size, 0);
  for (size_t i = 0; i < v1->size; i++)
    sub->data[i] = v1->data[i] - v2->data[i];
  return sub;
}
\end{verbatim}
\subsubsection{Norms: \texttt{l1\_norm}, \texttt{l2\_norm}, \texttt{linf\_norm}}
\label{sec:orged74cfb}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name(s): \texttt{l1\_norm}, \texttt{l2\_norm}, \texttt{linf\_norm}
\item Location: \texttt{src/vector.c}
\item Input: a pointer to a location in memory wherein an \texttt{Array\_double} lies
\item Output: a \texttt{double} representing the value of the norm the function applies
\end{itemize}

\begin{verbatim}
double l1_norm(Array_double *v) {
  double sum = 0;
  for (size_t i = 0; i < v->size; ++i)
    sum += fabs(v->data[i]);
  return sum;
}

double l2_norm(Array_double *v) {
  double norm = 0;
  for (size_t i = 0; i < v->size; ++i)
    norm += v->data[i] * v->data[i];
  return sqrt(norm);
}

double linf_norm(Array_double *v) {
  assert(v->size > 0);
  double max = fabs(v->data[0]);
  for (size_t i = 0; i < v->size; ++i)
    max = c_max(fabs(v->data[i]), max);
  return max;
}
\end{verbatim}
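A minimal sketch of the three norms on a small vector, built with the \texttt{InitArray} macro described in the appendix (the LIZFCM headers in \texttt{inc/} are assumed to be available):

\begin{verbatim}
#include <stdio.h>
// the InitArray macro and the vector routines from inc/ are assumed in scope

int main(void) {
  Array_double *v = InitArray(double, {3.0, 4.0, 0.0});
  printf("l1:   %f\n", l1_norm(v));   // 7.0
  printf("l2:   %f\n", l2_norm(v));   // 5.0
  printf("linf: %f\n", linf_norm(v)); // 4.0
  free_vector(v);
  return 0;
}
\end{verbatim}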
\subsubsection{\texttt{vector\_distance}}
\label{sec:org20a5773}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{vector\_distance}
\item Location: \texttt{src/vector.c}
\item Input: two pointers to locations in memory wherein \texttt{Array\_double}'s lie, and a pointer to a
one-ary function \texttt{norm} taking as input a pointer to an \texttt{Array\_double} and returning a double
representing the norm of that \texttt{Array\_double}
\item Output: a \texttt{double} representing the distance between the two \texttt{Array\_double}'s under the given norm
\end{itemize}

\begin{verbatim}
double vector_distance(Array_double *v1, Array_double *v2,
                       double (*norm)(Array_double *)) {
  Array_double *minus = minus_v(v1, v2);
  double dist = (*norm)(minus);
  free_vector(minus);
  return dist;
}
\end{verbatim}
\subsubsection{Distances: \texttt{l1\_distance}, \texttt{l2\_distance}, \texttt{linf\_distance}}
\label{sec:orgac16178}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name(s): \texttt{l1\_distance}, \texttt{l2\_distance}, \texttt{linf\_distance}
\item Location: \texttt{src/vector.c}
\item Input: two pointers to locations in memory wherein \texttt{Array\_double}'s lie; the distance is taken
via the corresponding \texttt{l1}, \texttt{l2}, or \texttt{linf} norm
\item Output: a \texttt{double} representing the distance between the two \texttt{Array\_double}'s by the given
norm.
\end{itemize}

\begin{verbatim}
double l1_distance(Array_double *v1, Array_double *v2) {
  return vector_distance(v1, v2, &l1_norm);
}

double l2_distance(Array_double *v1, Array_double *v2) {
  return vector_distance(v1, v2, &l2_norm);
}

double linf_distance(Array_double *v1, Array_double *v2) {
  return vector_distance(v1, v2, &linf_norm);
}
\end{verbatim}
\subsubsection{\texttt{sum\_v}}
\label{sec:org876aafa}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{sum\_v}
\item Location: \texttt{src/vector.c}
\item Input: a pointer to an \texttt{Array\_double}
\item Output: a \texttt{double} representing the sum of all the elements of an \texttt{Array\_double}
\end{itemize}

\begin{verbatim}
double sum_v(Array_double *v) {
  double sum = 0;
  for (size_t i = 0; i < v->size; i++)
    sum += v->data[i];
  return sum;
}
\end{verbatim}
\subsubsection{\texttt{scale\_v}}
\label{sec:orgf1d236c}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{scale\_v}
\item Location: \texttt{src/vector.c}
\item Input: a pointer to an \texttt{Array\_double} and a scalar \texttt{double} to scale the vector
\item Output: a pointer to a new \texttt{Array\_double} of the scaled input \texttt{Array\_double}
\end{itemize}

\begin{verbatim}
Array_double *scale_v(Array_double *v, double m) {
  Array_double *copy = copy_vector(v);
  for (size_t i = 0; i < v->size; i++)
    copy->data[i] *= m;
  return copy;
}
\end{verbatim}
\subsubsection{\texttt{free\_vector}}
\label{sec:org2ca163d}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{free\_vector}
\item Location: \texttt{src/vector.c}
\item Input: a pointer to an \texttt{Array\_double}
\item Output: nothing.
\item Side effect: free the memory of the reserved \texttt{Array\_double} on the heap
\end{itemize}

\begin{verbatim}
void free_vector(Array_double *v) {
  free(v->data);
  free(v);
}
\end{verbatim}
\subsubsection{\texttt{add\_element}}
\label{sec:org7a99233}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{add\_element}
\item Location: \texttt{src/vector.c}
\item Input: a pointer to an \texttt{Array\_double} and a \texttt{double} \texttt{x} to append
\item Output: a new \texttt{Array\_double} with element \texttt{x} appended.
\end{itemize}

\begin{verbatim}
Array_double *add_element(Array_double *v, double x) {
  Array_double *pushed = InitArrayWithSize(double, v->size + 1, 0.0);
  for (size_t i = 0; i < v->size; ++i)
    pushed->data[i] = v->data[i];
  pushed->data[v->size] = x;
  return pushed;
}
\end{verbatim}
\subsubsection{\texttt{slice\_element}}
\label{sec:org6c07c99}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{slice\_element}
\item Location: \texttt{src/vector.c}
\item Input: a pointer to an \texttt{Array\_double} and an index \texttt{x} to remove
\item Output: a new \texttt{Array\_double} with the element at index \texttt{x} sliced out.
\end{itemize}

\begin{verbatim}
Array_double *slice_element(Array_double *v, size_t x) {
  Array_double *sliced = InitArrayWithSize(double, v->size - 1, 0.0);
  for (size_t i = 0; i < v->size - 1; ++i)
    sliced->data[i] = i >= x ? v->data[i + 1] : v->data[i];
  return sliced;
}
\end{verbatim}
\subsubsection{\texttt{copy\_vector}}
\label{sec:org81f7cc1}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{copy\_vector}
\item Location: \texttt{src/vector.c}
\item Input: a pointer to an \texttt{Array\_double}
\item Output: a pointer to a new \texttt{Array\_double} whose \texttt{data} and \texttt{size} are copied from the input
\texttt{Array\_double}
\end{itemize}

\begin{verbatim}
Array_double *copy_vector(Array_double *v) {
  Array_double *copy = InitArrayWithSize(double, v->size, 0.0);
  for (size_t i = 0; i < copy->size; ++i)
    copy->data[i] = v->data[i];
  return copy;
}
\end{verbatim}
\subsubsection{\texttt{format\_vector\_into}}
\label{sec:orgd168171}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{format\_vector\_into}
\item Location: \texttt{src/vector.c}
\item Input: a pointer to an \texttt{Array\_double} and a pointer to a c-string \texttt{s} to "print" the vector out
into
\item Output: nothing.
\item Side effect: overwritten memory into \texttt{s}
\end{itemize}

\begin{verbatim}
void format_vector_into(Array_double *v, char *s) {
  if (v->size == 0) {
    strcat(s, "empty");
    return;
  }

  for (size_t i = 0; i < v->size; ++i) {
    char num[64];
    strcpy(num, "");

    sprintf(num, "%f,", v->data[i]);
    strcat(s, num);
  }
  strcat(s, "\n");
}
\end{verbatim}
\subsection{Matrix Routines}
\label{sec:org5c45c12}
\subsubsection{\texttt{lu\_decomp}}
\label{sec:orgf1e0ac3}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{lu\_decomp}
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a \texttt{Matrix\_double} \(m\) to decompose into a lower triangular and upper triangular
matrix \(L\), \(U\), respectively, such that \(LU = m\).
\item Output: a pointer to the location in memory in which two \texttt{Matrix\_double}'s reside: the first
representing \(U\), the second, \(L\).
\item Errors: Fails assertions when encountering a matrix that cannot be
decomposed
\end{itemize}

\begin{verbatim}
Matrix_double **lu_decomp(Matrix_double *m) {
  assert(m->cols == m->rows);

  Matrix_double *u = copy_matrix(m);
  Matrix_double *l_empt = InitMatrixWithSize(double, m->rows, m->cols, 0.0);
  Matrix_double *l = put_identity_diagonal(l_empt);
  free_matrix(l_empt);

  Matrix_double **u_l = malloc(sizeof(Matrix_double *) * 2);

  for (size_t y = 0; y < m->rows; y++) {
    if (u->data[y]->data[y] == 0) {
      printf("ERROR: a pivot is zero in given matrix\n");
      assert(false);
    }
  }

  if (u && l) {
    for (size_t x = 0; x < m->cols; x++) {
      for (size_t y = x + 1; y < m->rows; y++) {
        double denom = u->data[x]->data[x];

        if (denom == 0) {
          printf("ERROR: non-factorable matrix\n");
          assert(false);
        }

        double factor = -(u->data[y]->data[x] / denom);

        Array_double *scaled = scale_v(u->data[x], factor);
        Array_double *added = add_v(scaled, u->data[y]);
        free_vector(scaled);
        free_vector(u->data[y]);

        u->data[y] = added;
        l->data[y]->data[x] = -factor;
      }
    }
  }

  u_l[0] = u;
  u_l[1] = l;
  return u_l;
}
\end{verbatim}
\subsubsection{\texttt{bsubst}}
\label{sec:orgec7e4b5}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{bsubst}
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to an upper-triangular \texttt{Matrix\_double} \(u\) and an \texttt{Array\_double}
\(b\)
\item Output: a pointer to a new \texttt{Array\_double} whose entries are given by performing
back substitution
\end{itemize}

\begin{verbatim}
Array_double *bsubst(Matrix_double *u, Array_double *b) {
  assert(u->rows == b->size && u->cols == u->rows);

  Array_double *x = copy_vector(b);
  for (int64_t row = b->size - 1; row >= 0; row--) {
    for (size_t col = b->size - 1; col > row; col--)
      x->data[row] -= x->data[col] * u->data[row]->data[col];
    x->data[row] /= u->data[row]->data[row];
  }
  return x;
}
\end{verbatim}
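In equation form, the update the loop above implements for row \(i\) (0-indexed, working from the bottom row \(n-1\) upward) is:

\[
x_i = \frac{1}{u_{ii}} \Big( b_i - \sum_{j = i+1}^{n-1} u_{ij} \, x_j \Big).
\]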
\subsubsection{\texttt{fsubst}}
\label{sec:org72ff2ed}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{fsubst}
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a lower-triangular \texttt{Matrix\_double} \(l\) and an \texttt{Array\_double}
\(b\)
\item Output: a pointer to a new \texttt{Array\_double} whose entries are given by performing
forward substitution
\end{itemize}

\begin{verbatim}
Array_double *fsubst(Matrix_double *l, Array_double *b) {
  assert(l->rows == b->size && l->cols == l->rows);

  Array_double *x = copy_vector(b);

  for (size_t row = 0; row < b->size; row++) {
    for (size_t col = 0; col < row; col++)
      x->data[row] -= x->data[col] * l->data[row]->data[col];
    x->data[row] /= l->data[row]->data[row];
  }

  return x;
}
\end{verbatim}
\subsubsection{\texttt{solve\_matrix\_lu\_bsubst}}
\label{sec:orga735557}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a \texttt{Matrix\_double} \(m\) and a pointer to an \texttt{Array\_double} \(b\)
\item Output: \(x\) such that \(mx = b\), if such a solution exists (else \(m\) is not LU-factorable, as discussed
above)
\end{itemize}

Here we make use of forward substitution to first solve \(Ly = b\) given \(L\) as the \(L\) factor in
\texttt{lu\_decomp}. Then we use back substitution to solve \(Ux = y\) for \(x\), similarly given \(U\).

Then \(LUx = b\), thus \(x\) is a solution.

\begin{verbatim}
Array_double *solve_matrix_lu_bsubst(Matrix_double *m, Array_double *b) {
  assert(b->size == m->rows);
  assert(m->rows == m->cols);

  Matrix_double **u_l = lu_decomp(m);
  Matrix_double *u = u_l[0];
  Matrix_double *l = u_l[1];

  Array_double *b_fsub = fsubst(l, b);
  Array_double *x = bsubst(u, b_fsub);
  free_vector(b_fsub);

  free_matrix(u);
  free_matrix(l);
  free(u_l);

  return x;
}
\end{verbatim}
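A minimal usage sketch solving a small \(2 \times 2\) system with the routines above (the literal values are illustrative only, and the declarations are assumed to be reachable from the headers in \texttt{inc/}):

\begin{verbatim}
#include <stdio.h>
// the matrix/vector routines and macros from inc/ are assumed in scope

int main(void) {
  // m = [[2, 1], [1, 3]], b = [3, 5]  =>  x = [0.8, 1.4]
  Matrix_double *m = InitMatrixWithSize(double, 2, 2, 0.0);
  m->data[0]->data[0] = 2.0; m->data[0]->data[1] = 1.0;
  m->data[1]->data[0] = 1.0; m->data[1]->data[1] = 3.0;
  Array_double *b = InitArray(double, {3.0, 5.0});

  Array_double *x = solve_matrix_lu_bsubst(m, b);
  printf("x = (%f, %f)\n", x->data[0], x->data[1]);

  free_vector(x);
  free_vector(b);
  free_matrix(m);
  return 0;
}
\end{verbatim}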
\subsubsection{\texttt{gaussian\_elimination}}
\label{sec:org71d2519}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a \texttt{Matrix\_double} \(m\)
\item Output: a pointer to a copy of \(m\) in row echelon form
\end{itemize}

This works by finding the row with the maximum absolute value in column \(k\), using that row as the pivot, and
applying reduction to the rows below it. The general idea is available at \url{https://en.wikipedia.org/wiki/Gaussian\_elimination}.

\begin{verbatim}
Matrix_double *gaussian_elimination(Matrix_double *m) {
  uint64_t h = 0, k = 0;

  Matrix_double *m_cp = copy_matrix(m);

  while (h < m_cp->rows && k < m_cp->cols) {
    uint64_t max_row = h;
    double max_val = 0.0;

    for (uint64_t row = h; row < m_cp->rows; row++) {
      double val = fabs(m_cp->data[row]->data[k]);
      if (val > max_val) {
        max_val = val;
        max_row = row;
      }
    }

    if (max_val == 0.0) {
      k++;
      continue;
    }

    if (max_row != h) {
      Array_double *swp = m_cp->data[max_row];
      m_cp->data[max_row] = m_cp->data[h];
      m_cp->data[h] = swp;
    }

    for (uint64_t row = h + 1; row < m_cp->rows; row++) {
      double factor = m_cp->data[row]->data[k] / m_cp->data[h]->data[k];
      m_cp->data[row]->data[k] = 0.0;

      for (uint64_t col = k + 1; col < m_cp->cols; col++) {
        m_cp->data[row]->data[col] -= m_cp->data[h]->data[col] * factor;
      }
    }

    h++;
    k++;
  }

  return m_cp;
}
\end{verbatim}
\subsubsection{\texttt{solve\_matrix\_gaussian}}
\label{sec:org230915f}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a \texttt{Matrix\_double} \(m\) and a target \texttt{Array\_double} \(b\)
\item Output: a pointer to a vector \(x\) being the solution to the equation \(mx = b\)
\end{itemize}

We first perform \texttt{gaussian\_elimination} after augmenting \(m\) with \(b\). Then, as \(m\) is in row echelon form, it is an upper
triangular matrix, so we can perform back substitution to compute \(x\).

\begin{verbatim}
Array_double *solve_matrix_gaussian(Matrix_double *m, Array_double *b) {
  assert(b->size == m->rows);
  assert(m->rows == m->cols);

  Matrix_double *m_augment_b = add_column(m, b);
  Matrix_double *eliminated = gaussian_elimination(m_augment_b);

  Array_double *b_gauss = col_v(eliminated, m->cols);
  Matrix_double *u = slice_column(eliminated, m->rows);

  Array_double *solution = bsubst(u, b_gauss);

  free_matrix(m_augment_b);
  free_matrix(eliminated);
  free_matrix(u);
  free_vector(b_gauss);

  return solution;
}
\end{verbatim}
\subsubsection{\texttt{m\_dot\_v}}
\label{sec:org83c8351}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a \texttt{Matrix\_double} \(m\) and an \texttt{Array\_double} \(v\)
\item Output: the product \(mv\) as an \texttt{Array\_double} (each entry is the dot product of a row of \(m\) with \(v\))
\end{itemize}

\begin{verbatim}
Array_double *m_dot_v(Matrix_double *m, Array_double *v) {
  assert(v->size == m->cols);

  Array_double *product = copy_vector(v);

  for (size_t row = 0; row < v->size; ++row)
    product->data[row] = v_dot_v(m->data[row], v);

  return product;
}
\end{verbatim}
\subsubsection{\texttt{put\_identity\_diagonal}}
\label{sec:orge3fcb3e}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a \texttt{Matrix\_double}
\item Output: a pointer to a copy of the \texttt{Matrix\_double} whose diagonal is full of 1's
\end{itemize}

\begin{verbatim}
Matrix_double *put_identity_diagonal(Matrix_double *m) {
  assert(m->rows == m->cols);
  Matrix_double *copy = copy_matrix(m);
  for (size_t y = 0; y < m->rows; ++y)
    copy->data[y]->data[y] = 1.0;
  return copy;
}
\end{verbatim}
\subsubsection{\texttt{slice\_column}}
\label{sec:org95e39ba}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a \texttt{Matrix\_double} and a column index \texttt{x}
\item Output: a pointer to a copy of the given \texttt{Matrix\_double} with the column at \texttt{x} sliced out
\end{itemize}

\begin{verbatim}
Matrix_double *slice_column(Matrix_double *m, size_t x) {
  Matrix_double *sliced = copy_matrix(m);

  for (size_t row = 0; row < m->rows; row++) {
    Array_double *old_row = sliced->data[row];
    sliced->data[row] = slice_element(old_row, x);
    free_vector(old_row);
  }
  sliced->cols--;

  return sliced;
}
\end{verbatim}
\subsubsection{\texttt{add\_column}}
\label{sec:org9a2ad93}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a \texttt{Matrix\_double} and a vector representing the appended column \texttt{x}
\item Output: a pointer to a copy of the given \texttt{Matrix\_double} with a new column \texttt{x}
\end{itemize}

\begin{verbatim}
Matrix_double *add_column(Matrix_double *m, Array_double *v) {
  Matrix_double *pushed = copy_matrix(m);

  for (size_t row = 0; row < m->rows; row++) {
    Array_double *old_row = pushed->data[row];
    pushed->data[row] = add_element(old_row, v->data[row]);
    free_vector(old_row);
  }

  pushed->cols++;
  return pushed;
}
\end{verbatim}
\subsubsection{\texttt{copy\_matrix}}
\label{sec:org63765c0}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a \texttt{Matrix\_double}
\item Output: a pointer to a copy of the given \texttt{Matrix\_double}
\end{itemize}

\begin{verbatim}
Matrix_double *copy_matrix(Matrix_double *m) {
  Matrix_double *copy = InitMatrixWithSize(double, m->rows, m->cols, 0.0);
  for (size_t y = 0; y < copy->rows; y++) {
    free_vector(copy->data[y]);
    copy->data[y] = copy_vector(m->data[y]);
  }
  return copy;
}
\end{verbatim}
\subsubsection{\texttt{free\_matrix}}
\label{sec:orgc337967}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a \texttt{Matrix\_double}
\item Output: none.
\item Side Effects: frees memory reserved by a given \texttt{Matrix\_double} and its member
\texttt{Array\_double} vectors describing its rows.
\end{itemize}

\begin{verbatim}
void free_matrix(Matrix_double *m) {
  for (size_t y = 0; y < m->rows; ++y)
    free_vector(m->data[y]);
  free(m->data);
  free(m);
}
\end{verbatim}
\subsubsection{\texttt{format\_matrix\_into}}
\label{sec:org6b188b4}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{format\_matrix\_into}
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a \texttt{Matrix\_double} and a pointer to a c-string \texttt{s} to "print" the matrix out
into
\item Output: nothing.
\item Side effect: overwritten memory into \texttt{s}
\end{itemize}

\begin{verbatim}
void format_matrix_into(Matrix_double *m, char *s) {
  if (m->rows == 0)
    strcpy(s, "empty");

  for (size_t y = 0; y < m->rows; ++y) {
    char row_s[5192];
    strcpy(row_s, "");

    format_vector_into(m->data[y], row_s);
    strcat(s, row_s);
  }
  strcat(s, "\n");
}
\end{verbatim}
\subsection{Root Finding Methods}
\label{sec:org352ccdf}
\subsubsection{\texttt{find\_ivt\_range}}
\label{sec:orgb9a0d16}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{find\_ivt\_range}
\item Location: \texttt{src/roots.c}
\item Input: a pointer to a one-ary function taking a double and producing a double, the beginning point
in \(\mathbb{R}\) to search for a range, a \texttt{delta} step that is taken, and a \texttt{max\_iterations} number of maximum
iterations to perform.
\item Output: a pair of \texttt{double}'s in an \texttt{Array\_double} representing a closed interval \texttt{[beginning, end]}
\end{itemize}

\begin{verbatim}
// f is well defined at start_x + delta*n for all n on the integer range [0,
// max_iterations]
Array_double *find_ivt_range(double (*f)(double), double start_x, double delta,
                             size_t max_iterations) {
  double a = start_x;

  while (f(a) * f(a + delta) >= 0 && max_iterations > 0) {
    max_iterations--;
    a += delta;
  }

  double end = a + delta;
  double begin = a - delta;

  if (max_iterations == 0 && f(begin) * f(end) >= 0)
    return NULL;
  return InitArray(double, {begin, end});
}
\end{verbatim}
\subsubsection{\texttt{bisect\_find\_root}}
\label{sec:org25382b3}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name(s): \texttt{bisect\_find\_root}
\item Input: a one-ary function taking a double and producing a double, a closed interval represented
by \texttt{a} and \texttt{b}: \texttt{[a, b]}, a \texttt{tolerance} at which we return the estimated root once \(b-a < \text{tolerance}\), and a
\texttt{max\_iterations} to break us out of the loop if we can never reach the \texttt{tolerance}.
\item Output: a vector of three \texttt{double}'s representing first the range \texttt{[a,b]} and then the midpoint
\texttt{c} of the range.
\item Description: recursively uses binary search to split the interval until we reach \texttt{tolerance}. We
also assume the function \texttt{f} is continuous on \texttt{[a, b]}.
\end{itemize}

\begin{verbatim}
// f is continuous on [a, b]
Array_double *bisect_find_root(double (*f)(double), double a, double b,
                               double tolerance, size_t max_iterations) {
  assert(a <= b);
  // guarantee there's a root somewhere between a and b by IVT
  assert(f(a) * f(b) < 0);

  double c = (1.0 / 2) * (a + b);
  if (b - a < tolerance || max_iterations == 0)
    return InitArray(double, {a, b, c});

  if (f(a) * f(c) < 0)
    return bisect_find_root(f, a, c, tolerance, max_iterations - 1);
  return bisect_find_root(f, c, b, tolerance, max_iterations - 1);
}
\end{verbatim}
\subsubsection{\texttt{bisect\_find\_root\_with\_error\_assumption}}
\label{sec:org4b9cb72}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{bisect\_find\_root\_with\_error\_assumption}
\item Input: a one-ary function taking a double and producing a double, a closed interval represented
by \texttt{a} and \texttt{b}: \texttt{[a, b]}, and a \texttt{tolerance} equivalent to the above definition in \texttt{bisect\_find\_root}
\item Output: a \texttt{double} representing the estimated root
\item Description: using the bisection method we know that \(e_k \le (\frac{1}{2})^k (b_0 - a_0)\). So we can
calculate \(k\) in the worst possible case (that the error is exactly the tolerance) to be
\(k = \frac{\log(\text{tolerance}) - \log(b_0 - a_0)}{\log(\frac{1}{2})}\). We pass this value into the \texttt{max\_iterations}
of \texttt{bisect\_find\_root} as above.
\end{itemize}

\begin{verbatim}
double bisect_find_root_with_error_assumption(double (*f)(double), double a,
                                              double b, double tolerance) {
  assert(a <= b);

  uint64_t max_iterations =
      (uint64_t)ceil((log(tolerance) - log(b - a)) / log(1 / 2.0));

  Array_double *a_b_root = bisect_find_root(f, a, b, tolerance, max_iterations);
  double root = a_b_root->data[2];
  free_vector(a_b_root);

  return root;
}
\end{verbatim}
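A minimal usage sketch, approximating \(\sqrt{2}\) as the positive root of \(f(x) = x^2 - 2\) on \([0, 2]\) (the helper \texttt{f} is hypothetical, and the root-finding declarations are assumed to be on the include path):

\begin{verbatim}
#include <stdio.h>
// declarations of the root finding routines from inc/ are assumed in scope

static double f(double x) { return x * x - 2.0; }

int main(void) {
  // f(0) = -2 and f(2) = 2, so the IVT guarantees a root in [0, 2]
  double root = bisect_find_root_with_error_assumption(&f, 0.0, 2.0, 1e-8);
  printf("sqrt(2) is approximately %.10f\n", root);
  return 0;
}
\end{verbatim}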
\subsubsection{\texttt{fixed\_point\_iteration\_method}}
\label{sec:org4cee2bd}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{fixed\_point\_iteration\_method}
\item Location: \texttt{src/roots.c}
\item Input: a pointer to a one-ary function \(f\) taking a double and producing a double, of which we are
trying to find a root; a guess \(x_0\); and a function \(g\) with the same signature as \(f\) with which we
"step" our guesses according to the fixed point iteration method: \(x_k = g(x_{k-1})\). Additionally, a
\texttt{max\_iterations} representing the maximum number of "steps" to take before arriving at our
approximation and a \texttt{tolerance} such that we return the root once \(f(\text{root})\) lands within \([0 - \text{tolerance}, 0 + \text{tolerance}]\).
\item Assumptions: \(g(x)\) must be a function such that at the point \(x^*\) (the found root) the derivative
satisfies \(|g'(x^*)| < 1\)
\item Output: a double representing the found approximate root \(\approx x^*\).
\end{itemize}

\begin{verbatim}
double fixed_point_iteration_method(double (*f)(double), double (*g)(double),
                                    double x_0, double tolerance,
                                    size_t max_iterations) {
  if (max_iterations <= 0)
    return x_0;

  double root = g(x_0);
  if (tolerance >= fabs(f(root)))
    return root;

  return fixed_point_iteration_method(f, g, root, tolerance,
                                      max_iterations - 1);
}
\end{verbatim}
\subsubsection{\texttt{fixed\_point\_newton\_method}}
\label{sec:org93e3999}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{fixed\_point\_newton\_method}
\item Location: \texttt{src/roots.c}
\item Input: a pointer to a one-ary function \(f\) taking a double and producing a double, of which we are
trying to find a root, and another pointer to a function \texttt{fprime} of the same signature; a guess \(x_0\);
and a \texttt{max\_iterations} and \texttt{tolerance} as defined in the above method are required inputs.
\item Description: continually computes elements in the sequence \(x_n = x_{n-1} - \frac{f(x_{n-1})}{f'(x_{n-1})}\)
\item Output: a double representing the found approximate root \(\approx x^*\) recursively applied to the sequence
given
\end{itemize}

\begin{verbatim}
double fixed_point_newton_method(double (*f)(double), double (*fprime)(double),
                                 double x_0, double tolerance,
                                 size_t max_iterations) {
  if (max_iterations <= 0)
    return x_0;

  double root = x_0 - f(x_0) / fprime(x_0);
  if (tolerance >= fabs(f(root)))
    return root;

  return fixed_point_newton_method(f, fprime, root, tolerance,
                                   max_iterations - 1);
}
\end{verbatim}
\subsubsection{\texttt{fixed\_point\_secant\_method}}
\label{sec:orgf3f0711}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{fixed\_point\_secant\_method}
\item Location: \texttt{src/roots.c}
\item Input: a pointer to a one-ary function \(f\) taking a double and producing a double, of which we are
trying to find a root, and guesses \(x_0\) and \(x_1\) such that a root lies in \([x_0, x_1]\); we apply the
sequence \(x_n = x_{n-1} - f(x_{n-1}) \frac{x_{n-1} - x_{n-2}}{f(x_{n-1}) - f(x_{n-2})}\).
Additionally, a \texttt{max\_iterations} and \texttt{tolerance} as defined in the above method are required
inputs.
\item Output: a double representing the found approximate root \(\approx x^*\) recursively applied to the sequence.
\end{itemize}

\begin{verbatim}
double fixed_point_secant_method(double (*f)(double), double x_0, double x_1,
                                 double tolerance, size_t max_iterations) {
  if (max_iterations == 0)
    return x_1;

  double root = x_1 - f(x_1) * ((x_1 - x_0) / (f(x_1) - f(x_0)));

  if (tolerance >= fabs(f(root)))
    return root;

  return fixed_point_secant_method(f, x_1, root, tolerance, max_iterations - 1);
}
\end{verbatim}
\subsubsection{\texttt{fixed\_point\_secant\_bisection\_method}}
\label{sec:orgeaef048}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{fixed\_point\_secant\_bisection\_method}
\item Location: \texttt{src/roots.c}
\item Input: a pointer to a one-ary function \(f\) taking a double and producing a double, of which we are
trying to find a root, a guess \(x_0\), and an \(x_1\) with which we define our first interval \([x_0, x_1]\).
Then we perform a single iteration of the \texttt{fixed\_point\_secant\_method} on this interval; if it
produces a root outside the interval, we refresh the interval and root respectively with the given
\texttt{bisect\_find\_root} method. Additionally, a \texttt{max\_iterations} and \texttt{tolerance} as defined in the above method are required
inputs.
\item Output: a double representing the found approximate root \(\approx x^*\) continually applied with the
constraints defined.
\end{itemize}

\begin{verbatim}
double fixed_point_secant_bisection_method(double (*f)(double), double x_0,
                                           double x_1, double tolerance,
                                           size_t max_iterations) {
  double begin = x_0;
  double end = x_1;
  double root = x_0;

  while (tolerance < fabs(f(root)) && max_iterations > 0) {
    max_iterations--;

    double secant_root = fixed_point_secant_method(f, begin, end, tolerance, 1);

    if (secant_root < begin || secant_root > end) {
      Array_double *range_root = bisect_find_root(f, begin, end, tolerance, 1);

      begin = range_root->data[0];
      end = range_root->data[1];
      root = range_root->data[2];

      free_vector(range_root);
      continue;
    }

    root = secant_root;

    if (f(root) * f(begin) < 0)
      end = secant_root; // the root exists in [begin, secant_root]
    else
      begin = secant_root;
  }

  return root;
}
\end{verbatim}
\subsection{Linear Routines}
\label{sec:orge3b6d97}
\subsubsection{\texttt{least\_squares\_lin\_reg}}
\label{sec:orgcc90c4a}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{least\_squares\_lin\_reg}
\item Location: \texttt{src/lin.c}
\item Input: two pointers to \texttt{Array\_double}'s whose entries correspond to ordered
pairs in \(\mathbb{R}^2\)
\item Output: a linear model best representing the ordered pairs via least squares
regression
\end{itemize}

\begin{verbatim}
Line *least_squares_lin_reg(Array_double *x, Array_double *y) {
  assert(x->size == y->size);

  uint64_t n = x->size;
  double sum_x = sum_v(x);
  double sum_y = sum_v(y);
  double sum_xy = v_dot_v(x, y);
  double sum_xx = v_dot_v(x, x);
  double denom = ((n * sum_xx) - (sum_x * sum_x));

  Line *line = malloc(sizeof(Line));
  line->m = ((sum_xy * n) - (sum_x * sum_y)) / denom;
  line->a = ((sum_y * sum_xx) - (sum_x * sum_xy)) / denom;

  return line;
}
\end{verbatim}
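The closed-form coefficients the routine computes are the usual least squares normal-equation solutions for the slope \(m\) and intercept \(a\) of the model \(y \approx mx + a\):

\[
m = \frac{n \sum x_i y_i - \sum x_i \sum y_i}{n \sum x_i^2 - \left(\sum x_i\right)^2},
\qquad
a = \frac{\sum y_i \sum x_i^2 - \sum x_i \sum x_i y_i}{n \sum x_i^2 - \left(\sum x_i\right)^2}.
\]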
\subsection{Eigen-Adjacent}
\label{sec:orga3c637f}
\subsubsection{\texttt{dominant\_eigenvalue}}
\label{sec:org0306c8a}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{dominant\_eigenvalue}
\item Location: \texttt{src/eigen.c}
\item Input: a pointer to an invertible matrix \texttt{m}, an initial eigenvector guess \texttt{v} (that is
nonzero and not orthogonal to an eigenvector with the dominant eigenvalue), and a \texttt{tolerance} and
\texttt{max\_iterations} that act as stop conditions
\item Output: the dominant eigenvalue (the one with the highest magnitude), approximated with the Power
Iteration Method
\end{itemize}

\begin{verbatim}
double dominant_eigenvalue(Matrix_double *m, Array_double *v, double tolerance,
                           size_t max_iterations) {
  assert(m->rows == m->cols);
  assert(m->rows == v->size);

  double error = tolerance;
  size_t iter = max_iterations;
  double lambda = 0.0;
  Array_double *eigenvector_1 = copy_vector(v);

  while (error >= tolerance && (--iter) > 0) {
    Array_double *eigenvector_2 = m_dot_v(m, eigenvector_1);
    Array_double *normalized_eigenvector_2 =
        scale_v(eigenvector_2, 1.0 / linf_norm(eigenvector_2));
    free_vector(eigenvector_2);
    eigenvector_2 = normalized_eigenvector_2;

    Array_double *mx = m_dot_v(m, eigenvector_2);
    double new_lambda =
        v_dot_v(mx, eigenvector_2) / v_dot_v(eigenvector_2, eigenvector_2);

    error = fabs(new_lambda - lambda);
    lambda = new_lambda;
    free_vector(eigenvector_1);
    eigenvector_1 = eigenvector_2;
  }

  return lambda;
}
\end{verbatim}
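Each pass of the loop is one power-iteration step. Writing \(A\) for the matrix \texttt{m} and \(x_k\) for the current guess, the update implemented above is:

\[
x_{k+1} = \frac{A x_k}{\lVert A x_k \rVert_\infty},
\qquad
\lambda_{k+1} = \frac{(A x_{k+1}) \cdot x_{k+1}}{x_{k+1} \cdot x_{k+1}},
\]

and the loop stops once \(|\lambda_{k+1} - \lambda_k|\) drops below \texttt{tolerance} (or \texttt{max\_iterations} is exhausted).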
\subsubsection{\texttt{shift\_inverse\_power\_eigenvalue}}
\label{sec:orgc29637a}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{shift\_inverse\_power\_eigenvalue}
\item Location: \texttt{src/eigen.c}
\item Input: a pointer to an invertible matrix \texttt{m}, an initial eigenvector guess \texttt{v} (that is
nonzero and not orthogonal to the sought eigenvector), a \texttt{shift} to act as the
shift \(\delta\), and a \texttt{tolerance} and \texttt{max\_iterations} that act as stop conditions.
\item Output: the eigenvalue of \texttt{m} closest to \texttt{shift}, approximated
with the shifted Inverse Power Iteration Method
\end{itemize}

\begin{verbatim}
double shift_inverse_power_eigenvalue(Matrix_double *m, Array_double *v,
                                      double shift, double tolerance,
                                      size_t max_iterations) {
  assert(m->rows == m->cols);
  assert(m->rows == v->size);

  Matrix_double *m_c = copy_matrix(m);
  for (size_t y = 0; y < m_c->rows; ++y)
    m_c->data[y]->data[y] = m_c->data[y]->data[y] - shift;

  double error = tolerance;
  size_t iter = max_iterations;
  double lambda = shift;
  Array_double *eigenvector_1 = copy_vector(v);

  while (error >= tolerance && (--iter) > 0) {
    Array_double *eigenvector_2 = solve_matrix_lu_bsubst(m_c, eigenvector_1);
    Array_double *normalized_eigenvector_2 =
        scale_v(eigenvector_2, 1.0 / linf_norm(eigenvector_2));
    free_vector(eigenvector_2);

    Array_double *mx = m_dot_v(m, normalized_eigenvector_2);
    double new_lambda =
        v_dot_v(mx, normalized_eigenvector_2) /
        v_dot_v(normalized_eigenvector_2, normalized_eigenvector_2);

    error = fabs(new_lambda - lambda);
    lambda = new_lambda;
    free_vector(eigenvector_1);
    eigenvector_1 = normalized_eigenvector_2;
  }

  return lambda;
}
\end{verbatim}
\subsubsection{\texttt{least\_dominant\_eigenvalue}}
\label{sec:org5df73a2}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{least\_dominant\_eigenvalue}
\item Location: \texttt{src/eigen.c}
\item Input: a pointer to an invertible matrix \texttt{m}, an initial eigenvector guess \texttt{v} (that is
nonzero and not orthogonal to the sought eigenvector), and a \texttt{tolerance} and
\texttt{max\_iterations} that act as stop conditions.
\item Output: the least dominant eigenvalue (the one with magnitude closest to 0), approximated
with the Inverse Power Iteration Method.
\end{itemize}

\begin{verbatim}
double least_dominant_eigenvalue(Matrix_double *m, Array_double *v,
                                 double tolerance, size_t max_iterations) {
  return shift_inverse_power_eigenvalue(m, v, 0.0, tolerance, max_iterations);
}
\end{verbatim}
\subsubsection{\texttt{partition\_find\_eigenvalues}}
\label{sec:org3dde7af}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{partition\_find\_eigenvalues}
\item Location: \texttt{src/eigen.c}
\item Input: a pointer to an invertible matrix \texttt{m}, and a "guess matrix" whose rows are initial
eigenvector guesses, one per partition; the span between the least dominant eigenvalue and the
most dominant one is split uniformly into as many partitions as the guess matrix has rows.
Additionally, a \texttt{max\_iterations} and a \texttt{tolerance}
that act as stop conditions.
\item Output: a vector of \texttt{doubles} corresponding to the "nearest" eigenvalue at the midpoint of
each partition, via the given guess of that partition.
\end{itemize}

\begin{verbatim}
Array_double *partition_find_eigenvalues(Matrix_double *m,
                                         Matrix_double *guesses,
                                         double tolerance,
                                         size_t max_iterations) {
  assert(guesses->rows >=
         2); // we need at least the most and least dominant eigenvalues

  double end = dominant_eigenvalue(m, guesses->data[guesses->rows - 1],
                                   tolerance, max_iterations);
  double begin =
      least_dominant_eigenvalue(m, guesses->data[0], tolerance, max_iterations);

  double delta = (end - begin) / guesses->rows;
  Array_double *eigenvalues = InitArrayWithSize(double, guesses->rows, 0.0);
  for (size_t i = 0; i < guesses->rows; i++) {
    double box_midpoint = ((delta * i) + (delta * (i + 1))) / 2;

    double nearest_eigenvalue = shift_inverse_power_eigenvalue(
        m, guesses->data[i], box_midpoint, tolerance, max_iterations);

    eigenvalues->data[i] = nearest_eigenvalue;
  }

  return eigenvalues;
}
\end{verbatim}
\subsubsection{\texttt{leslie\_matrix}}
\label{sec:orgca10ed3}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{leslie\_matrix}
\item Location: \texttt{src/eigen.c}
\item Input: two pointers to \texttt{Array\_double}'s: one with the ratio of individuals in age class
\(x\) surviving to the next age class \(x+1\), and one with the number of offspring that individuals in each age
class contribute to age class 0.
\item Output: the Leslie matrix generated from the input vectors.
\end{itemize}

\begin{verbatim}
Matrix_double *leslie_matrix(Array_double *age_class_surivor_ratio,
                             Array_double *age_class_offspring) {
  assert(age_class_surivor_ratio->size + 1 == age_class_offspring->size);

  Matrix_double *leslie = InitMatrixWithSize(double, age_class_offspring->size,
                                             age_class_offspring->size, 0.0);

  free_vector(leslie->data[0]);
  leslie->data[0] = age_class_offspring;

  for (size_t i = 0; i < age_class_surivor_ratio->size; i++)
    leslie->data[i + 1]->data[i] = age_class_surivor_ratio->data[i];
  return leslie;
}
\end{verbatim}
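For example, with hypothetical offspring counts \((f_0, f_1, f_2)\) and survival ratios \((s_0, s_1)\), the routine above fills the first row with the offspring counts and the subdiagonal with the survival ratios:

\[
L = \begin{pmatrix}
f_0 & f_1 & f_2 \\
s_0 & 0   & 0   \\
0   & s_1 & 0
\end{pmatrix}.
\]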
\subsection{Jacobi / Gauss-Seidel}
\label{sec:org91c563c}
\subsubsection{\texttt{jacobi\_solve}}
\label{sec:org2cd6098}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{jacobi\_solve}
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a diagonally dominant square matrix \(m\), a vector representing
the value \(b\) in \(mx = b\), a double representing the maximum distance between
the solutions produced by iterations \(i\) and \(i+1\) (by the L2 norm, a.k.a. cartesian
distance), and a \texttt{max\_iterations} at which we force a stop.
\item Output: the converged-upon solution \(x\) to \(mx = b\)
\end{itemize}

\begin{verbatim}
Array_double *jacobi_solve(Matrix_double *m, Array_double *b,
                           double l2_convergence_tolerance,
                           size_t max_iterations) {
  assert(m->rows == m->cols);
  assert(b->size == m->cols);
  size_t iter = max_iterations;

  Array_double *x_k = InitArrayWithSize(double, b->size, 0.0);
  Array_double *x_k_1 =
      InitArrayWithSize(double, b->size, rand_from(0.1, 10.0));

  while ((--iter) > 0 && l2_distance(x_k_1, x_k) > l2_convergence_tolerance) {
    for (size_t i = 0; i < m->rows; i++) {
      double delta = 0.0;
      for (size_t j = 0; j < m->cols; j++) {
        if (i == j)
          continue;
        delta += m->data[i]->data[j] * x_k->data[j];
      }
      x_k_1->data[i] = (b->data[i] - delta) / m->data[i]->data[i];
    }

    Array_double *tmp = x_k;
    x_k = x_k_1;
    x_k_1 = tmp;
  }

  free_vector(x_k);
  return x_k_1;
}
\end{verbatim}
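In equation form, writing \(a_{ij}\) for the entries of \(m\), each sweep of the inner loops computes, for every row \(i\):

\[
x_i^{(k+1)} = \frac{1}{a_{ii}} \Big( b_i - \sum_{j \neq i} a_{ij} \, x_j^{(k)} \Big),
\]

using only the previous iterate \(x^{(k)}\) on the right-hand side.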
\subsubsection{\texttt{gauss\_siedel\_solve}}
\label{sec:org6633923}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{gauss\_siedel\_solve}
\item Location: \texttt{src/matrix.c}
\item Input: a pointer to a \href{https://en.wikipedia.org/wiki/Gauss\%E2\%80\%93Seidel\_method}{diagonally dominant or symmetric and positive definite}
square matrix \(m\), a vector representing
the value \(b\) in \(mx = b\), a double representing the maximum distance between
the solutions produced by iterations \(i\) and \(i+1\) (by the L2 norm, a.k.a. cartesian
distance), and a \texttt{max\_iterations} at which we force a stop.
\item Output: the converged-upon solution \(x\) to \(mx = b\)
\item Description: we use almost exactly the same method as \texttt{jacobi\_solve}, but update a
single array in place in accordance with the Gauss-Seidel method; that array is copied
beforehand so the convergence check can compare against the previous iterate.
\end{itemize}

\begin{verbatim}
Array_double *gauss_siedel_solve(Matrix_double *m, Array_double *b,
                                 double l2_convergence_tolerance,
                                 size_t max_iterations) {
  assert(m->rows == m->cols);
  assert(b->size == m->cols);
  size_t iter = max_iterations;

  Array_double *x_k = InitArrayWithSize(double, b->size, 0.0);
  Array_double *x_k_1 =
      InitArrayWithSize(double, b->size, rand_from(0.1, 10.0));

  while ((--iter) > 0) {
    for (size_t i = 0; i < x_k->size; i++)
      x_k->data[i] = x_k_1->data[i];

    for (size_t i = 0; i < m->rows; i++) {
      double delta = 0.0;
      for (size_t j = 0; j < m->cols; j++) {
        if (i == j)
          continue;
        delta += m->data[i]->data[j] * x_k_1->data[j];
      }
      x_k_1->data[i] = (b->data[i] - delta) / m->data[i]->data[i];
    }

    if (l2_distance(x_k_1, x_k) <= l2_convergence_tolerance)
      break;
  }

  free_vector(x_k);
  return x_k_1;
}
\end{verbatim}
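The only difference from the Jacobi update is that entries already computed in the current sweep are used immediately; for row \(i\):

\[
x_i^{(k+1)} = \frac{1}{a_{ii}} \Big( b_i - \sum_{j < i} a_{ij} \, x_j^{(k+1)} - \sum_{j > i} a_{ij} \, x_j^{(k)} \Big).
\]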
\subsection{Appendix / Miscellaneous}
\label{sec:orga72494e}
\subsubsection{Random}
\label{sec:org4940c39}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Name: \texttt{rand\_from}
\item Location: \texttt{src/rand.c}
\item Input: a pair of doubles, min and max, to generate a random number min
\(\le\) x \(\le\) max
\item Output: a random double in the constraints shown
\end{itemize}

\begin{verbatim}
double rand_from(double min, double max) {
  return min + (rand() / (RAND_MAX / (max - min)));
}
\end{verbatim}
\subsubsection{Data Types}
\label{sec:org8d3f6e1}
\begin{enumerate}
\item \texttt{Line}
\label{sec:orgc0df901}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{inc/types.h}
\end{itemize}

\begin{verbatim}
typedef struct Line {
  double m;
  double a;
} Line;
\end{verbatim}

\item The \texttt{Array\_<type>} and \texttt{Matrix\_<type>}
\label{sec:org435e816}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{inc/types.h}
\end{itemize}

We define two preprocessor macros, \texttt{DEFINE\_ARRAY} and \texttt{DEFINE\_MATRIX}, that take
as input a type and construct a struct definition for that type, providing
convenient access to the vector's or matrix's dimensions.

For example, \texttt{DEFINE\_ARRAY(int)} would expand to:

\begin{verbatim}
typedef struct {
  int* data;
  size_t size;
} Array_int
\end{verbatim}

And \texttt{DEFINE\_MATRIX(int)} would expand to a \texttt{Matrix\_int}, containing a pointer to
a collection of pointers to \texttt{Array\_int}'s and its dimensions.

\begin{verbatim}
typedef struct {
  Array_int **data;
  size_t cols;
  size_t rows;
} Matrix_int
\end{verbatim}
\end{enumerate}
\subsubsection{Macros}
\label{sec:orga2161be}
\begin{enumerate}
\item \texttt{c\_max} and \texttt{c\_min}
\label{sec:org16ca9c3}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{inc/macros.h}
\item Input: two structures that define an order measure
\item Output: either the larger or smaller of the two depending on the measure
\end{itemize}

\begin{verbatim}
#define c_max(x, y) (((x) >= (y)) ? (x) : (y))
#define c_min(x, y) (((x) <= (y)) ? (x) : (y))
\end{verbatim}

\item \texttt{InitArray}
\label{sec:orgcaff993}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{inc/macros.h}
\item Input: a type and an array of values to initialize an array with such type
\item Output: a new \texttt{Array\_type} with the size of the given array and its data
\end{itemize}

\begin{verbatim}
#define InitArray(TYPE, ...)                                \
  ({                                                        \
    TYPE temp[] = __VA_ARGS__;                              \
    Array_##TYPE *arr = malloc(sizeof(Array_##TYPE));       \
    arr->size = sizeof(temp) / sizeof(temp[0]);             \
    arr->data = malloc(arr->size * sizeof(TYPE));           \
    memcpy(arr->data, temp, arr->size * sizeof(TYPE));      \
    arr;                                                    \
  })
\end{verbatim}
\item \texttt{InitArrayWithSize}
\label{sec:orga925ddb}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{inc/macros.h}
\item Input: a type, a size, and initial value
\item Output: a new \texttt{Array\_type} with the given size filled with the initial value
\end{itemize}

\begin{verbatim}
#define InitArrayWithSize(TYPE, SIZE, INIT_VALUE)           \
  ({                                                        \
    Array_##TYPE *arr = malloc(sizeof(Array_##TYPE));       \
    arr->size = SIZE;                                       \
    arr->data = malloc(arr->size * sizeof(TYPE));           \
    for (size_t i = 0; i < arr->size; i++)                  \
      arr->data[i] = INIT_VALUE;                            \
    arr;                                                    \
  })
\end{verbatim}
\item \texttt{InitMatrixWithSize}
\label{sec:orgf90d7c8}
\begin{itemize}
\item Author: Elizabeth Hunt
\item Location: \texttt{inc/macros.h}
\item Input: a type, number of rows, columns, and initial value
\item Output: a new \texttt{Matrix\_type} of size \texttt{rows x columns} filled with the initial
value
\end{itemize}

\begin{verbatim}
#define InitMatrixWithSize(TYPE, ROWS, COLS, INIT_VALUE)             \
  ({                                                                 \
    Matrix_##TYPE *matrix = malloc(sizeof(Matrix_##TYPE));           \
    matrix->rows = ROWS;                                             \
    matrix->cols = COLS;                                             \
    matrix->data = malloc(matrix->rows * sizeof(Array_##TYPE *));    \
    for (size_t y = 0; y < matrix->rows; y++)                        \
      matrix->data[y] = InitArrayWithSize(TYPE, COLS, INIT_VALUE);   \
    matrix;                                                          \
  })
\end{verbatim}
\end{enumerate}
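Putting the macros together, a short sketch of typical allocation and cleanup (assuming the macros and the \texttt{double} instantiations of the types are in scope via the headers in \texttt{inc/}):

\begin{verbatim}
// a 3-element zero vector, a literal-initialized vector, and a 2x2 zero matrix
Array_double *zeros = InitArrayWithSize(double, 3, 0.0);
Array_double *v = InitArray(double, {1.0, 2.0, 3.0});
Matrix_double *m = InitMatrixWithSize(double, 2, 2, 0.0);

// ... use the structures with the routines above ...

free_vector(zeros);
free_vector(v);
free_matrix(m);
\end{verbatim}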
\end{document} |