add software manual updates for hw8

This commit is contained in:
Elizabeth Hunt 2023-12-09 20:46:16 -07:00
parent b5ad184c1b
commit fdc1c5642e
Signed by: simponic
GPG Key ID: 52B3774857EB24B1
6 changed files with 259 additions and 35 deletions

View File

@ -1,4 +1,4 @@
-#+TITLE: LIZFCM Software Manual (v0.5)
+#+TITLE: LIZFCM Software Manual (v0.6)
#+AUTHOR: Elizabeth Hunt
#+LATEX_HEADER: \notindent \notag \usepackage{amsmath} \usepackage[a4paper,margin=1in,portrait]{geometry}
#+LATEX: \setlength\parindent{0pt}
@ -544,30 +544,32 @@ applying reduction to all other rows. The general idea is available at [[https:/
#+BEGIN_SRC c
Matrix_double *gaussian_elimination(Matrix_double *m) {
-uint64_t h = 0;
-uint64_t k = 0;
+uint64_t h = 0, k = 0;
Matrix_double *m_cp = copy_matrix(m);
while (h < m_cp->rows && k < m_cp->cols) {
-uint64_t max_row = 0;
-double total_max = 0.0;
+uint64_t max_row = h;
+double max_val = 0.0;
for (uint64_t row = h; row < m_cp->rows; row++) {
-double this_max = c_max(fabs(m_cp->data[row]->data[k]), total_max);
-if (c_max(this_max, total_max) == this_max) {
+double val = fabs(m_cp->data[row]->data[k]);
+if (val > max_val) {
+max_val = val;
max_row = row;
}
}
-if (max_row == 0) {
+if (max_val == 0.0) {
k++;
continue;
}
-Array_double *swp = m_cp->data[max_row];
-m_cp->data[max_row] = m_cp->data[h];
-m_cp->data[h] = swp;
+if (max_row != h) {
+Array_double *swp = m_cp->data[max_row];
+m_cp->data[max_row] = m_cp->data[h];
+m_cp->data[h] = swp;
+}
for (uint64_t row = h + 1; row < m_cp->rows; row++) {
double factor = m_cp->data[row]->data[k] / m_cp->data[h]->data[k];
@ -743,7 +745,7 @@ void format_matrix_into(Matrix_double *m, char *s) {
strcpy(s, "empty");
for (size_t y = 0; y < m->rows; ++y) {
-char row_s[256];
+char row_s[5192];
strcpy(row_s, "");
format_vector_into(m->data[y], row_s);
@ -1159,8 +1161,112 @@ Matrix_double *leslie_matrix(Array_double *age_class_surivor_ratio,
return leslie;
}
#+END_SRC
** Jacobi / Gauss-Seidel
*** ~jacobi_solve~
+ Author: Elizabeth Hunt
+ Name: ~jacobi_solve~
+ Location: ~src/matrix.c~
+ Input: a pointer to a diagonally dominant square matrix $m$, a vector representing
  the value $b$ in $mx = b$, a double representing the maximum distance (by the L2
  norm, i.e. Euclidean distance) between the solutions produced by iterations $i$
  and $i+1$, and a ~max_iterations~ at which we force a stop.
+ Output: the converged-upon solution $x$ to $mx = b$; the per-component update is
  sketched below.
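For reference (this is the standard statement of the method, not lifted from the
LIZFCM source), each Jacobi sweep computes, for every row $i$,
\[ x_i^{(k+1)} = \frac{1}{m_{ii}}\Big(b_i - \sum_{j \neq i} m_{ij}\, x_j^{(k)}\Big), \]
which corresponds to the ~delta~ accumulation and division in the implementation below.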
#+BEGIN_SRC c
Array_double *jacobi_solve(Matrix_double *m, Array_double *b,
                           double l2_convergence_tolerance,
                           size_t max_iterations) {
  assert(m->rows == m->cols);
  assert(b->size == m->cols);
  size_t iter = max_iterations;
  Array_double *x_k = InitArrayWithSize(double, b->size, 0.0);
  Array_double *x_k_1 =
      InitArrayWithSize(double, b->size, rand_from(0.1, 10.0));
  while ((--iter) > 0 && l2_distance(x_k_1, x_k) > l2_convergence_tolerance) {
    for (size_t i = 0; i < m->rows; i++) {
      double delta = 0.0;
      for (size_t j = 0; j < m->cols; j++) {
        if (i == j)
          continue;
        // off-diagonal contributions use only the previous iterate x_k
        delta += m->data[i]->data[j] * x_k->data[j];
      }
      x_k_1->data[i] = (b->data[i] - delta) / m->data[i]->data[i];
    }
    // swap so x_k holds the newest iterate and x_k_1 the previous one
    Array_double *tmp = x_k;
    x_k = x_k_1;
    x_k_1 = tmp;
  }
  free_vector(x_k);
  return x_k_1;
}
#+END_SRC
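A minimal usage sketch, mirroring ~UTEST(jacobi, jacobi_solve)~ from ~test/jacobi.t.c~;
~generate_ddm~ is the diagonally dominant test-matrix helper defined there, and the
wrapper function name here is mine:
#+BEGIN_SRC c
void example_jacobi_usage(void) {
  // build a small diagonally dominant system whose solution is all ones
  Matrix_double *m = generate_ddm(3);
  Array_double *x_expected = InitArrayWithSize(double, m->rows, 1.0);
  Array_double *b = m_dot_v(m, x_expected);

  // iterate until successive iterates are within 0.001 (L2), capped at 400 sweeps
  Array_double *x = jacobi_solve(m, b, 0.001, 400);
  // each x->data[i] should now be close to 1.0

  free_matrix(m);
  free_vector(x_expected);
  free_vector(b);
  free_vector(x);
}
#+END_SRC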
*** ~gauss_siedel_solve~
+ Author: Elizabeth Hunt
+ Name: ~gauss_siedel_solve~
+ Location: ~src/matrix.c~
+ Input: a pointer to a [[https://en.wikipedia.org/wiki/Gauss%E2%80%93Seidel_method][diagonally dominant or symmetric and positive definite]]
  square matrix $m$, a vector representing the value $b$ in $mx = b$, a double
  representing the maximum distance (by the L2 norm, i.e. Euclidean distance)
  between the solutions produced by iterations $i$ and $i+1$, and a
  ~max_iterations~ at which we force a stop.
+ Output: the converged-upon solution $x$ to $mx = b$
+ Description: we use almost exactly the same method as ~jacobi_solve~, but update
  a single array in place in accordance with the Gauss-Seidel method; that array is
  necessarily copied at the start of each iteration so the convergence check can
  compare successive iterates. The per-component update is contrasted with Jacobi's
  below.
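Again as a standard reference (not taken from the source), the Gauss-Seidel update
reuses the components already computed in the current sweep:
\[ x_i^{(k+1)} = \frac{1}{m_{ii}}\Big(b_i - \sum_{j < i} m_{ij}\, x_j^{(k+1)} - \sum_{j > i} m_{ij}\, x_j^{(k)}\Big), \]
which is why the implementation below reads from, and writes to, the single array ~x_k_1~.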
#+BEGIN_SRC c
Array_double *gauss_siedel_solve(Matrix_double *m, Array_double *b,
                                 double l2_convergence_tolerance,
                                 size_t max_iterations) {
  assert(m->rows == m->cols);
  assert(b->size == m->cols);
  size_t iter = max_iterations;
  Array_double *x_k = InitArrayWithSize(double, b->size, 0.0);
  Array_double *x_k_1 =
      InitArrayWithSize(double, b->size, rand_from(0.1, 10.0));
  while ((--iter) > 0) {
    // snapshot the previous iterate so convergence can be measured afterwards
    for (size_t i = 0; i < x_k->size; i++)
      x_k->data[i] = x_k_1->data[i];
    for (size_t i = 0; i < m->rows; i++) {
      double delta = 0.0;
      for (size_t j = 0; j < m->cols; j++) {
        if (i == j)
          continue;
        // x_k_1 is updated in place, so rows above i already carry this
        // sweep's values (the Gauss-Seidel difference from Jacobi)
        delta += m->data[i]->data[j] * x_k_1->data[j];
      }
      x_k_1->data[i] = (b->data[i] - delta) / m->data[i]->data[i];
    }
    if (l2_distance(x_k_1, x_k) <= l2_convergence_tolerance)
      break;
  }
  free_vector(x_k);
  return x_k_1;
}
#+END_SRC
** Appendix / Miscellaneous
*** Random
+ Author: Elizabeth Hunt
+ Name: ~rand_from~
+ Location: ~src/rand.c~
+ Input: a pair of doubles, ~min~ and ~max~, between which to generate a random
  number $x$ with $min \le x \le max$
+ Output: a random double within the given constraints; a usage sketch follows the
  listing below.
#+BEGIN_SRC c
double rand_from(double min, double max) {
return min + (rand() / (RAND_MAX / (max - min)));
}
#+END_SRC
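A brief usage sketch (my own illustration; ~srand~ and ~time~ come from the C standard
library and are not part of LIZFCM, and the wrapper function name is hypothetical):
#+BEGIN_SRC c
#include "lizfcm.h" // declares rand_from
#include <stdlib.h>
#include <time.h>

void example_seed_and_draw(void) {
  srand((unsigned)time(NULL));     // seed the PRNG once per program run
  double x = rand_from(0.1, 10.0); // e.g. the random initial iterate in jacobi_solve
  (void)x;                         // silence unused-variable warnings in this sketch
}
#+END_SRC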
*** Data Types
**** ~Line~
+ Author: Elizabeth Hunt

View File

@ -4,14 +4,34 @@
#+LATEX: \setlength\parindent{0pt}
#+OPTIONS: toc:nil
-TODO: Update LIZFCM org file with jacobi solve, format_matrix_into, rand
+TODO: Update LIZFCM org file with jacobi solve
* Question One
See ~UTEST(jacobi, jacobi_solve)~ in ~test/jacobi.t.c~ and the entry
~Jacobi -> jacobi_solve~ in the LIZFCM API documentation.
* Question Two
A problem arises when using the Jacobi method to solve for the previous population
distribution, $n_k$, from $Ln_{k} = n_{k+1}$: a Leslie matrix is not diagonally
dominant, and the zeros on its diagonal cause a division by zero. Likewise, we cannot
factor it into lower and upper triangular terms and apply back substitution, because
pivot points are zero.
We cannot simply run the Jacobi algorithm on a Leslie matrix, since it is clearly not
diagonally dominant, which is a requirement. Invertibility is not guaranteed either,
but if a Leslie matrix $L$ is invertible, we can first perform Gaussian elimination on
$L$ augmented with $n_{k+1}$ and then obtain $n_k$ by applying the Jacobi method to
the resulting echelon system. See ~UTEST(jacobi, leslie_solve)~ in ~test/jacobi.t.c~
for an example wherein this method is tested on a Leslie matrix to recompute a given
initial population distribution.
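To make the diagonal issue concrete: using the felicity values {0.0, 1.5, 0.8} and
survivor ratios {0.8, 0.55} from that test, a Leslie matrix in its standard form
(first row of felicity values, survival ratios on the sub-diagonal; this layout is my
assumption about what ~leslie_matrix~ builds) is
\[ L = \begin{pmatrix} 0 & 1.5 & 0.8 \\ 0.8 & 0 & 0 \\ 0 & 0.55 & 0 \end{pmatrix}, \]
so every diagonal entry is zero and the Jacobi division by $m_{ii}$ fails immediately.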
In terms of accuracy, an LU factorization and back substitution approach will always
be as correct as possible within the limits of floating point computation, since it is
a direct solution method; the Jacobi algorithm, by contrast, is iterative, so its
accuracy is determined by the convergence tolerance.

LU factorization also runs in $O(n^3)$ time for an $n \times n$ matrix, whereas the
Jacobi algorithm runs in $O(k n^2)$, with the caveat that the iteration count $k$ is
determined by the convergence criteria and can, in some cases, make it slower than LU.
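For a rough sense of scale (my own back-of-the-envelope figures, not from the source):
at $n = 1000$, LU factorization costs on the order of $n^3 = 10^9$ operations, while
$k$ Jacobi sweeps cost roughly $k \cdot n^2 = k \cdot 10^6$, so the iterative approach
only wins while the iteration count $k$ stays well below $n$.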
* Question Three
See ~UTEST(jacobi, gauss_siedel_solve)~ in ~test/jacobi.t.c~, which runs the same
unit test as ~UTEST(jacobi, jacobi_solve)~ but uses the
~Jacobi -> gauss_siedel_solve~ method as documented in the LIZFCM API reference.
* Question Four, Five

View File

@ -93,5 +93,8 @@ extern double rand_from(double min, double max);
extern Array_double *jacobi_solve(Matrix_double *m, Array_double *b,
double tolerance, size_t max_iterations);
extern Array_double *gauss_siedel_solve(Matrix_double *m, Array_double *b,
double l2_convergence_tolerance,
size_t max_iterations);
#endif // LIZFCM_H

View File

@ -2,9 +2,9 @@
#include <assert.h>
#include <math.h>
#include <stdio.h>
-n #include<string.h>
+#include <string.h>
Array_double *m_dot_v(Matrix_double *m, Array_double *v) {
assert(v->size == m->cols);
Array_double *product = copy_vector(v);
@ -161,30 +161,32 @@ Array_double *solve_matrix_lu_bsubst(Matrix_double *m, Array_double *b) {
}
Matrix_double *gaussian_elimination(Matrix_double *m) {
-uint64_t h = 0;
-uint64_t k = 0;
+uint64_t h = 0, k = 0;
Matrix_double *m_cp = copy_matrix(m);
while (h < m_cp->rows && k < m_cp->cols) {
-uint64_t max_row = 0;
-double total_max = 0.0;
+uint64_t max_row = h;
+double max_val = 0.0;
for (uint64_t row = h; row < m_cp->rows; row++) {
-double this_max = c_max(fabs(m_cp->data[row]->data[k]), total_max);
-if (c_max(this_max, total_max) == this_max) {
+double val = fabs(m_cp->data[row]->data[k]);
+if (val > max_val) {
+max_val = val;
max_row = row;
}
}
-if (max_row == 0) {
+if (max_val == 0.0) {
k++;
continue;
}
-Array_double *swp = m_cp->data[max_row];
-m_cp->data[max_row] = m_cp->data[h];
-m_cp->data[h] = swp;
+if (max_row != h) {
+Array_double *swp = m_cp->data[max_row];
+m_cp->data[max_row] = m_cp->data[h];
+m_cp->data[h] = swp;
+}
for (uint64_t row = h + 1; row < m_cp->rows; row++) {
double factor = m_cp->data[row]->data[k] / m_cp->data[h]->data[k];
@ -225,16 +227,18 @@ Array_double *solve_matrix_gaussian(Matrix_double *m, Array_double *b) {
Array_double *jacobi_solve(Matrix_double *m, Array_double *b,
double l2_convergence_tolerance,
size_t max_iterations) {
assert(m->rows == m->cols);
assert(b->size == m->cols);
size_t iter = max_iterations;
-Array_double *x_k = InitArrayWithSize(double, b->size, rand_from(0.1, 10.0));
+Array_double *x_k = InitArrayWithSize(double, b->size, 0.0);
Array_double *x_k_1 =
InitArrayWithSize(double, b->size, rand_from(0.1, 10.0));
while ((--iter) > 0 && l2_distance(x_k_1, x_k) > l2_convergence_tolerance) {
-for (size_t i = 0; i < x_k->size; i++) {
+for (size_t i = 0; i < m->rows; i++) {
double delta = 0.0;
-for (size_t j = 0; j < x_k->size; j++) {
+for (size_t j = 0; j < m->cols; j++) {
if (i == j)
continue;
delta += m->data[i]->data[j] * x_k->data[j];
@ -251,6 +255,39 @@ Array_double *jacobi_solve(Matrix_double *m, Array_double *b,
return x_k_1;
}
Array_double *gauss_siedel_solve(Matrix_double *m, Array_double *b,
double l2_convergence_tolerance,
size_t max_iterations) {
assert(m->rows == m->cols);
assert(b->size == m->cols);
size_t iter = max_iterations;
Array_double *x_k = InitArrayWithSize(double, b->size, 0.0);
Array_double *x_k_1 =
InitArrayWithSize(double, b->size, rand_from(0.1, 10.0));
while ((--iter) > 0) {
for (size_t i = 0; i < x_k->size; i++)
x_k->data[i] = x_k_1->data[i];
for (size_t i = 0; i < m->rows; i++) {
double delta = 0.0;
for (size_t j = 0; j < m->cols; j++) {
if (i == j)
continue;
delta += m->data[i]->data[j] * x_k_1->data[j];
}
x_k_1->data[i] = (b->data[i] - delta) / m->data[i]->data[i];
}
if (l2_distance(x_k_1, x_k) <= l2_convergence_tolerance)
break;
}
free_vector(x_k);
return x_k_1;
}
Matrix_double *slice_column(Matrix_double *m, size_t x) {
Matrix_double *sliced = copy_matrix(m);

View File

@ -1,7 +1,5 @@
#include "lizfcm.h"
double rand_from(double min, double max) {
-double range = (max - min);
-double div = RAND_MAX / range;
-return min + (rand() / div);
+return min + (rand() / (RAND_MAX / (max - min)));
}

View File

@ -1,4 +1,5 @@
#include "lizfcm.test.h"
#include <assert.h>
#include <math.h>
Matrix_double *generate_ddm(size_t n) {
@ -31,3 +32,62 @@ UTEST(jacobi, jacobi_solve) {
free_vector(b);
free_vector(solution);
}
UTEST(jacobi, gauss_siedel_solve) {
Matrix_double *m = generate_ddm(2);
Array_double *b_1 = InitArrayWithSize(double, m->rows, 1.0);
Array_double *b = m_dot_v(m, b_1);
double tolerance = 0.001;
size_t max_iter = 400;
Array_double *solution = gauss_siedel_solve(m, b, tolerance, max_iter);
for (size_t y = 0; y < m->rows; y++) {
double dot = v_dot_v(m->data[y], solution);
EXPECT_NEAR(b->data[y], dot, 0.1);
}
free_matrix(m);
free_vector(b_1);
free_vector(b);
free_vector(solution);
}
UTEST(jacobi, leslie_solve) {
Array_double *felicity = InitArray(double, {0.0, 1.5, 0.8});
Array_double *survivor_ratios = InitArray(double, {0.8, 0.55});
Matrix_double *leslie = leslie_matrix(survivor_ratios, felicity);
Array_double *initial_pop = InitArray(double, {10.0, 20.0, 15.0});
Array_double *next = m_dot_v(leslie, initial_pop);
Matrix_double *augmented = add_column(leslie, next);
Matrix_double *leslie_augmented_echelon = gaussian_elimination(augmented);
Array_double *next_echelon =
col_v(leslie_augmented_echelon, leslie_augmented_echelon->cols - 1);
Matrix_double *leslie_echelon = slice_column(
leslie_augmented_echelon, leslie_augmented_echelon->cols - 1);
double tolerance = 0.001;
size_t max_iter = 400;
Array_double *initial_pop_guess =
jacobi_solve(leslie_echelon, next_echelon, tolerance, max_iter);
for (size_t y = 0; y < initial_pop->size; y++) {
EXPECT_NEAR(initial_pop_guess->data[y], initial_pop->data[y], 0.05);
}
free_matrix(leslie);
free_matrix(augmented);
free_matrix(leslie_augmented_echelon);
free_matrix(leslie_echelon);
free_vector(felicity);
free_vector(survivor_ratios);
free_vector(next);
free_vector(next_echelon);
free_vector(initial_pop);
free_vector(initial_pop_guess);
}