## module gaussElimin
''' x = gaussElimin(a,b).
    Solves [a]{x} = {b} by Gauss elimination.
'''
import numpy as np

def gaussElimin(a,b):
    n = len(b)
  # Elimination Phase
    for k in range(0,n-1):
        for i in range(k+1,n):
            if a[i,k] != 0.0:
                lam = a[i,k]/a[k,k]
                a[i,k+1:n] = a[i,k+1:n] - lam*a[k,k+1:n]
                b[i] = b[i] - lam*b[k]
  # Back substitution
    for k in range(n-1,-1,-1):
        b[k] = (b[k] - np.dot(a[k,k+1:n],b[k+1:n]))/a[k,k]
    return b

















































## example2_4
'''
An n × n Vandermonde matrix A is defined by A_ij = v_i^(n-j),
i = 1, 2, ..., n, j = 1, 2, ..., n.
Use the function gaussElimin to compute the solution of Ax = b,
where A is the 6 × 6 Vandermonde matrix generated from the vector
v = [1.0 1.2 1.4 1.6 1.8 2.0]^T
and
b = [0 1 0 1 0 1]^T
Also evaluate the accuracy of the solution.
'''
import numpy as np
from gaussElimin import *
def vandermode(v):
    n = len(v)
    a = np.zeros((n,n))
    for j in range(n):
        a[:,j] = v**(n-j-1)
    return a

v = np.array([1.0, 1.2, 1.4, 1.6, 1.8, 2.0])
b = np.array([0.0, 1.0, 0.0, 1.0, 0.0, 1.0])
a = vandermode(v)
aOrig = a.copy() # Save original matrix
bOrig = b.copy() # and the constant vector
x = gaussElimin(a,b)
det = np.prod(np.diagonal(a))
print('x =\n',x)
print('\ndet =',det)
print('\nCheck result: [a]{x} - b =\n',np.dot(aOrig,x) - bOrig)
input("\nPress return to exit")

























## module LUdecomp
''' a = LUdecomp(a)
    LU decomposition: [L][U] = [a]

    x = LUsolve(a,b)
    Solution phase: solves [L][U]{x} = {b}
'''
import numpy as np

def LUdecomp(a):
    n = len(a)
    for k in range(0,n-1):
        for i in range(k+1,n):
            if a[i,k] != 0.0:
                lam = a[i,k]/a[k,k]
                a[i,k+1:n] = a[i,k+1:n] - lam*a[k,k+1:n]
                a[i,k] = lam
    return a

def LUsolve(a,b):
    n = len(a)
    for k in range(1,n):
        b[k] = b[k] - np.dot(a[k,0:k],b[0:k])
    b[n-1] = b[n-1]/a[n-1,n-1]    
    for k in range(n-2,-1,-1):
        b[k] = (b[k] - np.dot(a[k,k+1:n],b[k+1:n]))/a[k,k]
    return b

























## example2_7
# Program that solves AX = B with Doolittle's decomposition method and computes |A|.
# Test the program with
# A = [ 3 -1  4
#      -2  0  5
#       7  2 -2]
# B = [ 6 -4
#       3  2
#       7 -5]
import numpy as np
from LUdecomp import *
a = np.array([[ 3.0, -1.0, 4.0], \
                [-2.0, 0.0, 5.0], \
                [ 7.0, 2.0, -2.0]])
b = np.array([[ 6.0, 3.0, 7.0], \
                [-4.0, 2.0, -5.0]])
a = LUdecomp(a) # Decompose [a]
det = np.prod(np.diagonal(a))
print("\nDeterminant =",det)
for i in range(len(b)): # Back-substitute one
    x = LUsolve(a,b[i]) # constant vector at a time
    print("x",i+1,"=",x)
input("\nPress return to exit")
























## module choleski
''' L = choleski(a)
    Choleski decomposition: [L][L]^T = [a]

    x = choleskiSol(L,b)
    Solution phase of Choleski's decomposition method
'''
import numpy as np
import math
import error

def choleski(a):
    n = len(a)
    for k in range(n):
        try:
            a[k,k] = math.sqrt(a[k,k] - np.dot(a[k,0:k],a[k,0:k]))
        except ValueError:
            error.err('Matrix is not positive definite')
        for i in range(k+1,n):
            a[i,k] = (a[i,k] - np.dot(a[i,0:k],a[k,0:k]))/a[k,k]
    for k in range(1,n): a[0:k,k] = 0.0
    return a

def choleskiSol(L,b):
    n = len(b)
  # Solution of [L]{y} = {b}  
    for k in range(n):
        b[k] = (b[k] - np.dot(L[k,0:k],b[0:k]))/L[k,k]
  # Solution of [L_transpose]{x} = {y}      
    for k in range(n-1,-1,-1):
        b[k] = (b[k] - np.dot(L[k+1:n,k],b[k+1:n]))/L[k,k]
    return b
























## example2_8
# Solve the equations Ax = b by Choleski’s decomposition, where
# A = [ 1.44  -0.36   5.52   0.00
#      -0.36  10.33  -7.78   0.00
#       5.52  -7.78  28.40   9.00
#       0.00   0.00   9.00  61.00]
# b = [0.04, -2.15, 0.0, 0.88]^T
# Also check the solution.
import numpy as np
from choleski import *
a = np.array([[ 1.44, -0.36, 5.52, 0.0], \
                [-0.36, 10.33, -7.78, 0.0], \
                [ 5.52, -7.78, 28.40, 9.0], \
                [ 0.0, 0.0, 9.0, 61.0]])
b = np.array([0.04, -2.15, 0.0, 0.88])
aOrig = a.copy()
L = choleski(a)
x = choleskiSol(L,b)
print("x =",x)
print('\nCheck: A*x =\n',np.dot(aOrig,x))
input("\nPress return to exit")
























## module LUdecomp3
''' c,d,e = LUdecomp3(c,d,e).
    LU decomposition of tridiagonal matrix [a], where {c}, {d}
    and {e} are the diagonals of [a]. On output
    {c},{d} and {e} are the diagonals of the decomposed matrix.

    x = LUsolve3(c,d,e,b).
    Solution of [a]{x} = {b}, where {c}, {d} and {e} are the
    vectors returned from LUdecomp3.
'''

def LUdecomp3(c,d,e):
    n = len(d)
    for k in range(1,n):
        lam = c[k-1]/d[k-1]
        d[k] = d[k] - lam*e[k-1]
        c[k-1] = lam
    return c,d,e

def LUsolve3(c,d,e,b):
    n = len(d)
    for k in range(1,n):
        b[k] = b[k] - c[k-1]*b[k-1]
    b[n-1] = b[n-1]/d[n-1]
    for k in range(n-2,-1,-1):
        b[k] = (b[k] - e[k]*b[k+1])/d[k]
    return b
























# example2_11
# Use the functions LUdecomp3 and LUsolve3 to solve Ax = b, where
# A = [ 2 -1  0  0  0
#      -1  2 -1  0  0
#       0 -1  2 -1  0
#       0  0 -1  2 -1
#       0  0  0 -1  2]
# b = [5, -5, 4, -5, 5]^T
import numpy as np
from LUdecomp3 import *

d = np.ones((5))*2.0
c = np.ones((4))*(-1.0)
b = np.array([5.0, -5.0, 4.0, -5.0, 5.0])
e = c.copy()
c,d,e = LUdecomp3(c,d,e)
x = LUsolve3(c,d,e,b)
print("\nx =\n",x)
input("\nPress return to exit")
























## module LUdecomp5
''' d,e,f = LUdecomp5(d,e,f).
    LU decomposition of symmetric pentadiagonal matrix [a], where
    {f}, {e} and {d} are the diagonals of [a]. On output
    {d},{e} and {f} are the diagonals of the decomposed matrix.
    
    x = LUsolve5(d,e,f,b).
    Solves [a]{x} = {b}, where {d}, {e} and {f} are the vectors
    returned from LUdecomp5.
    '''
def LUdecomp5(d,e,f):
    n = len(d)
    for k in range(n-2):
        lam = e[k]/d[k]
        d[k+1] = d[k+1] - lam*e[k]
        e[k+1] = e[k+1] - lam*f[k]
        e[k] = lam
        lam = f[k]/d[k]
        d[k+2] = d[k+2] - lam*f[k]
        f[k] = lam
    lam = e[n-2]/d[n-2]
    d[n-1] = d[n-1] - lam*e[n-2]
    e[n-2] = lam
    return d,e,f

def LUsolve5(d,e,f,b):
    n = len(d)
    b[1] = b[1] - e[0]*b[0]
    for k in range(2,n):
        b[k] = b[k] - e[k-1]*b[k-1] - f[k-2]*b[k-2]
        
    b[n-1] = b[n-1]/d[n-1]
    b[n-2] = b[n-2]/d[n-2] - e[n-2]*b[n-1]
    for k in range(n-3,-1,-1):
        b[k] = b[k]/d[k] - e[k]*b[k+1] - f[k]*b[k+2]
    return b
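

## usage sketch for LUdecomp5 (illustrative, not from the text)
# Minimal sketch showing how LUdecomp5/LUsolve5 would be called on a small
# symmetric pentadiagonal system; the 5x5 test data below is assumed for
# illustration only.
import numpy as np
from LUdecomp5 import *

n = 5
d = np.ones(n)*6.0          # main diagonal
e = np.ones(n-1)*(-4.0)     # first sub/superdiagonal
f = np.ones(n-2)*1.0        # second sub/superdiagonal
b = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

# Rebuild the full matrix before decomposition (used only to check the answer)
A = np.diag(d) + np.diag(e,1) + np.diag(e,-1) + np.diag(f,2) + np.diag(f,-2)
bOrig = b.copy()

d,e,f = LUdecomp5(d,e,f)    # decomposition overwrites d, e and f
x = LUsolve5(d,e,f,b)       # solution phase overwrites b
print("x =", x)
print("Check: [A]{x} - {b} =", np.dot(A,x) - bOrig)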
























## module swap
''' swapRows(v,i,j).
    Swaps rows i and j of a vector or matrix [v].

    swapCols(v,i,j).
    Swaps columns i and j of matrix [v].
'''
def swapRows(v,i,j):
    if len(v.shape) == 1:
        v[i],v[j] = v[j],v[i]
    else:
        v[[i,j],:] = v[[j,i],:]
        
def swapCols(v,i,j):
    v[:,[i,j]] = v[:,[j,i]]
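

## usage sketch for swap (illustrative, not from the text)
# Quick demonstration of swapRows and swapCols on assumed data; both helpers
# modify the array in place.
import numpy as np
from swap import *

a = np.array([[1.0, 2.0],
              [3.0, 4.0]])
swapRows(a,0,1)
print(a)    # [[3. 4.] [1. 2.]]
swapCols(a,0,1)
print(a)    # [[4. 3.] [2. 1.]]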
























## module LUpivot
''' a,seq = LUdecomp(a,tol=1.0e-9).
    LU decomposition of matrix [a] using scaled row pivoting.
    The returned matrix [a] contains [U] in the upper
    triangle and the nondiagonal terms of [L] in the lower triangle.
    Note that [L][U] is a row-wise permutation of the original [a];
    the permutations are recorded in the vector {seq}.
    
    x = LUsolve(a,b,seq).
    Solves [L][U]{x} = {b}, where the matrix [a] and the
    permutation vector {seq} are returned from LUdecomp.
'''
import numpy as np
import swap
import error

def LUdecomp(a,tol=1.0e-9):
    n = len(a)
    seq = np.array(range(n))
    
  # Set up scale factors
    s = np.zeros((n))
    for i in range(n):
        s[i] = max(abs(a[i,:]))        
    
    for k in range(0,n-1):
        
      # Row interchange, if needed
        p = np.argmax(np.abs(a[k:n,k])/s[k:n]) + k
        if abs(a[p,k]) <  tol: error.err('Matrix is singular')
        if p != k:
            swap.swapRows(s,k,p)
            swap.swapRows(a,k,p)
            swap.swapRows(seq,k,p)
            
      # Elimination            
        for i in range(k+1,n):
            if a[i,k] != 0.0:
                lam = a[i,k]/a[k,k]
                a[i,k+1:n] = a[i,k+1:n] - lam*a[k,k+1:n]
                a[i,k] = lam
    return a,seq

def LUsolve(a,b,seq):
    n = len(a)
    
  # Rearrange constant vector; store it in [x]
    x = b.copy()
    for i in range(n):
        x[i] = b[seq[i]]
        
  # Solution
    for k in range(1,n):
        x[k] = x[k] - np.dot(a[k,0:k],x[0:k])
    x[n-1] = x[n-1]/a[n-1,n-1]    
    for k in range(n-2,-1,-1):
        x[k] = (x[k] - np.dot(a[k,k+1:n],x[k+1:n]))/a[k,k]
    return x
























## example2_13
# Function that inverts a matrix using LU decomposition with pivoting. Test the
# function by inverting
# A = [ 0.6 -0.4  1.0
#      -0.3  0.2  0.5
#       0.6 -1.0  0.5]
import numpy as np
from LUpivot import *
def matInv(a):
    n = len(a[0])
    aInv = np.identity(n)
    a,seq = LUdecomp(a)
    for i in range(n):
        aInv[:,i] = LUsolve(a,aInv[:,i],seq)
    return aInv

a = np.array([[ 0.6, -0.4, 1.0], \
              [-0.3,  0.2, 0.5], \
              [ 0.6, -1.0, 0.5]])
aOrig = a.copy() # Save original [a]
aInv = matInv(a) # Invert [a] (original [a] is destroyed)
print("\naInv =\n",aInv)
print("\nCheck: a*aInv =\n", np.dot(aOrig,aInv))
input("\nPress return to exit")

























## example2_14
# Invert the matrix
# A = [ 2 -1  0  0  0  0
#      -1  2 -1  0  0  0
#       0 -1  2 -1  0  0
#       0  0 -1  2 -1  0
#       0  0  0 -1  2 -1
#       0  0  0  0 -1  5]
import numpy as np
from LUdecomp3 import *
n = 6
d = np.ones((n))*2.0
e = np.ones((n-1))*(-1.0)
c = e.copy()
d[n-1] = 5.0
aInv = np.identity(n)
c,d,e = LUdecomp3(c,d,e)
for i in range(n):
    aInv[:,i] = LUsolve3(c,d,e,aInv[:,i])
print("\nThe inverse matrix is:\n",aInv)
input("\nPress return to exit")

























## module gaussSeidel
''' x,numIter,omega = gaussSeidel(iterEqs,x,tol = 1.0e-9)
    Gauss-Seidel method for solving [A]{x} = {b}.
    The matrix [A] should be sparse. User must supply the
    function iterEqs(x,omega) that returns the improved {x},
    given the current {x} ('omega' is the relaxation factor).
'''
import numpy as np
import math

def gaussSeidel(iterEqs,x,tol = 1.0e-9):
    omega = 1.0
    k = 10
    p = 1
    for i in range(1,501):
        xOld = x.copy()
        x = iterEqs(x,omega)
        dx = math.sqrt(np.dot(x-xOld,x-xOld))
        if dx < tol: return x,i,omega
      # Compute relaxation factor after k+p iterations
        if i == k: dx1 = dx
        if i == k + p:
            dx2 = dx
            omega = 2.0/(1.0 + math.sqrt(1.0 - (dx2/dx1)**(1.0/p)))
    print('Gauss-Seidel failed to converge')
























## module conjGrad
''' x, numIter = conjGrad(Av,x,b,tol=1.0e-9)
    Conjugate gradient method for solving [A]{x} = {b}.
    The matrix [A] should be sparse. User must supply
    the function Av(v) that returns the vector [A]{v}
    and the starting vector x.
'''    
import numpy as np
import math

def conjGrad(Av,x,b,tol=1.0e-9):
    n = len(b)
    r = b - Av(x)
    s = r.copy()
    for i in range(n):
        u = Av(s)
        alpha = np.dot(s,r)/np.dot(s,u)
        x = x + alpha*s
        r = b - Av(x)
        if math.sqrt(np.dot(r,r)) < tol:
            break
        else:
            beta = -np.dot(r,u)/np.dot(s,u)
            s = r + beta*s
    return x,i
























## example2_17
# Program to solve the following n simultaneous equations by the Gauss-Seidel
# method with relaxation (the program should work with any value of n):
#
# [ 2 -1  0  0 ...  0  0  0  1] [x1  ]   [0]
# [-1  2 -1  0 ...  0  0  0  0] [x2  ]   [0]
# [ 0 -1  2 -1 ...  0  0  0  0] [x3  ]   [0]
# [ .  .  .  .      .  .  .  .] [ .  ] = [.]
# [ 0  0  0  0 ... -1  2 -1  0] [xn-2]   [0]
# [ 0  0  0  0 ...  0 -1  2 -1] [xn-1]   [0]
# [ 1  0  0  0 ...  0  0 -1  2] [xn  ]   [1]
#
# Run the program with n = 20.
import numpy as np
from gaussSeidel import *
def iterEqs(x,omega):
    n = len(x)
    x[0] = omega*(x[1] - x[n-1])/2.0 + (1.0 - omega)*x[0]
    for i in range(1,n-1):
        x[i] = omega*(x[i-1] + x[i+1])/2.0 + (1.0 - omega)*x[i]
    x[n-1] = omega*(1.0 - x[0] + x[n-2])/2.0 \
           + (1.0 - omega)*x[n-1]
    return x
n = int(input("Number of equations ==> "))
x = np.zeros(n)
x,numIter,omega = gaussSeidel(iterEqs,x)
print("\nNumber of iterations =",numIter)
print("\nRelaxation factor =",omega)
print("\nThe solution is:\n",x)
input("\nPress return to exit")
























## example2_18
# Solve Example 2.17 with the conjugate gradient method, also using n = 20
import numpy as np
from conjGrad import *
def Ax(v):
    n = len(v)
    Ax = np.zeros(n)
    Ax[0] = 2.0*v[0] - v[1] + v[n-1]
    Ax[1:n-1] = -v[0:n-2] + 2.0*v[1:n-1] - v[2:n]
    Ax[n-1] = -v[n-2] + 2.0*v[n-1] + v[0]
    return Ax
n = int(input("Number of equations ==> "))
b = np.zeros(n)
b[n-1] = 1.0
x = np.zeros(n)
x,numIter = conjGrad(Ax,x,b)
print("\nThe solution is:\n",x)
print("\nNumber of iterations =",numIter)
input("\nPress return to exit")