Chapter 2: Matrix Theory and Visualization

In [ ]:
#If you are using Colab, data files must be read from your Google Drive.
#Run the following code to give Colab access to your Drive.
from google.colab import drive
drive.mount('/content/drive')

#File paths should then look like this:
file_path = '/content/drive/MyDrive/LinAlg/data/file.png'
#As opposed to your computer's directory, which might look like this:
file_path = '/Users/yourname/LinAlg/data/file.png'
In [ ]:
import numpy as np
from numpy.linalg import inv, solve, det
from scipy.linalg import null_space, svd
from numpy.linalg import matrix_rank
from numpy import dot, cross
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import plotly.graph_objs as go
from sympy import Matrix, eye, symbols, simplify
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive

Computational Examples of Matrices

In [ ]:
A = np.array([[1, 0, 0],
              [4, 3, 2]])
B = np.array([[0, 1],
              [-1, 2],
              [0, 0]])
C = A @ B  #Matrix multiplication
print("C =\n", C)

print("Transpose of C:\n", C.T) #Transpose of C


A = np.array([[1, -1],
              [1,  2]])
print("A =\n", A)
print("First column of A:", A[:, 0])
print("First row of A:", A[0, :])
A_inv = inv(A) #Inverse of A
print("Inverse of A =\n", A_inv)
print("A * A_inv =\n", A @ A_inv)  #Verify the inverse

#Solve linear equations
A = np.array([[30, 40],
              [1, 1]])
b = np.array([1000, 30])
x = solve(A, b)
print("Solution x =\n", x)
print("Alternative way: inv(A) @ b =\n", inv(A) @ b) #Another way to solve
print("Determinant of A =", det(A)) #Compute the determinant

#Dot Product of 2 vectors
u = np.array([2, 0, 1])
v = np.array([2, -4, 3])
print("Dot product:", np.dot(u, v)) #For nD
print("Cross product:", np.cross(u, v))  #For 3D

print("Rank of A:", matrix_rank(A)) #Rank of a matrix

#Orthogonal Matrices
p = np.sqrt(2) / 2
Q = np.array([[p, -p],
              [p,  p]])
print("Q =\n", Q) #Q is orthogonal
print("Q * Q^T =\n", Q @ Q.T)  #Verify as orthogonal, should be the identity matrix
print("Det(Q) =", det(Q))  #Determinant should be 1 or -1

#Generate a 2-by-4 zero matrix
zeroM = np.zeros((2, 4))
print("Zero matrix 2x4:\n", zeroM)
print("Alternative:\n", np.reshape(np.repeat(0, 8), (2, 4)))

#Generate a 3-by-3 identity matrix
print("3x3 identity matrix:\n", np.identity(3))
print("Same using np.eye:\n", np.eye(3))

#Generate a 3-by-3 diagonal matrix
diagM = np.diag([2.4, 3.1, -0.9])
print("Diagonal matrix:\n", diagM)
C =
 [[ 0  1]
 [-3 10]]
Transpose of C:
 [[ 0 -3]
 [ 1 10]]
A =
 [[ 1 -1]
 [ 1  2]]
First column of A: [1 1]
First row of A: [ 1 -1]
Inverse of A =
 [[ 0.66666667  0.33333333]
 [-0.33333333  0.33333333]]
A * A_inv =
 [[1.00000000e+00 0.00000000e+00]
 [1.11022302e-16 1.00000000e+00]]
Solution x =
 [20. 10.]
Alternative way: inv(A) @ b =
 [20. 10.]
Determinant of A = -9.999999999999998
Dot product: 7
Cross product: [ 4 -4 -8]
Rank of A: 2
Q =
 [[ 0.70710678 -0.70710678]
 [ 0.70710678  0.70710678]]
Q * Q^T =
 [[1.00000000e+00 4.26642159e-17]
 [4.26642159e-17 1.00000000e+00]]
Det(Q) = 1.0
Zero matrix 2x4:
 [[0. 0. 0. 0.]
 [0. 0. 0. 0.]]
Alternative:
 [[0 0 0 0]
 [0 0 0 0]]
3x3 identity matrix:
 [[1. 0. 0.]
 [0. 1. 0.]
 [0. 0. 1.]]
Same using np.eye:
 [[1. 0. 0.]
 [0. 1. 0.]
 [0. 0. 1.]]
Diagonal matrix:
 [[ 2.4  0.   0. ]
 [ 0.   3.1  0. ]
 [ 0.   0.  -0.9]]
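
The two solution paths above, solve(A, b) and inv(A) @ b, should agree to floating-point precision. A minimal sanity-check sketch using np.allclose (solve is the numerically preferred route, since it avoids forming the inverse explicitly):

In [ ]:
import numpy as np
from numpy.linalg import inv, solve

A = np.array([[30, 40],
              [1, 1]])
b = np.array([1000, 30])

#The two routes should agree up to rounding error
print(np.allclose(solve(A, b), inv(A) @ b))  #expected: True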

Eigenvalues and Eigenvectors

In [ ]:
A = np.array([[1, 2],
              [2, 1]])
eigvals, eigvecs = np.linalg.eig(A)

print("Eigenvalues:\n", eigvals)
print("Eigenvectors:\n", eigvecs)

# Plot eigenvector v vs non-eigenvector u
plt.figure(figsize=(6,6))
plt.title("An eigenvector vs a non-eigenvector", fontsize=14)
plt.xlabel(r'$x_1$', fontsize=14)
plt.ylabel(r'$x_2$', fontsize=14)
plt.xlim(0, 3)
plt.ylim(0, 3)

plt.arrow(0, 0, 1, 0, width=0.03, color='blue', label='u')
plt.arrow(0, 0, 1, 2, linestyle='dashed', width=0.015, color='blue', label='Au')

v = eigvecs[:, 0]
Av = A @ v
plt.arrow(0, 0, *v, width=0.03, color='red', label='v')
plt.arrow(0, 0, *Av, linestyle='dashed', width=0.015, color='red', label='Av')

plt.text(1.4, 0.1, "Non-eigenvector u", fontsize=12, color='blue')
plt.text(1.0, 2.1, "Au", fontsize=12, color='blue')
plt.text(1.5, 0.9, "Eigenvector v", fontsize=12, color='red')
plt.text(2.7, 2.95, "Av", fontsize=12, color='red')

plt.grid(True)
plt.gca().set_aspect('equal')
plt.show()

#Verify diagonalization and decomposition
C = np.array([[2, 1],
              [1, 2]])
eigvals_C, Q = np.linalg.eig(C)
D = np.diag(eigvals_C)  #Matrix diagonalization
Q_T = Q.T

D_check = np.linalg.inv(Q) @ C @ Q
print("Diagonalized matrix D =\n", D_check)

#Decomposition Q * D * Q^(-1)
reconstructed_C = Q @ D @ np.linalg.inv(Q)
print("Reconstructed C =\n", reconstructed_C)

#Decomposition as a sum of outer products (spectral decomposition)
outer_sum = eigvals_C[0] * np.outer(Q[:,0], Q[:,0]) + eigvals_C[1] * np.outer(Q[:,1], Q[:,1])
print("Spectral decomposition of C =\n", outer_sum)
Eigenvalues:
 [ 3. -1.]
Eigenvectors:
 [[ 0.70710678 -0.70710678]
 [ 0.70710678  0.70710678]]
[Figure: an eigenvector vs a non-eigenvector, showing u and Au (blue) and v and Av (red)]
Diagonalized matrix D =
 [[3. 0.]
 [0. 1.]]
Reconstructed C =
 [[2. 1.]
 [1. 2.]]
Spectral decomposition of C =
 [[2. 1.]
 [1. 2.]]
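
The defining property A v = λ v can be checked directly for each eigenpair returned by np.linalg.eig; a short verification sketch:

In [ ]:
import numpy as np

A = np.array([[1, 2],
              [2, 1]])
eigvals, eigvecs = np.linalg.eig(A)

#Column i of eigvecs pairs with eigvals[i]: A v should equal lambda * v
for lam, v in zip(eigvals, eigvecs.T):
    print(np.allclose(A @ v, lam * v))  #expected: True for each pair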

Some Types of Matrix and Vector Products

In [ ]:
#Hadamard product of two matrices
A = np.array([[1, 2], [3, 4]])
B = np.array([[2, 4], [6, 8]])
hadamard = A * B  # Element-wise multiplication
print(hadamard)

#Jordan product of A and B
A = np.array([[1, 2], [3, 4]])
B = np.array([[2, 1], [2, 1]])
jordan = (A @ B + B @ A) / 2
print(jordan)

#Commutator of A and B
commutator = A @ B - B @ A
print(commutator)

#Trace
trace_comm = np.trace(commutator)
print(trace_comm)

#Cross and dot product
x = np.array([1, 2, 3])
y = np.array([4, 5, 6])

cross_product = np.cross(x, y)
dot_product = np.dot(x, y)

print(cross_product)
print(dot_product)

#Outer product of two vectors
a = np.array([1, 2])
b = np.array([1, 2, 3, 4])
outer = np.outer(a, b) #outer product: a (2-by-1) times b^T (1-by-4) gives a 2-by-4 matrix
print(outer)

#Outer product of matrices
A = np.arange(1, 5).reshape(2, 2)
B = np.arange(1, 7).reshape(2, 3)
outer_AB = np.einsum('ij,kl->ijkl', A, B)
print(outer_AB.shape)

#Kronecker product
A = np.diag([1, 2])
B = np.array([[1, 2], [3, 4]])
kron = np.kron(A, B)
print(kron)

# Kronecker product with vectors
ones = np.ones((2,))
y = np.array([1, 2, 3, 4])
kron_vec = np.kron(ones, y.reshape(1, -1)) #1-by-8 row (ones is promoted to 1-by-2); use ones.reshape(2, 1) for a 2-by-4 matrix
print(kron_vec)

#Cholesky decomposition
dat = np.arange(1, 5).reshape(2, 2)
A = dat @ dat.T  #symmetric positive definite by construction
cholesky = np.linalg.cholesky(A)
print(cholesky)

#Direct sum of two matrices
A = np.arange(1, 5).reshape(2, 2)
B = np.arange(1, 7).reshape(2, 3)
A1 = np.vstack([A, np.zeros((2, 2))])
B1 = np.vstack([np.zeros((2, 3)), B])
direct_sum = np.hstack([A1, B1])
print(direct_sum)

#Express the direct sum by Kronecker products
#Note: the Kronecker-product form only works when the blocks have the
#same shape; here A is 2-by-2 and B is 2-by-3, so the two products
#below have different shapes and cannot be added.
I2 = np.eye(2)
Z1 = np.zeros((2, 2))
Z2 = np.zeros((2, 3))
#direct_sum_kron = np.kron(np.array([[1, 0], [0, 0]]), A) + np.kron(np.array([[0, 0], [0, 1]]), B)
#print(direct_sum_kron)
[[ 2  8]
 [18 32]]
[[5.5 5.5]
 [9.5 7.5]]
[[ 1 -5]
 [ 9 -1]]
0
[-3  6 -3]
32
[[1 2 3 4]
 [2 4 6 8]]
(2, 2, 2, 3)
[[1 2 0 0]
 [3 4 0 0]
 [0 0 2 4]
 [0 0 6 8]]
[[1. 2. 3. 4. 1. 2. 3. 4.]]
[[2.23606798 0.        ]
 [4.91934955 0.89442719]]
[[1. 2. 0. 0. 0.]
 [3. 4. 0. 0. 0.]
 [0. 0. 1. 2. 3.]
 [0. 0. 4. 5. 6.]]
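
As noted above, the commented-out Kronecker expression cannot run as written because A and B have different shapes. The identity A ⊕ B = E11 ⊗ A + E22 ⊗ B does hold when the blocks share a shape; a minimal sketch with two 2-by-2 blocks (the matrices here are illustrative, not from the text):

In [ ]:
import numpy as np

A = np.array([[1, 2], [3, 4]])
B = np.array([[5, 6], [7, 8]])

E11 = np.array([[1, 0], [0, 0]])  #unit matrix selecting the top-left block
E22 = np.array([[0, 0], [0, 1]])  #unit matrix selecting the bottom-right block

#Direct sum of equal-sized blocks as a sum of Kronecker products
direct_sum_kron = np.kron(E11, A) + np.kron(E22, B)
print(direct_sum_kron)
#[[1 2 0 0]
# [3 4 0 0]
# [0 0 5 6]
# [0 0 7 8]]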

SVD Decompositions and Reconstructions

In [ ]:
A = np.array([[-1, 1, 0], [2, -2, 3]])
print("Matrix A:\n", A)

#SVD example for a 2-by-3 matrix
U, d, Vt = np.linalg.svd(A, full_matrices=False)
print(np.round(d, 2))   #singular values
print(np.round(U, 2))   #left singular vectors (columns of U)
print(np.round(Vt, 2))  #right singular vectors (rows of Vt)

#Check: singular values are the square roots of the eigenvalues of A A^T
eigvals = np.linalg.eigvalsh(A @ A.T)
print(np.sqrt(np.round(eigvals, 10)))  #note eigvalsh returns ascending order

# Data reconstruction by singular vectors
recon1 = d[0] * np.outer(U[:, 0], Vt[0, :])  #rank-1 approximation
print(np.round(recon1, 1))
recon2 = recon1 + d[1] * np.outer(U[:, 1], Vt[1, :])  #rank-2: exact for this 2-by-3 matrix
print(np.round(recon2, 2))

#Schematic diagram of SVD
fig, ax = plt.subplots(figsize=(15, 10))
ax.axis('off')
def draw_matrix_box(ax, x, y, w, h, color='black', label='', lcolor='black', dash=False):
    rect_style = 'dashed' if dash else 'solid'
    rect = plt.Rectangle((x, y), w, h, linewidth=3, edgecolor=color, facecolor='none', linestyle=rect_style)
    ax.add_patch(rect)
    if label:
        ax.text(x + w/2, y + h + 0.3, label, ha='center', va='bottom', fontsize=12, color=lcolor)
def draw_label(ax, x, y, text, **kwargs):
    ax.text(x, y, text, **kwargs)
draw_label(ax, 13, 15.5, r"SVD: $A = UDV^T$ when $n > m$ or $n < m$", ha='center', fontsize=20)
draw_matrix_box(ax, 0, 7, 3, 6, color='blue', label='A[n×m]', lcolor='black')
draw_label(ax, -0.8, 10, 'n', rotation=90, color='blue')
draw_label(ax, 1.5, 13, 'm', color='red')
draw_label(ax, 2.0, 10, '...')
draw_label(ax, 5, 10, '=', fontsize=18)
draw_matrix_box(ax, 7, 7, 3, 6, color='blue', label='U[n×m]', lcolor='blue')
draw_label(ax, 6.2, 10, 'n', rotation=90, color='blue')
draw_label(ax, 8.5, 13, 'm', color='red')
draw_label(ax, 9, 10, '...', color='blue')
draw_matrix_box(ax, 12, 10, 3, 3, color='brown', label='D[m×m]', lcolor='brown')
draw_label(ax, 11.2, 11.5, 'm', rotation=90, color='red')
draw_label(ax, 13.5, 13.8, 'm', color='red')
draw_label(ax, 14.1, 12.3, '0', color='brown')
draw_label(ax, 12.9, 11, '0', color='brown')
draw_matrix_box(ax, 17, 10, 3, 3, color='red', label='(Vᵗ)[m×m]', lcolor='red')
draw_label(ax, 16.2, 11.5, 'm', rotation=90, color='red')
draw_label(ax, 18.5, 13.5, 'm', color='red')
draw_label(ax, 18.5, 11, '...', color='red', rotation=90)
draw_matrix_box(ax, 0, 0, 6, 3, color='blue', label='A[n×m]', lcolor='black')
draw_label(ax, -0.8, 1.5, 'n', rotation=90, color='blue')
draw_label(ax, 3, 3.8, 'm', color='red')
draw_label(ax, 3.5, 1.5, '...')
draw_label(ax, 8, 1.5, '=', fontsize=18)
draw_matrix_box(ax, 11, 0, 3, 3, color='blue', label='U[n×n]', lcolor='blue')
draw_label(ax, 10.2, 1.5, 'n', rotation=90, color='blue')
draw_label(ax, 12.5, 3.8, 'n', color='blue')
draw_label(ax, 13.2, 1.5, '...', color='blue')
draw_matrix_box(ax, 16, 0, 3, 3, color='brown', label='D[n×n]', lcolor='brown')
draw_label(ax, 15.2, 1.5, 'n', rotation=90, color='blue')
draw_label(ax, 17.5, 3.8, 'n', color='blue')
draw_label(ax, 18.1, 2.3, '0', color='brown')
draw_label(ax, 16.9, 1.0, '0', color='brown')
draw_matrix_box(ax, 21, 0, 6, 3, color='red', label='(Vᵗ)[n×m]', lcolor='red')
draw_label(ax, 20.2, 1.5, 'n', rotation=90, color='blue')
draw_label(ax, 24, 3.8, 'm', color='red')
draw_label(ax, 24, -1.5, r'(Vᵗ)[n×m]', color='red')
draw_label(ax, 24, 1, '...', color='red', rotation=90)
plt.xlim(-2, 30)
plt.ylim(-3, 17)
plt.show()
Matrix A:
 [[-1  1  0]
 [ 2 -2  3]]
[4.24 1.  ]
[[-0.24  0.97]
 [ 0.97  0.24]]
[[ 0.51 -0.51  0.69]
 [-0.49  0.49  0.73]]
[1.         4.24264069]
[[-0.5  0.5 -0.7]
 [ 2.1 -2.1  2.8]]
[[-1.  1.  0.]
 [ 2. -2.  3.]]
[Figure: schematic diagram of SVD, A = U D V^T, for the cases n > m and n < m]
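
Keeping all the singular values reconstructs A exactly, so U @ diag(d) @ Vt should recover the original matrix to machine precision; a quick sketch:

In [ ]:
import numpy as np

A = np.array([[-1, 1, 0], [2, -2, 3]])
U, d, Vt = np.linalg.svd(A, full_matrices=False)

#Full reconstruction from the thin SVD
print(np.allclose(U @ np.diag(d) @ Vt, A))  #expected: True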

SVD Example: Darwin/Tahiti Data

In [ ]:
#SVD analysis for the weighted SOI from SLP data
pda = pd.read_csv("/content/drive/MyDrive/LinAlg/data/PSTANDdarwin.txt", sep=r'\s+', header=None)
pta = pd.read_csv("/content/drive/MyDrive/LinAlg/data/PSTANDtahiti.txt", sep=r'\s+', header=None)

#December anomalies: 13th column, index 12 in Python
pdaDec = pda.iloc[:, 12]
ptaDec = pta.iloc[:, 12]

#Space-time data format
ptada1 = np.column_stack((pdaDec, ptaDec))

#2009-2015
ptada = ptada1[58:65].T  #rows 58-64 correspond to years 2009-2015
years = np.arange(2009, 2016)
stations = ["Darwin", "Tahiti"]

ptada_df = pd.DataFrame(ptada, index=stations, columns=years)
print(ptada_df)
print(ptada_df.shape)

#SVD
U, d, Vt = np.linalg.svd(ptada, full_matrices=False)
D = np.diag(d)

print(np.round(U, 2))
print(np.round(D, 2))
print(np.round(Vt, 2))

#1951-2015
ptada = ptada1.T
years_full = np.arange(1951, 2016)
ptada_df_full = pd.DataFrame(ptada, index=stations, columns=years_full)

print(ptada_df_full.iloc[:, :6])
print("Shape:", ptada_df_full.shape)

#SVD
U, d, Vt = np.linalg.svd(ptada, full_matrices=False)
D = np.diag(d)
V = Vt.T

print("U:\n", np.round(U, 2))
print("D:\n", np.round(D, 2))
print("V (first 5 years):\n", np.round(V[:5], 2))

#Plot PC1 and PC2
plt.figure(figsize=(10, 5))
plt.plot(years_full, -V[:, 0], 'o-', color='black', label='PC1')
plt.plot(years_full, -V[:, 1], linestyle='--', color='purple', label='PC2')
plt.title("Tahiti-Darwin SLP Principal Components")
plt.xlabel("Year")
plt.ylabel("PC")
plt.ylim(-0.45, 0.45)
plt.legend()

#El Niño samples
plt.plot([1982, 1997], [-V[31, 0], -V[46, 0]], 'ro')
plt.text(1982, -V[31, 0] + 0.07, "EN", color='red')
plt.text(1997, -V[46, 0] + 0.07, "EN", color='red')

#La Niña samples
plt.plot([1975, 2010], [-V[24, 0], -V[59, 0]], 'bo')
plt.text(1975, -V[24, 0] - 0.07, "LN", color='blue')
plt.text(2010, -V[59, 0] - 0.07, "LN", color='blue')

plt.show()

#Custom PC index
y = -3 * V[:, 0] + 0.3 * np.cumsum(V[:, 0]) - 0.3 * np.cumsum(V[:, 1])

plt.figure(figsize=(10, 5))
plt.plot(years_full, y, 'o-', color='black', label='Custom PC')
plt.plot(years_full, -V[:, 1], linestyle='--', color='purple', label='PC2')
plt.title("Tahiti-Darwin SLP Principal Components")
plt.xlabel("Year")
plt.ylabel("PC")
plt.ylim(-1, 1)
plt.legend()

#El Niño and La Niña
plt.plot([1982, 1997], [-V[31, 0], -V[46, 0]], 'ro')
plt.text(1982, -V[31, 0] + 0.07, "EN", color='red')
plt.text(1997, -V[46, 0] + 0.07, "EN", color='red')

plt.plot([1975, 2010], [-V[24, 0], -V[59, 0]], 'bo')
plt.text(1975, -V[24, 0] - 0.07, "LN", color='blue')
plt.text(2010, -V[59, 0] - 0.07, "LN", color='blue')

plt.show()

#Cumulative sums of PCs
plt.figure(figsize=(10, 5))
plt.plot(years_full, np.cumsum(V[:, 0]), label='cumsum PC1')
plt.plot(years_full, np.cumsum(V[:, 1]), label='cumsum PC2', color='red')
plt.plot(years_full, -np.cumsum(V[:, 0]) + np.cumsum(V[:, 1]), label='-cumsum PC1 + cumsum PC2', color='blue')
plt.ylim(-1, 1)
plt.ylabel("Cumsum index")
plt.legend()
plt.show()

#Interactive plot for a better on-screen display
PC_df = pd.DataFrame({'Year': years_full, 'PC1': V[:, 0], 'PC2': V[:, 1]})

fig = go.Figure()
fig.add_trace(go.Scatter(x=PC_df['Year'], y=-PC_df['PC1'], mode='lines+markers', name='PC1'))
fig.update_layout(title='PC1 Interactive Plot', yaxis_title='PC1', xaxis_title='Year')
fig.show()
        2009  2010  2011  2012  2013  2014  2015
Darwin   0.5  -2.3  -2.2   0.3   0.3   0.1  -0.4
Tahiti  -0.7   2.5   1.9  -0.7   0.4  -0.8  -1.3
(2, 7)
[[-0.66  0.75]
 [ 0.75  0.66]]
[[4.7  0.  ]
 [0.   1.42]]
[[-0.18  0.72  0.61 -0.15  0.02 -0.14 -0.15]
 [-0.06 -0.06 -0.28 -0.17  0.34 -0.32 -0.82]]
        1951  1952  1953  1954  1955  1956
Darwin   1.4   0.9   0.6  -0.8   0.2  -1.6
Tahiti   0.1  -1.1  -0.1   1.5   1.8   0.1
Shape: (2, 65)
U:
 [[-0.75  0.66]
 [ 0.66  0.75]]
D:
 [[10.02  0.  ]
 [ 0.    7.09]]
V (first 5 years):
 [[-0.1   0.14]
 [-0.14 -0.03]
 [-0.05  0.05]
 [ 0.16  0.08]
 [ 0.1   0.21]]
[Figure: Tahiti-Darwin SLP principal components PC1 and PC2, with El Niño (EN) and La Niña (LN) years marked]
[Figure: the custom PC index plotted with PC2]
[Figure: cumulative sums of PC1 and PC2]
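
A standard diagnostic for an SVD analysis like this is the fraction of total squared variability carried by each mode, d_i^2 / sum(d^2). A short sketch, assuming ptada is the 2-by-65 Darwin/Tahiti anomaly matrix built in the cell above:

In [ ]:
#Fraction of squared variability captured by each SVD mode
#(assumes ptada from the previous cell)
U, d, Vt = np.linalg.svd(ptada, full_matrices=False)
print(np.round(d**2 / np.sum(d**2), 3))  #one fraction per mode; the fractions sum to 1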

Minors and Cofactors

In [ ]:
def minors(matrix):
    #Matrix of minors: entry (i, j) is the determinant of the submatrix
    #obtained by deleting row i and column j
    n = matrix.shape[0]
    minor_matrix = np.empty((n, n))
    for i in range(n):
        for j in range(n):
            submatrix = np.delete(np.delete(matrix, i, axis=0), j, axis=1)
            minor_matrix[i, j] = np.linalg.det(submatrix)
    return minor_matrix

b = np.array([
    [2, 3, 5],
    [6, 7, 1],
    [9, 4, 5]
])
print(minors(b))

def cofactors(matrix):
    #Cofactor matrix: each minor times the sign (-1)**(i + j)
    sign_matrix = np.fromfunction(lambda i, j: (-1)**(i + j), matrix.shape)
    return sign_matrix * minors(matrix)

print(cofactors(b))

#Adjoint matrix, i.e., the adjugate of a square matrix
A = np.array([
    [1, 4, 5],
    [3, 7, 2],
    [2, 8, 3]
])
print(A)

adjoint_A = cofactors(A).T
print(adjoint_A)

#Adjugate identity: adj(A) = det(A) * A^(-1)
det_A = np.linalg.det(A)
inv_A = np.linalg.inv(A)
C = det_A * inv_A

print(C)
print(C - adjoint_A)
[[ 31.  21. -39.]
 [ -5. -35. -19.]
 [-32. -28.  -4.]]
[[ 31. -21. -39.]
 [  5. -35.  19.]
 [-32.  28.  -4.]]
[[1 4 5]
 [3 7 2]
 [2 8 3]]
[[  5.  28. -27.]
 [ -5.  -7.  13.]
 [ 10.  -0.  -5.]]
[[  5.  28. -27.]
 [ -5.  -7.  13.]
 [ 10.   0.  -5.]]
[[ 8.88178420e-16  3.55271368e-15 -3.55271368e-15]
 [ 8.88178420e-16  8.88178420e-16  1.77635684e-15]
 [ 0.00000000e+00  0.00000000e+00  0.00000000e+00]]
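
The adjugate identity can be turned around to compute an inverse, A^(-1) = adj(A)/det(A), valid whenever det(A) is nonzero. A minimal sketch reusing the cofactors() helper defined above:

In [ ]:
import numpy as np

A = np.array([[1, 4, 5],
              [3, 7, 2],
              [2, 8, 3]])

#Inverse via the adjugate (requires det(A) != 0); cofactors() is the
#helper defined in the previous cell
inv_via_adjugate = cofactors(A).T / np.linalg.det(A)
print(np.allclose(inv_via_adjugate, np.linalg.inv(A)))  #expected: True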

Characteristic Polynomials, the Cayley-Hamilton Theorem, and Jordan Forms

In [ ]:
A = np.array([[1, 1], [0, 2]])
print(A)

det_A = np.linalg.det(A)
print(det_A)

x = symbols('x')
A_sym = Matrix(A)
cpA = A_sym.charpoly(x)
print(cpA)  #characteristic polynomial: x**2 - 3*x + 2

A_inv = np.linalg.inv(A)
print(A_inv)

print(np.dot(A_inv, A))

#Cayley-Hamilton theorem
#cp(x) = a2 x^2 + a1 x + a0
#cp(A) = a2 A^2 + a1 A + a0 I = 0
a2, a1, a0 = cpA.all_coeffs()
a2, a1, a0 = float(a2), float(a1), float(a0)

lhs = a2 * np.linalg.matrix_power(A, 2) + a1 * A + a0 * np.identity(2)
print(lhs)

#Jordan normal form
A2 = np.array([[2, 1], [0, 2]])
A3 = np.array([[2, 2], [0, 2]])
P2, J2 = Matrix(A2).jordan_form()  #sympy returns (P, J) with A = P J P^(-1)
print(J2)
P3, J3 = Matrix(A3).jordan_form()
print(J3)

#Custom Jordan block matrices
def jordan_block(val, size):
    J = np.zeros((size, size))
    np.fill_diagonal(J, val)
    for i in range(size - 1):
        J[i, i+1] = 1
    return J

#Create Jordan blocks with repeated eigenvalues
J1 = jordan_block(4, 2)
print("Jordan block (λ=4, size=2):\n", J1)
print("Eigenvalues:", np.linalg.eigvals(J1))

J2 = jordan_block(5, 3)
print("Jordan block (λ=5, size=3):\n", J2)
print("Eigenvalues:", np.linalg.eigvals(J2))

J3 = jordan_block(6, 4)
print("Jordan block (λ=6, size=4):\n", J3)
print("Eigenvalues:", np.linalg.eigvals(J3))

#Jordan matrix with multiple blocks
from scipy.linalg import block_diag

J_multi = block_diag(jordan_block(4, 2), jordan_block(5, 3), jordan_block(6, 1))
print("Composite Jordan matrix:\n", J_multi)

#Cayley-Hamilton verification for a Jordan block
A = jordan_block(4, 3)
A_sym = Matrix(A)
cpA = A_sym.charpoly(x)
coeffs = [float(c) for c in cpA.all_coeffs()]
lhs = sum(coeffs[i] * np.linalg.matrix_power(A, len(coeffs) - i - 1) for i in range(len(coeffs)))
print("Cayley-Hamilton on Jordan block:\n", lhs)

#Example generalized eigenvectors and Jordan chains
X = np.array([
    [1, 1, 0, 0],
    [1, 1, 0, 0],
    [1, 0, 1, 0],
    [1, 0, 0, 1]
]).T

J = np.array([
    [1, 0, 0, 0],
    [1, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1]
]).T

#Recover the original matrix from the Jordan decomposition via
#A = X @ J @ inv(X).  Note: as defined above, the first two columns
#of X are identical, so X is singular and inv(X) does not exist;
#the reconstruction is therefore left commented out.
#A_reconstructed = X @ J @ np.linalg.inv(X)
#print(A_reconstructed)
[[1 1]
 [0 2]]
2.0
PurePoly(x**2 - 3*x + 2, x, domain='ZZ')
[[ 1.  -0.5]
 [ 0.   0.5]]
[[1. 0.]
 [0. 1.]]
[[0. 0.]
 [0. 0.]]
Matrix([[2, 1], [0, 2]])
Matrix([[2, 1], [0, 2]])
Jordan block (λ=4, size=2):
 [[4. 1.]
 [0. 4.]]
Eigenvalues: [4. 4.]
Jordan block (λ=5, size=3):
 [[5. 1. 0.]
 [0. 5. 1.]
 [0. 0. 5.]]
Eigenvalues: [5. 5. 5.]
Jordan block (λ=6, size=4):
 [[6. 1. 0. 0.]
 [0. 6. 1. 0.]
 [0. 0. 6. 1.]
 [0. 0. 0. 6.]]
Eigenvalues: [6. 6. 6. 6.]
Composite Jordan matrix:
 [[4. 1. 0. 0. 0. 0.]
 [0. 4. 0. 0. 0. 0.]
 [0. 0. 5. 1. 0. 0.]
 [0. 0. 0. 5. 1. 0.]
 [0. 0. 0. 0. 5. 0.]
 [0. 0. 0. 0. 0. 6.]]
Cayley-Hamilton on Jordan block:
 [[0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]]
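
To see a Jordan chain in action, one can start from a Jordan matrix J and an invertible chain matrix X, form A = X J X^(-1), and verify the chain relations. A self-contained sketch (this X is a hypothetical invertible example, not the singular X from the cell above):

In [ ]:
import numpy as np

J = np.array([[1., 1., 0., 0.],
              [0., 1., 0., 0.],
              [0., 0., 1., 0.],
              [0., 0., 0., 1.]])
#Columns of X: eigenvector v1, generalized eigenvector v2, two more eigenvectors
X = np.array([[1., 0., 0., 0.],
              [1., 1., 0., 0.],
              [1., 0., 1., 0.],
              [1., 0., 0., 1.]])

A = X @ J @ np.linalg.inv(X)
v1, v2 = X[:, 0], X[:, 1]
print(np.allclose((A - np.eye(4)) @ v1, 0))   #v1 is an eigenvector: (A - I)v1 = 0
print(np.allclose((A - np.eye(4)) @ v2, v1))  #v2 is a generalized eigenvector: (A - I)v2 = v1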