ResidualDynamics_DL_validation.py
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
# TODO: data augmentation and transfer learning (a noise-jitter sketch is below, near the data split)
# TODO: try fancier model classes, from LSTMs to PINNs
# TODO: more in-the-loop learning
# Use double precision throughout to match the float64 numpy data
torch.set_default_dtype(torch.float64)

# Define the neural network architecture
class SimpleNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(6, 10)   # 6 input features -> 10 hidden units
        self.fc2 = nn.Linear(10, 10)  # 10 hidden units -> 10 hidden units
        self.fc3 = nn.Linear(10, 10)  # 10 hidden units -> 10 hidden units
        self.fc4 = nn.Linear(10, 10)  # 10 hidden units -> 10 hidden units
        self.fc5 = nn.Linear(10, 3)   # 10 hidden units -> 3 output units

    def forward(self, x):
        x = torch.relu(self.fc1(x))  # ReLU activation for the first layer
        x = torch.relu(self.fc2(x))  # ReLU activation for the second layer
        x = torch.relu(self.fc3(x))  # ReLU activation for the third layer
        x = torch.relu(self.fc4(x))  # ReLU activation for the fourth layer
        x = self.fc5(x)              # Final output layer, no activation
        return x
# Create an instance of the neural network
model = SimpleNN()
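# Quick sanity check (a sketch, not part of training): a batch of four
# 6-dimensional states should map to four 3-dimensional predictions.
assert model(torch.zeros(4, 6)).shape == (4, 3)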
# Define loss function and optimizer
criterion = nn.MSELoss() # Mean Squared Error loss
optimizer = optim.Adam(model.parameters(), lr=0.0001) # Adam optimizer
# Load the filtered state data
data = np.load("Project/filtered_state_EKF_CR3BP.npy")
np.random.shuffle(data.T)  # Shuffle the sample order (columns of data) before splitting
# Define the sizes for training and validation sets
train_size = int(0.9 * len(data.T))
val_size = len(data.T) - train_size
# Split the data into inputs (first 6 rows) and targets (last 3 rows)
train_inputs = torch.tensor(data[:6, :train_size]).t()
train_targets = torch.tensor(data[6:, :train_size]).t()
val_inputs = torch.tensor(data[:6, train_size:]).t()
val_targets = torch.tensor(data[6:, train_size:]).t()
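# Sketch for the data-augmentation TODO above (hypothetical helper, not called
# during training): jitter the training inputs with small Gaussian noise so the
# network sees perturbed states. The noise scale is an assumed placeholder.
def augment_with_noise(inputs, targets, noise_std=1e-6):
    noisy_inputs = inputs + noise_std * torch.randn_like(inputs)
    return torch.cat([inputs, noisy_inputs]), torch.cat([targets, targets])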
# Lists to store training loss for plotting
train_loss_history = []
# Lists to store prediction errors for verification plot
prediction_errors = []
# Training loop
num_epochs = 300000
for epoch in range(num_epochs):
    # Forward pass
    model.train()
    optimizer.zero_grad()  # Clear gradients
    outputs = model(train_inputs)
    loss = criterion(outputs, train_targets)

    # Backward pass and optimization
    loss.backward()   # Compute gradients
    optimizer.step()  # Update weights

    # Store the loss for plotting
    train_loss_history.append(loss.item())

    # Calculate prediction error for the validation set
    model.eval()
    with torch.no_grad():
        val_outputs = model(val_inputs)
        prediction_error = (
            100
            * torch.abs(val_outputs - val_targets).mean().item()
            / torch.abs(val_targets.mean()).item()
        )  # Percent error relative to the mean validation target
        prediction_errors.append(prediction_error)

    # Logging every 100 epochs
    if (epoch + 1) % 100 == 0:
        print(
            f"Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.14f}, "
            f"Prediction Relative Percent Error: {prediction_error:.14f}"
        )
# Optionally, save the trained model
torch.save(model.state_dict(), "Project/simple_nn_model.pth")
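# Sketch: reload the saved weights into a fresh SimpleNN for later inference
# (same path as the save above).
restored_model = SimpleNN()
restored_model.load_state_dict(torch.load("Project/simple_nn_model.pth"))
restored_model.eval()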
# Plot the training loss
plt.figure()
# plt.rc("text", usetex=True)
plt.semilogy(train_loss_history, color="blue")
plt.xlabel(r"Training Epoch")
plt.ylabel(r"Loss, Training Dataset")
plt.grid(True, which="both", linestyle="--")
plt.savefig("Project/TrainingLoss.pdf", format="pdf")
plt.show()
# Plot the prediction errors for verification
plt.figure()
plt.plot(prediction_errors, color="red")
plt.xlabel(r"Training Epoch")
plt.ylabel(r"Prediction Relative Percent Error, Validation Dataset")
plt.grid(True, which="both", linestyle="--")
plt.savefig("Project/PredictionError.pdf", format="pdf")
plt.show()