# Used for analysis
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 100)

# Used for plotting
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
import plotly.express as px

# Model
import torch
from torch import nn

import sqlite3

print('All imported')

# Pull the LEO satellite orbital elements from SQLite: odd-id rows become the
# input sequence and even-id rows the target sequence, so each target is the
# state that follows its input.
db = sqlite3.connect('../Greg/leo_sats.sqlite')

columns = ('apoapsis, arg_of_pericenter, bstar, eccentricity, inclination, '
           'mean_anomaly, mean_motion, mean_motion_ddot, mean_motion_dot, '
           'periapsis, ra_of_asc_node, rev_at_epoch, semimajor_axis')
odds = f'select {columns} from orbit where id % 2 != 0'
evens = f'select {columns} from orbit where id % 2 == 0'

cursor = db.cursor()
cursor.execute(odds)
input_seq = cursor.fetchall()
cursor.close()

cursor = db.cursor()
cursor.execute(evens)
target_seq = cursor.fetchall()
cursor.close()

# Alternative: load the same columns from a CSV export instead of SQLite.
#df = pd.read_csv('../AGENA-TARGET_1966-065A.csv')
#practice_df1 = df[['APOAPSIS', 'ARG_OF_PERICENTER', 'BSTAR', 'ECCENTRICITY',
#                   'INCLINATION', 'MEAN_ANOMALY', 'MEAN_MOTION',
#                   'MEAN_MOTION_DDOT', 'MEAN_MOTION_DOT', 'PERIAPSIS',
#                   'RA_OF_ASC_NODE', 'REV_AT_EPOCH', 'SEMIMAJOR_AXIS']]
#input_seq = practice_df1.iloc[::2]
#target_seq = practice_df1.iloc[1::2]

# The two queries can return different row counts; trim to the shorter one so
# the input and target tensors line up row for row.
n = min(len(input_seq), len(target_seq))
input_seq = torch.from_numpy(np.asarray(input_seq[:n], dtype=np.float32))
target_seq = torch.from_numpy(np.asarray(target_seq[:n], dtype=np.float32))

is_cuda = torch.cuda.is_available()
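# Optional, and an assumption rather than part of the original pipeline: the
# thirteen orbital elements live on very different scales (semi-major axis in
# kilometres versus eccentricity near zero), which makes raw values hard for a
# small RNN to fit. A minimal per-feature standardization sketch, left
# commented out so the pipeline below runs unchanged:
#feat_mean = input_seq.mean(dim=0, keepdim=True)
#feat_std = input_seq.std(dim=0, keepdim=True).clamp(min=1e-8)
#input_seq = (input_seq - feat_mean) / feat_std
#target_seq = (target_seq - feat_mean) / feat_std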
# If we have a GPU available, set our device to GPU. We use this device
# variable later in the code.
if is_cuda:
    device = torch.device("cuda")
    print("GPU is available")
else:
    device = torch.device("cpu")
    print("GPU not available, CPU used")


class Model(nn.Module):
    def __init__(self, input_size, output_size, hidden_dim, n_layers):
        super(Model, self).__init__()

        # Defining some parameters
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers

        # Defining the layers
        # RNN layer
        self.rnn = nn.RNN(input_size, hidden_dim, n_layers, batch_first=True)
        # Fully connected layer
        self.fc = nn.Linear(hidden_dim, output_size)

    def forward(self, x):
        batch_size = x.size(0)

        # Initializing the hidden state for the first input using the method
        # defined below
        hidden = self.init_hidden(batch_size)

        # Passing the input and hidden state into the model and obtaining outputs
        out, hidden = self.rnn(x, hidden)

        # Reshaping the outputs so they can be fed into the fully connected layer
        out = out.contiguous().view(-1, self.hidden_dim)
        out = self.fc(out)

        return out, hidden

    def init_hidden(self, batch_size):
        # Generates the initial hidden state of zeros used in the forward pass,
        # placed on the device we specified earlier
        hidden = torch.zeros(self.n_layers, batch_size, self.hidden_dim).to(device)
        return hidden


# Instantiate the model with hyperparameters: 13 orbital elements in, 13 out
dict_size = 13
model = Model(input_size=dict_size, output_size=dict_size, hidden_dim=12, n_layers=1)

# Set the model to the device we defined earlier (default is CPU)
model = model.to(device)

# Define hyperparameters
n_epochs = 100
lr = 0.01

# Define loss and optimizer. The targets are continuous orbital elements, so
# this is a regression problem: mean squared error is the appropriate loss,
# since cross-entropy expects class labels rather than real-valued targets.
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)

# nn.RNN with batch_first=True expects (batch, seq_len, features), so treat
# the whole history as one batch containing one sequence.
input_seq = input_seq.unsqueeze(0).to(device)
target_seq = target_seq.to(device)

for epoch in range(1, n_epochs + 1):
    optimizer.zero_grad()  # Clears existing gradients from the previous epoch
    output, hidden = model(input_seq)
    loss = criterion(output, target_seq)
    loss.backward()   # Does backpropagation and calculates gradients
    optimizer.step()  # Updates the weights accordingly

    if epoch % 10 == 0:
        print('Epoch: {}/{}.............'.format(epoch, n_epochs), end=' ')
        print("Loss: {:.4f}".format(loss.item()))

# Mean relative error of the final predictions against the targets.
# Both tensors must come back to the CPU before converting to NumPy.
output = output.detach().cpu().numpy()
target = target_seq.cpu().numpy()

sub = np.subtract(output, target)
target[target == 0] = 1  # Guard against dividing by zero-valued targets
div = np.divide(sub, target)
percent = np.mean(div)
print(percent)

# Alternative: the same relative-error computation done in torch.
#sub = torch.sub(torch.abs(output), torch.abs(target_seq))
#div = torch.div(sub, torch.abs(target_seq))
#percent = torch.nanmean(div)
#print(percent)
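# A minimal inference sketch, assuming the model and tensors defined above:
# feed the trained RNN a single known state (one 13-element row) and read off
# the predicted next state. The variable names here are illustrative only.
with torch.no_grad():
    last_state = input_seq[:, -1:, :]   # shape (1, 1, 13): one batch, one time step
    predicted, _ = model(last_state)    # shape (1, 13): predicted next orbital elements
    print(predicted.cpu().numpy())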