#Credits - https://github.com/hunkim/PyTorchZeroToAll/blob/master/05_linear_regression.py
import torch

#Training data: y = 2x; both tensors have shape (3, 1)
#(torch.autograd.Variable is deprecated; plain tensors work directly)
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[2.0], [4.0], [6.0]])
class Model(torch.nn.Module):
    def __init__(self):
        #Constructor
        super(Model, self).__init__()
        self.linear = torch.nn.Linear(1, 1) #One input feature, one output feature

    def forward(self, x):
        #x: input tensor of shape (N, 1); returns predictions of shape (N, 1)
        y_pred = self.linear(x)
        return y_pred
#Initialize model
model = Model()

#Loss function and optimizer
#(size_average is deprecated; reduction='sum' gives the same summed MSE)
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
#Training loop
for epoch in range(500):
    y_pred = model(x_data)

    #Compute and print loss
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())

    #Zero gradients, backpropagate, update weights
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
#After training
hour_var = torch.Tensor([[4.0]])
y_pred = model(hour_var)
print('Predict after training', 4, y_pred.item())
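Since the training data follow y = 2x exactly, the fitted parameters should land close to weight 2 and bias 0. A quick sanity check of my own (not part of the original script):

#Inspect the learned parameters of the single linear layer
print('weight', model.linear.weight.item())   #should approach 2.0
print('bias', model.linear.bias.item())       #should approach 0.0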
#Credits - https://www.youtube.com/watch?v=wbJJudn-Xn0&list=PLX5lD3sNR32CELTjbVRNMUCUakEO1Lu0H&index=2
#https://github.com/hunkim/PyTorchZeroToAll/blob/master/10_1_cnn_mnist.py
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.optim as optim
#Load the MNIST dataset (root is a local Windows path; adjust for your machine)
train_dataset = datasets.MNIST(root=r'C:\Intel\Data', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(root=r'C:\Intel\Data', train=False, transform=transforms.ToTensor(), download=True)

batch_size = 100
epochs = 10

#Make the datasets iterable in mini-batches
train_load = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_load = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

print(len(train_dataset))   #60000 images
print(len(test_dataset))    #10000 images
print(len(train_load))      #600 batches of 100
print(len(test_load))       #100 batches of 100
#Model class
class NET(nn.Module):
    def __init__(self):
        super(NET, self).__init__()
        #First convolutional layer
        #Greyscale input - one channel
        #"Same" padding keeps input size = output size; same padding = (kernel_size - 1) / 2
        self.cnn1 = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=3, stride=1, padding=1)
        self.batchnorm1 = nn.BatchNorm2d(8)
        self.relu = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=2)
        #After max pooling the feature map is 28/2 = 14
        self.cnn2 = nn.Conv2d(in_channels=8, out_channels=32, kernel_size=5, stride=1, padding=2)
        #Output stays 14 (same padding)
        self.batchnorm2 = nn.BatchNorm2d(32)
        self.maxpool2 = nn.MaxPool2d(kernel_size=2)
        #Feature map = 14/2 = 7, so the flattened size is 32*7*7 = 1568
        #Hidden size of 600 is an arbitrary choice, roughly (input + output) / 2
        self.fc1 = nn.Linear(in_features=1568, out_features=600)
        #Dropout randomly disables neurons, with drop probability 0.5
        self.dropout = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(in_features=600, out_features=10)

    def forward(self, x):
        out = self.cnn1(x)
        out = self.batchnorm1(out)
        out = self.relu(out)
        out = self.maxpool1(out)
        out = self.cnn2(out)
        out = self.batchnorm2(out)
        out = self.relu(out)
        out = self.maxpool2(out)
        #Flatten to (batch_size, 1568), here 100 x 1568
        out = out.view(-1, 1568)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)
        return out
model = NET()
print(model)
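#A quick optional sanity check, not in the original tutorial: push a dummy
#batch through the untrained network and confirm the size arithmetic above,
#i.e. that the flattened 32*7*7 = 1568 features reach the final 10 classes
dummy = torch.randn(2, 1, 28, 28)   #two fake 28x28 greyscale images
print(model(dummy).shape)           #expected: torch.Size([2, 10])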
#Loss function and optimizer
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

iteration = 0
correct_nodata = 0
correct_data = 0
#Run a single batch through the model and inspect the shapes
for i, (inputs, labels) in enumerate(train_load):
    if iteration == 1:
        break
    print("for one iteration, this is what happens:")
    print('Input Shape:', inputs.shape)
    print('Labels Shape:', labels.shape)
    output = model(inputs)
    print('Outputs Shape:', output.shape)
    #Without .data: indices taken from the autograd-tracked tensor
    _, predicted_nodata = torch.max(output, 1)
    print('predicted shape', predicted_nodata.shape)
    print('predicted tensor', predicted_nodata)
    correct_nodata += (predicted_nodata == labels).sum()
    print('correct predictions', correct_nodata)
    #With .data: same indices, taken from the raw tensor data
    _, predicted_data = torch.max(output.data, 1)
    print('predicted shape', predicted_data.shape)
    correct_data += (predicted_data == labels).sum()
    print('predicted tensor', predicted_data)
    print('correct predictions', correct_data)
    iteration += 1
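#For reference (an illustrative aside, not from the original post): torch.max
#along dim=1 returns a (values, indices) pair, and the indices are what serve
#as the predicted class labels above
scores = torch.tensor([[0.1, 2.0, 0.3]])
values, indices = torch.max(scores, 1)
print(values, indices)   #tensor([2.]) tensor([1])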
n_iter = 0   #renamed from 'iter' to avoid shadowing the Python built-in
for epoch in range(epochs):
    for i, (images, labels) in enumerate(train_load):
        n_iter += 1
        optimizer.zero_grad()
        outputs = model(images)
        loss = loss_fn(outputs, labels)
        loss.backward()
        optimizer.step()

        #Test the model every 100 iterations
        if (i + 1) % 100 == 0:
            correct = 0
            total = 0
            model.eval()   #disable dropout, use batchnorm running stats
            with torch.no_grad():
                for images, labels in test_load:
                    output = model(images)
                    _, predicted = torch.max(output.data, 1)   #was outputs.data, a bug
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()
            model.train()  #back to training mode
            print('total', total)
            print('correct', correct)
            accuracy = 100.0 * correct / total
            print('Iteration: {}, train loss: {:.4f}, test accuracy: {:.2f}%'.format(n_iter, loss.item(), accuracy))

print('Done!')
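Once training finishes, you would normally persist the weights rather than retrain every time. A minimal sketch (the file name mnist_cnn.pt is my choice, not from the original post):

#Save the trained weights, then restore them into a fresh NET instance
torch.save(model.state_dict(), 'mnist_cnn.pt')
model2 = NET()
model2.load_state_dict(torch.load('mnist_cnn.pt'))
model2.eval()   #evaluation mode: dropout off, batchnorm uses running stats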