# import libraries
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torch.nn.init
import sys
import cv2

import matplotlib.pyplot as plt

import numpy as np

def make_output(x):
    # recursively move tensors (or lists of tensors) to CPU and convert to numpy arrays
    if not isinstance(x, list):
        return x.cpu().data.numpy()
    else:
        return [make_output(i) for i in x]

# use GPU if available
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# define hyperparameters
batch_size = 128
learning_rate = 0.1

print(torch.__version__)

# data loading and augmentation
transforms_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

transforms_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])


CIFAR10_train = dsets.CIFAR10(root='CIFAR10_data/',
                              train=True,
                              transform=transforms_train,
                              download=True)

CIFAR10_test = dsets.CIFAR10(root='CIFAR10_data/',
                             train=False,
                             transform=transforms_test,
                             download=True)

# dataset loaders
train_loader = torch.utils.data.DataLoader(dataset=CIFAR10_train,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           drop_last=True)

test_loader = torch.utils.data.DataLoader(dataset=CIFAR10_test,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          drop_last=True)
#
# for X, Y in train_loader:
#     print(X[0].shape)
#     print(X[0].mean(axis=(0, 1, 2)))
#     print(X[0].std(axis=(0, 1, 2)))
#     break
#
# sys.exit()
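# The Normalize() constants above, (0.4914, 0.4822, 0.4465) and (0.2023, 0.1994, 0.2010),
# are the widely used per-channel mean/std of the CIFAR-10 training set. A minimal sketch
# of how such statistics could be recomputed from the raw data (kept commented out; this
# snippet is only an illustration, not part of the training script itself):
#
# raw_train = dsets.CIFAR10(root='CIFAR10_data/', train=True,
#                           transform=transforms.ToTensor(), download=True)
# pixels = torch.stack([img for img, _ in raw_train])  # (50000, 3, 32, 32), values in [0, 1]
# print(pixels.mean(dim=(0, 2, 3)))                    # per-channel mean
# print(pixels.std(dim=(0, 2, 3)))                     # per-channel std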
# define model
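# The model below is a ResNet-style CNN for CIFAR-10: three stages with 16, 32 and 64
# channels, each made of an initial (non-residual) double-conv block followed by three
# residual blocks of the form out = relu(block(out) + out), ending in a linear classifier
# over the flattened 7x7x64 feature map.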

class Resnet(torch.nn.Module):

    def __init__(self):
        super(Resnet, self).__init__()
        self.relu = torch.nn.ReLU(inplace=True)

        # first 3x3, 16-channel block (takes the 3-channel input image)
        self.layer_3x16_i = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=3, out_channels=16, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(16),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=16, out_channels=16, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(16)
        )
        # 3x3, 16-channel residual block
        self.layer_3x16 = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=16, out_channels=16, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(16),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=16, out_channels=16, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(16)
        )
        # first 3x3, 32-channel block (16 -> 32 channels)
        self.layer_3x32_i = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=16, out_channels=32, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(32),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=32, out_channels=32, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(32)
        )
        # 3x3, 32-channel residual block
        self.layer_3x32 = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=32, out_channels=32, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(32),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=32, out_channels=32, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(32)
        )

        # first 3x3, 64-channel block (32 -> 64 channels; the two stride-2 convs without
        # padding shrink the feature map from 32x32 to 15x15 and then to 7x7)
        self.layer_3x64_i = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2),
            torch.nn.BatchNorm2d(64)
        )
        # 3x3, 64-channel residual block
        self.layer_3x64 = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=64, out_channels=64, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=64, out_channels=64, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(64)
        )

        # The 128/256/512-channel blocks below are defined but not used in forward().

        # 3x3, 128-channel block for initial
        self.layer_3x128_i = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=64, out_channels=128, padding=1, kernel_size=3, stride=2),
            torch.nn.Conv2d(in_channels=128, out_channels=128, padding=1, kernel_size=3)
        )

        # 3x3, 128-channel block
        self.layer_3x128 = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=128, out_channels=128, padding=1, kernel_size=3),
            torch.nn.Conv2d(in_channels=128, out_channels=128, padding=1, kernel_size=3)
        )

        # 3x3, 256-channel block for initial
        self.layer_3x256_i = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=128, out_channels=256, padding=1, kernel_size=3, stride=2),
            torch.nn.Conv2d(in_channels=256, out_channels=256, padding=1, kernel_size=3)
        )

        # 3x3, 256-channel block
        self.layer_3x256 = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=256, out_channels=256, padding=1, kernel_size=3),
            torch.nn.Conv2d(in_channels=256, out_channels=256, padding=1, kernel_size=3)
        )

        # 3x3, 512-channel block for initial
        self.layer_3x512_i = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=256, out_channels=512, padding=1, kernel_size=3, stride=2),
            torch.nn.Conv2d(in_channels=512, out_channels=512, padding=1, kernel_size=3)
        )

        # 3x3, 512-channel block
        self.layer_3x512 = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=512, out_channels=512, padding=1, kernel_size=3),
            torch.nn.Conv2d(in_channels=512, out_channels=512, padding=1, kernel_size=3)
        )

        # NOTE: the original ResNet uses global average pooling here; in this version the
        # 7x7x64 feature map is flattened directly into the linear classifier.
        self.fc = torch.nn.Linear(7 * 7 * 64, 10, bias=True)
        torch.nn.init.kaiming_uniform_(self.fc.weight)

    def forward(self, x):
        # 16-channel stage: initial block, then 3 residual blocks
        out = self.layer_3x16_i(x)
        out = self.relu(out)
        for i in range(3):
            temp = out.clone()
            out = self.layer_3x16(out)
            out = out + temp          # identity shortcut
            out = self.relu(out)
        # 32-channel stage
        out = self.layer_3x32_i(out)
        out = self.relu(out)
        for i in range(3):
            temp = out.clone()
            out = self.layer_3x32(out)
            out = out + temp
            out = self.relu(out)
        # 64-channel stage (spatial size drops to 7x7 here)
        out = self.layer_3x64_i(out)
        out = self.relu(out)
        for i in range(3):
            temp = out.clone()
            out = self.layer_3x64(out)
            out = out + temp
            out = self.relu(out)
        # flatten and classify
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out

#instantiate model
model=Resnet().to(device)
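# Optional sanity check (illustrative, kept commented out): count the trainable parameters.
# num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
# print('trainable parameters:', num_params)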


# Test the model with a dummy input
# data = torch.rand(1, 3, 32, 32)
# data = data.to(device)
# result = model(data)
# print(result.shape)   # expected: torch.Size([1, 10])


# define cost/loss & optimizer
criterion = torch.nn.CrossEntropyLoss().to(device)  # softmax is computed internally
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=0.0001, momentum=0.9)
# lr_sche = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
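# Instead of the manual learning-rate drops inside the training loop below, a MultiStepLR
# scheduler could divide the learning rate by 10 at epochs 200 and 400. This is only an
# illustrative sketch and is not used here; if enabled, scheduler.step() would be called
# once per epoch after the inner batch loop.
#
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200, 400], gamma=0.1)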
#train model

total_batch=len(train_loader)

# EPOCH 64000
for epoch in range(500):
    avg_cost = 0

    # training
    model.train()
    for X, Y in train_loader:
        X = X.to(device)
        Y = Y.to(device)

        optimizer.zero_grad()
        hypothesis = model(X)
        cost = criterion(hypothesis, Y)
        cost.backward()
        optimizer.step()

        avg_cost += cost.item() / total_batch

    # per-class accuracy on the test set after every epoch
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog',
               'frog', 'horse', 'ship', 'truck')

    model.eval()
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            # torch.max returns (max values, indices of the max values)
            _, predicted = torch.max(outputs, 1)
            c = (predicted == labels).squeeze()
            for i in range(len(labels)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1

    # divide the learning rate by 10 at epochs 200 and 400
    # (the optimizer's param_groups must be updated, otherwise the change has no effect)
    if (epoch + 1) == 200 or (epoch + 1) == 400:
        learning_rate = learning_rate / 10
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate
        print('Current learning rate is: {}'.format(learning_rate))

    print('[Epoch: {:>4}] cost = {:>.9}'.format(epoch + 1, avg_cost))

    total = 0
    for i in range(10):
        total += 100 * class_correct[i] / class_total[i]

    print("the average of Acc is: {}%".format(total / 10))

print('Learning Finished!')

# final per-class evaluation on the test set
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog',
           'frog', 'horse', 'ship', 'truck')

model.eval()
with torch.no_grad():
    for data in test_loader:
        images, labels = data
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # torch.max returns (max values, indices of the max values)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        for i in range(len(labels)):
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1

total = 0
for i in range(10):
    print('Accuracy of {} : {} %'.format(classes[i],
                                         100 * class_correct[i] / class_total[i]))
    total += 100 * class_correct[i] / class_total[i]

print("the average of Acc is: {}%".format(total / 10))







#
# with torch.no_grad():
#     X_test = CIFAR10_test.test_list.view(len(CIFAR10_test), 3, 32, 32).float().to(device)
#     Y_test = CIFAR10_test.test_labels.to(device)
#
#     prediction = model(X_test)
#     correct_prediction = torch.argmax(prediction, 1) == Y_test
#     accuracy = correct_prediction.float().mean()
#     print('Accuracy:', accuracy.item())

#
#
# for X, Y in data_loader:
#     X_result = make_output(X)
#     print(type(X_result))
#     print(X_result.shape)
#     X_result = np.array(X_result, dtype=np.uint8)
#
#     K = X_result[0].reshape(32, 32, 3)
#     print(K.shape)
#
#     plt.imshow(K)
#     plt.show()
#     cv2.moveWindow("test", 100, 100)
#     cv2.waitKey(0)
#     cv2.destroyAllWindows()
#
#     plt.imshow(X_result)
#     break
# plt.imshow(X[0])
#
# print('result is below')
# print(result.shape)

 
