
https://medium.com/@cetinsamet/zero-shot-learning-53080995d45f
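The idea, following the article above: train a small MLP that maps 4096-dimensional VGG16 fc2 features into the 300-dimensional word2vec space, with the final linear layer frozen to the word vectors of the 15 seen classes. For evaluation the frozen layer is dropped, features of images from the 5 unseen classes are projected into word-vector space, and a KD-tree nearest-neighbour search over all class vectors predicts the label, so classes never seen during training can still be recognized.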

import VGGnet
import gzip
import _pickle as cPickle
import torch.nn as nn
import numpy as np
import torch
import time

from sklearn.preprocessing import LabelEncoder, normalize
from sklearn.neighbors import KDTree

# define train & zsl classes

global train_classes
global zsl_classes

train_classes = ['arm', 'boy', 'bread', 'chicken', 'child', 'computer', 'ear', 'house', 'leg', 'sandwich',
                 'television', 'truck', 'vehicle', 'watch', 'woman']   # 15 seen classes
zsl_classes = ['car', 'food', 'hand', 'man', 'neck']                   # 5 unseen (zero-shot) classes

# define PATH

WORD2VECPATH="./data/class_vectors.npy"
DATAPATH= "./data/zeroshot_data.pkl"

def to_one_hot(list_, num):
    """Turn a single-element label list (e.g. [7]) into a one-hot vector of length num."""
    one_hot = [0 for i in range(num)]
    one_hot[list_[0]] = 1
    return one_hot

def un_one_hot(list_, num):
    """Recover the label index from a one-hot vector, returned as a single-element list."""
    for i in range(num):
        if list_[i] == 1:
            temp = []
            temp.append(i)
            return temp
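# quick round-trip check of the two helpers above
assert to_one_hot([7], 15)[7] == 1
assert un_one_hot(to_one_hot([7], 15), 15) == [7]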

def custom_kernel_init(shape):
    # Load (label, word vector) pairs and return the (15, 300) matrix of word vectors
    # for the seen classes, sorted by class name. The shape argument is not used.
    class_vectors = np.load(WORD2VECPATH, allow_pickle=True)   # allow_pickle is required for object arrays in newer NumPy
    training_vectors = sorted([(label, vec) for (label, vec) in class_vectors if label in train_classes],
                              key=lambda x: x[0])
    classnames, vectors = zip(*training_vectors)
    print(classnames)
    vectors = np.asarray(vectors, dtype=np.float32)
    return vectors

class Word2Vec_Model(nn.Module):
    def __init__(self):
        super(Word2Vec_Model, self).__init__()
        # MLP that maps a 4096-d VGG fc2 feature to a 300-d word-vector embedding
        self.classifier = nn.Sequential(
            nn.Linear(4096, 1024),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(1024, 512),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 256),
            nn.ReLU(True),
            nn.Linear(256, 300),
        )
        # Final layer projects the 300-d embedding onto the 15 seen classes.
        # Its weight is fixed to the class word vectors and never trained.
        self.LastLinear = nn.Linear(300, 15)
        self.Softmax = nn.Softmax(dim=1)  # not used in forward; CrossEntropyLoss applies softmax internally
        self.LastLinear.weight = nn.Parameter(torch.from_numpy(custom_kernel_init(1)), requires_grad=False)

    def forward(self, x):
        x = self.classifier(x)
        # print('after sequential:{}'.format(x.size()))
        x = self.LastLinear(x)
        # print('after Last Linear:{}'.format(x.size()))
        return x
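# Quick shape check (a sketch; assumes ./data/class_vectors.npy exists so the constructor can load the class vectors):
# m = Word2Vec_Model()
# print(m(torch.randn(1, 4096)).shape)   # expected: torch.Size([1, 15])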

def load_data():
    # Each pickled instance is (class_label, 4096-d VGG feature vector).
    with gzip.GzipFile(DATAPATH, 'rb') as infile:
        data = cPickle.load(infile)

    label_encoder = LabelEncoder()
    label_encoder.fit(train_classes)

    training_data = [instance for instance in data if instance[0] in train_classes]
    zero_shot_data = [instance for instance in data if instance[0] not in train_classes]

    # SHUFFLE TRAINING DATA
    np.random.shuffle(training_data)

    # SPLIT DATA FOR TRAINING
    # 300 instances per class -> 4500 in total (300 * 15 classes)
    train_size = 300
    train_data = list()
    valid_data = list()
    # print(len(training_data))
    for class_label in train_classes:
        ct = 0
        for instance in training_data:
            if instance[0] != class_label:   # keep only instances of the current class
                continue
            if ct < train_size:
                train_data.append(instance)
                ct += 1
                continue
            valid_data.append(instance)

    # SHUFFLE TRAIN AND VALIDATION DATA
    np.random.shuffle(train_data)
    np.random.shuffle(valid_data)

    # debug prints
    print(instance[0])
    print(instance[1])
    print(label_encoder.transform([instance[0]]))
    print(to_one_hot([7], 15))

    # MAKE DATA (img feature, one-hot class)
    train_data = [(instance[1], to_one_hot(label_encoder.transform([instance[0]]), len(train_classes)))
                  for instance in train_data]
    valid_data = [(instance[1], to_one_hot(label_encoder.transform([instance[0]]), len(train_classes)))
                  for instance in valid_data]

    # FORM X_TRAIN AND Y_TRAIN
    x_train, y_train = zip(*train_data)
    x_train, y_train = np.squeeze(np.asarray(x_train)), np.squeeze(np.asarray(y_train))

    # L2 NORMALIZE X_TRAIN
    x_train = normalize(x_train, norm='l2')

    # FORM X_VALID AND Y_VALID
    x_valid, y_valid = zip(*valid_data)
    x_valid, y_valid = np.squeeze(np.asarray(x_valid)), np.squeeze(np.asarray(y_valid))

    # L2 NORMALIZE X_VALID
    x_valid = normalize(x_valid, norm='l2')

    # FORM X_ZSL AND Y_ZSL
    y_zsl, x_zsl = zip(*zero_shot_data)
    x_zsl, y_zsl = np.squeeze(np.asarray(x_zsl)), np.squeeze(np.asarray(y_zsl))

    # L2 NORMALIZE X_ZSL
    x_zsl = normalize(x_zsl, norm='l2')

    print("-> data loading is completed.")

    return (x_train, x_valid, x_zsl), (y_train, y_valid, y_zsl)

def train_model(model, train_data, valid_data):
    device = 'cuda'
    training_epochs = 80
    criterion = torch.nn.CrossEntropyLoss().to(device)  # softmax is computed inside the loss
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0005)

    # train the model, one sample at a time
    model.train()   # set the model to train mode (dropout=True)
    model.float()
    print('Learning started. It takes some time.')
    for epoch in range(training_epochs):
        for X, Y in zip(*train_data):
            Y = un_one_hot(Y, 15)                         # one-hot -> class index (single-element list)
            Y = np.asarray(Y)
            X = torch.from_numpy(X).float().to(device)    # .float() so the input matches the float32 model
            Y = torch.from_numpy(Y).to(device)
            optimizer.zero_grad()
            hypothesis = model(X.unsqueeze(0))            # add a batch dimension
            cost = criterion(hypothesis, Y)
            cost.backward()
            optimizer.step()
        print('Epoch:{} is completed. time:{}'.format(epoch + 1, time.time() - start))

    print('Learning Finished!')
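# The loop above updates on one sample at a time (the one-hot labels are converted back to indices
# per sample). A batched variant (a sketch, not the original code) could use a DataLoader with
# integer labels directly:
from torch.utils.data import TensorDataset, DataLoader

def train_model_batched(model, x_train, y_train, epochs=80, batch_size=128, device='cuda'):
    # y_train holds one-hot rows; argmax recovers the class index expected by CrossEntropyLoss
    dataset = TensorDataset(torch.from_numpy(x_train).float(),
                            torch.from_numpy(np.argmax(y_train, axis=1)).long())
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    criterion = torch.nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0005)
    model.train()
    for epoch in range(epochs):
        for X, Y in loader:
            X, Y = X.to(device), Y.to(device)
            optimizer.zero_grad()
            cost = criterion(model(X), Y)
            cost.backward()
            optimizer.step()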

def main():
    device = 'cuda'
    global start
    start = time.time()

    # VGG16 truncated after the first four classifier modules -> 4096-d fc2 features.
    # The pickled data already contains such features, so VGGmodel is not used again below;
    # it is only prepared here for extracting features from new images.
    VGGmodel = VGGnet.vgg16(pretrained=True)
    VGGmodel.classifier = nn.Sequential(*[VGGmodel.classifier[i] for i in range(4)])

    # ---------------------------------------------------------------------------------------------------------------- #
    # SET HYPERPARAMETERS

    global NUM_CLASS, NUM_ATTR, EPOCH, BATCH_SIZE
    NUM_CLASS = 15
    NUM_ATTR = 300
    BATCH_SIZE = 128
    EPOCH = 30          # note: train_model below uses its own 80-epoch, per-sample loop

    # ---------------------------------------------------------------------------------------------------------------- #
    # TRAINING PHASE

    (x_train, x_valid, x_zsl), (y_train, y_valid, y_zsl) = load_data()
    word2vec_model = Word2Vec_Model().to(device)

    print((x_train, y_train))

    train_model(word2vec_model, (x_train, y_train), (x_valid, y_valid))

    print(word2vec_model)

    print("-------------------------------------")

    # Drop the frozen LastLinear (and the unused Softmax): what remains maps features to 300-d word vectors.
    ZSL_layers = list(word2vec_model.children())[:-2]
    ZSL_model = nn.Sequential(*ZSL_layers).cuda()

    print(ZSL_model)

    # EVALUATION OF ZERO-SHOT LEARNING PERFORMANCE

    class_vectors = sorted(np.load(WORD2VECPATH, allow_pickle=True), key=lambda x: x[0])
    classnames, vectors = zip(*class_vectors)
    classnames = list(classnames)
    vectors = np.asarray(vectors, dtype=np.float32)

    print(vectors[0].shape)

    # KD-tree over ALL class word vectors (seen + unseen) for nearest-neighbour prediction
    tree = KDTree(vectors)

    print(tree)
    print(x_zsl.shape)
    x_zsl = torch.from_numpy(x_zsl).float().to(device)
    pred_zsl = ZSL_model(x_zsl.unsqueeze(0))

    print(pred_zsl.shape)

    pred_zsl = pred_zsl.squeeze(0)

    print(pred_zsl.shape)

    top5, top3, top1 = 0, 0, 0

    with torch.no_grad():
        for i, pred in enumerate(pred_zsl):
            pred = pred.to('cpu')
            pred = pred.detach().numpy()
            pred = np.expand_dims(pred, axis=0)
            dist_5, index_5 = tree.query(pred, k=5)
            pred_labels = [classnames[index] for index in index_5[0]]
            true_label = y_zsl[i]
            if true_label in pred_labels:
                top5 += 1
            if true_label in pred_labels[:3]:
                top3 += 1
            if true_label == pred_labels[0]:   # exact match, not substring containment
                top1 += 1

    print()
    print("ZERO SHOT LEARNING SCORE")
    print("-> Top-5 Accuracy: %.2f" % (top5 / float(len(x_zsl))))
    print("-> Top-3 Accuracy: %.2f" % (top3 / float(len(x_zsl))))
    print("-> Top-1 Accuracy: %.2f" % (top1 / float(len(x_zsl))))

if __name__ == '__main__':
    main()
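# A minimal end-to-end sketch (not part of the original script) of how a single NEW image could be
# classified with the pieces above: extract the 4096-d fc2 feature with the truncated VGG16, map it
# to word-vector space with ZSL_model, then take the nearest class word vector from the KD-tree.
# The preprocessing values are the standard ImageNet ones; the image path is hypothetical.
def predict_single_image(image_path, VGGmodel, ZSL_model, tree, classnames, device='cuda'):
    from PIL import Image
    from torchvision import transforms
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    img = preprocess(Image.open(image_path).convert('RGB')).unsqueeze(0).to(device)
    VGGmodel = VGGmodel.to(device).eval()
    ZSL_model = ZSL_model.eval()
    with torch.no_grad():
        feat = VGGmodel(img)                                          # (1, 4096) fc2 feature
        feat = normalize(feat.cpu().numpy(), norm='l2')               # same L2 normalisation as training data
        emb = ZSL_model(torch.from_numpy(feat).float().to(device))    # (1, 300) word embedding
    _, idx = tree.query(emb.cpu().numpy(), k=1)
    return classnames[idx[0][0]]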

-------------

# VGGnet.py

import torch.nn as nn
import torch.utils.model_zoo as zoo

__all__ = [
    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19',
]

model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}

class VGG(nn.Module):

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes)
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)

cfgs = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}

def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
    print(model_urls[arch])
    if pretrained:
        state_dict = zoo.load_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    return model

def vgg11(pretrained=False, progress=True, **kwargs):
    """VGG 11-layer model (configuration "A")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)

def vgg11_bn(pretrained=False, progress=True, **kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)

def vgg13(pretrained=False, progress=True, **kwargs):
    """VGG 13-layer model (configuration "B")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)

def vgg13_bn(pretrained=False, progress=True, **kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)

def vgg16(pretrained=False, progress=True, **kwargs):
    """VGG 16-layer model (configuration "D")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)

def vgg16_bn(pretrained=False, progress=True, **kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)

def vgg19(pretrained=False, progress=True, **kwargs):
    """VGG 19-layer model (configuration "E")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)

def vgg19_bn(pretrained=False, progress=True, **kwargs):
    """VGG 19-layer model (configuration 'E') with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)

-------------

# Resnet (reproducing in pytorch) - 2019.07.03

# import library
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torch.nn.init
import sys
import cv2

import matplotlib.pyplot as plt

import numpy as np

def make_output(x):
    if not (type(x) is list):
        return x.cpu().data.numpy()
    else:
        return [make_output(i) for i in x]
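# Example (a sketch): nested lists of tensors become nested lists of NumPy arrays.
# out = make_output([torch.ones(2, 2), [torch.zeros(3)]])
# print(type(out[0]), type(out[1][0]))   # <class 'numpy.ndarray'> <class 'numpy.ndarray'>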

# use GPU if available

device='cuda' if torch.cuda.is_available() else 'cpu'

#define parameters

batch_size=128
learning_rate=0.1

print(torch.__version__)

#data


#loading data
transforms_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

transforms_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])


CIFAR10_train = dsets.CIFAR10(root='CIFAR10_data/',
                              train=True,
                              transform=transforms_train,
                              download=True)

CIFAR10_test = dsets.CIFAR10(root='CIFAR10_data/',
                             train=False,
                             transform=transforms_test,
                             download=True)

# dataset loaders
train_loader = torch.utils.data.DataLoader(dataset=CIFAR10_train,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           drop_last=True)

test_loader = torch.utils.data.DataLoader(dataset=CIFAR10_test,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          drop_last=True)
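# Note (added): drop_last=True discards the final incomplete batch. For the 10,000-image test set with
# batch_size=128 this skips the last 16 images, but it keeps every batch at exactly batch_size samples,
# which the original forward() relied on when flattening with view(batch_size, -1).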
#
# for X,Y in train_loader:
# print(X[0].shape)
# print(X[0].mean(axis=(0,1,2)))
# print(X[0].std(axis=(0,1,2)))
# break
#
# sys.exit()
# define model

class Resnet(torch.nn.Module):

    def __init__(self):
        super(Resnet, self).__init__()
        self.relu = torch.nn.ReLU(inplace=True)

        self.layer_3x16_i = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=3, out_channels=16, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(16),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=16, out_channels=16, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(16)
        )
        self.layer_3x16 = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=16, out_channels=16, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(16),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=16, out_channels=16, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(16)
        )
        self.layer_3x32_i = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=16, out_channels=32, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(32),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=32, out_channels=32, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(32)
        )
        self.layer_3x32 = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=32, out_channels=32, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(32),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=32, out_channels=32, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(32)
        )

        # two stride-2 convolutions without padding: 32x32 -> 15x15 -> 7x7
        self.layer_3x64_i = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2),
            torch.nn.BatchNorm2d(64)
        )
        # 3x3_64 layer
        self.layer_3x64 = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=64, out_channels=64, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=64, out_channels=64, padding=1, kernel_size=3),
            torch.nn.BatchNorm2d(64)
        )

        # The 128/256/512 blocks below are defined but not used in forward().
        # 3x3_128 layer for initial
        self.layer_3x128_i = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=64, out_channels=128, padding=1, kernel_size=3, stride=2),
            torch.nn.Conv2d(in_channels=128, out_channels=128, padding=1, kernel_size=3)
        )

        # 3x3_128 layer
        self.layer_3x128 = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=128, out_channels=128, padding=1, kernel_size=3),
            torch.nn.Conv2d(in_channels=128, out_channels=128, padding=1, kernel_size=3)
        )

        # 3x3 256 layer for initial
        self.layer_3x256_i = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=128, out_channels=256, padding=1, kernel_size=3, stride=2),
            torch.nn.Conv2d(in_channels=256, out_channels=256, padding=1, kernel_size=3)
        )

        # 3x3 256 layer
        self.layer_3x256 = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=256, out_channels=256, padding=1, kernel_size=3),
            torch.nn.Conv2d(in_channels=256, out_channels=256, padding=1, kernel_size=3)
        )

        # 3x3 512 layer for initial
        self.layer_3x512_i = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=256, out_channels=512, padding=1, kernel_size=3, stride=2),
            torch.nn.Conv2d(in_channels=512, out_channels=512, padding=1, kernel_size=3)
        )

        # 3x3 512 layer
        self.layer_3x512 = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=512, out_channels=512, padding=1, kernel_size=3),
            torch.nn.Conv2d(in_channels=512, out_channels=512, padding=1, kernel_size=3)
        )

        # avg pooling layer: needs a check for the wanted result (currently the 7x7x64 map is flattened directly)
        self.fc = torch.nn.Linear(7 * 7 * 64, 10, bias=True)
        torch.nn.init.kaiming_uniform_(self.fc.weight)   # in-place kaiming_uniform_; kaiming_uniform is deprecated

    def forward(self, x):
        out = self.layer_3x16_i(x)
        out = self.relu(out)
        temp = torch.Tensor()
        # print('the size after layer 3x16 for initial : {}'.format(out.shape))
        for i in range(3):
            temp = out.clone()            # identity for the residual connection
            out = self.layer_3x16(out)
            out = out + temp
            out = self.relu(out)
        # print('the size after layer 3x16: {}'.format(out.shape))
        out = self.layer_3x32_i(out)
        out = self.relu(out)
        # print('the size after layer 3x32 for initial: {}'.format(out.shape))
        for i in range(3):
            temp = out.clone()
            out = self.layer_3x32(out)
            out = out + temp
            out = self.relu(out)
        # print('the size after layer 3x32: {}'.format(out.shape))
        out = self.layer_3x64_i(out)
        out = self.relu(out)
        # print('the size after layer 3x64 for initial: {}'.format(out.shape))
        for i in range(3):
            temp = out.clone()
            out = self.layer_3x64(out)
            out = out + temp
            out = self.relu(out)
        # print('the size after layer 3x64: {}'.format(out.shape))
        out = out.view(out.size(0), -1)   # flatten; out.size(0) avoids depending on the global batch_size
        out = self.fc(out)
        return out

#instantiate model
model=Resnet().to(device)


#Test the model
# data=torch.rand(1,3,32,32)
# x=data.clone()
#
# data=data.to(device)
# result=model(data)
# print(result.shape)


# define cost/loss & optimizer
criterion = torch.nn.CrossEntropyLoss().to(device) # Softmax is internally computed.
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate ,weight_decay=0.0001,momentum=0.9)
# lr_sche = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
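# Alternative (a sketch, not in the original code): the manual learning-rate division in the
# training loop below can be replaced by a scheduler; milestones match the decays at epochs 200 and 400.
# lr_sche = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200, 400], gamma=0.1)
# ... then call lr_sche.step() once per epoch after the inner batch loop.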
#train model

total_batch=len(train_loader)

# EPOCH (the original ResNet paper trains for 64000 iterations; 500 epochs are used here)
for epoch in range(500):
    avg_cost = 0

    for X, Y in train_loader:
        X = X.to(device)
        Y = Y.to(device)

        optimizer.zero_grad()
        hypothesis = model(X)
        cost = criterion(hypothesis, Y)
        cost.backward()
        optimizer.step()

        avg_cost += cost / total_batch

    # per-class accuracy on the test set after every epoch
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog',
               'frog', 'horse', 'ship', 'truck')

    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            # torch.max returns (max value, index of max value)
            _, predicted = torch.max(outputs, 1)
            c = (predicted == labels).squeeze()
            for i in range(len(labels)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1

    # learning-rate decay at epochs 200 and 400; the optimizer's lr must be updated explicitly
    if (epoch + 1) == 200 or (epoch + 1) == 400:
        learning_rate = learning_rate / 10
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate
        print('Current learning rate is:{}'.format(learning_rate))

    print('[Epoch: {:>4}] cost = {:>.9}'.format(epoch + 1, avg_cost))

    total = 0
    for i in range(10):
        total += 100 * class_correct[i] / class_total[i]

    print("the average of Acc is :{}%".format(total / 10))

print('Learning Finished!')

class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog',
           'frog', 'horse', 'ship', 'truck')


with torch.no_grad():
    for data in test_loader:
        images, labels = data
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # torch.max returns (max value, index of max value)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        for i in range(len(labels)):
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1


total = 0
for i in range(10):
    print('Accuracy of {} : {} %'.format(classes[i],
                                         100 * class_correct[i] / class_total[i]))
    total += 100 * class_correct[i] / class_total[i]


print("the average of Acc is :{}%".format(total / 10))







#
# with torch.no_grad():
# X_test = CIFAR10_test.test_list.view(len(CIFAR10_test), 3, 32, 32).float().to(device)
# Y_test = CIFAR10_test.test_labels.to(device)
#
# prediction = model(X_test)
# correct_prediction = torch.argmax(prediction, 1) == Y_test
# accuracy = correct_prediction.float().mean()
# print('Accuracy:', accuracy.item())

#
#
# for X,Y in data_loader:
# X_result=make_output(X)
# print(type(X_result))
# print(X_result.shape)
# X_result=np.array(X_result,dtype=np.uint8)
#
# K=X_result[0].reshape(32,32,3)
# print(K.shape)
#
# plt.imshow(K)
# plt.show()
# cv2.moveWindow("test",100,100)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
#
# plt.imshow(X_result)
# break
# plt.imshow(X[0])
#
# print('result is below')
# print(result.shape)

 
