PyTorch
Published: 2019-06-23



NumPy vs. Torch

import torch
import numpy as np

np_data = np.arange(6).reshape((2, 3))
torch_data = torch.from_numpy(np_data)
print(
    '\nnumpy', np_data,
    '\ntorch', torch_data)

D:\Test>python test.py

numpy [[0 1 2]
 [3 4 5]]
torch tensor([[0, 1, 2],
        [3, 4, 5]], dtype=torch.int32)
import torch
import numpy as np

np_data = np.arange(6).reshape((2, 3))
torch_data = torch.from_numpy(np_data)
tensor2array = torch_data.numpy()
print(
    '\nnumpy', np_data,
    '\ntorch', torch_data,
    '\ntensor2array', tensor2array,
)

D:\Test>python test.py

numpy [[0 1 2]
 [3 4 5]]
torch tensor([[0, 1, 2],
        [3, 4, 5]], dtype=torch.int32)
tensor2array [[0 1 2]
 [3 4 5]]
import torch
import numpy as np

data = [-1, -2, 1, 2]
tensor = torch.FloatTensor(data)
print(
    '\nabs:',
    '\nnumpy:', np.abs(data),
    '\ntorch:', torch.abs(tensor))

D:\Test>python test.py

abs:
numpy: [1 2 1 2]
torch: tensor([1., 2., 1., 2.])
import torch
import numpy as np

data = [-1, -2, 1, 2]
tensor = torch.FloatTensor(data)
print(
    '\nnumpy:', np.sin(data),
    '\ntorch:', torch.sin(tensor))

D:\Test>python test.py

numpy: [-0.84147098 -0.90929743  0.84147098  0.90929743]
torch: tensor([-0.8415, -0.9093,  0.8415,  0.9093])
import torch
import numpy as np

data = [[1, 2], [3, 4]]
tensor = torch.FloatTensor(data)
print(
    '\nnumpy:', np.matmul(data, data),
    '\ntorch:', torch.mm(tensor, tensor))

D:\Test>python test.py

numpy: [[ 7 10]
 [15 22]]
torch: tensor([[ 7., 10.],
        [15., 22.]])
import torch
import numpy as np

data = [[1, 2], [3, 4]]
tensor = torch.FloatTensor(data)
data = np.array(data)
print(
    '\nnumpy:', data.dot(data),
    '\ntorch:', tensor.dot(tensor))

D:\Test>python test.py
Traceback (most recent call last):
  File "test.py", line 10, in <module>
    '\ntorch:', tensor.dot(tensor))
RuntimeError: dot: Expected 1-D argument self, but got 2-D
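As the error says, torch.dot only accepts 1-D tensors, unlike NumPy's ndarray.dot, which falls back to matrix multiplication for 2-D input. A minimal sketch of the two working alternatives:

import torch

tensor = torch.FloatTensor([[1, 2], [3, 4]])

# flatten to 1-D first: 1*1 + 2*2 + 3*3 + 4*4 = 30
print(torch.dot(tensor.view(-1), tensor.view(-1)))   # tensor(30.)

# for the 2-D matrix product, stick with torch.mm (or torch.matmul)
print(torch.mm(tensor, tensor))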

Variables

import torch
from torch.autograd import Variable

data = ([1, 2], [3, 4])
tensor = torch.FloatTensor(data)
variable = Variable(tensor, requires_grad=True)
print('\ntensor:', tensor)
print('\nvariable:', variable)
For comparison, passing the two rows as separate arguments fails:

import torch
from torch.autograd import Variable

tensor = torch.FloatTensor([1, 2], [3, 4])
variable = Variable(tensor, requires_grad=True)
print('\ntensor:', tensor)
print('\nvariable:', variable)

D:\Test>python test.py
Traceback (most recent call last):
  File "test.py", line 4, in <module>
    tensor = torch.FloatTensor([1, 2], [3, 4])
TypeError: new() received an invalid combination of arguments - got (list, list), but expected one of:
 * (torch.device device)
 * (torch.Storage storage)
 * (Tensor other)
 * (tuple of ints size, torch.device device)
      didn't match because some of the arguments have invalid types: (list, list)
 * (object data, torch.device device)
      didn't match because some of the arguments have invalid types: (list, list)
import torch
from torch.autograd import Variable

data = ([1, 2], [3, 4])
tensor = torch.FloatTensor(data)
variable = Variable(tensor, requires_grad=True)
t_out = torch.mean(tensor * tensor)
v_out = torch.mean(variable * variable)
print('\ntensor:', t_out)
print('\nvariable:', v_out)

D:\Test>python test.py

tensor: tensor(7.5000)
variable: tensor(7.5000, grad_fn=<MeanBackward0>)
Note: since version 0.4, torch has merged Tensor and Variable, so a plain tensor can track gradients by itself.
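A minimal sketch of the post-0.4 style, where requires_grad lives on the tensor itself and no Variable wrapper is needed:

import torch

# since 0.4, a plain tensor can carry requires_grad directly
x = torch.tensor([[1., 2.], [3., 4.]], requires_grad=True)
v_out = torch.mean(x * x)
v_out.backward()
print(x.grad)   # same gradients as the Variable examples in the next section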

Backpropagating gradients: *.backward()

import torch
from torch.autograd import Variable

data = ([1, 2], [3, 4])
tensor = torch.FloatTensor(data)
variable = Variable(tensor, requires_grad=True)
t_out = torch.mean(tensor * tensor)
v_out = torch.mean(variable * variable)
v_out.backward()
print('\nvariable:', variable.grad)

D:\Test>python test.py

variable: tensor([[0.5000, 1.0000],
        [1.5000, 2.0000]])
v_out = 1/4 * sum(variable * variable)
d(v_out)/d(variable) = 1/4 * 2 * variable = variable / 2
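The derivation is easy to verify in code: after backward(), variable.grad should equal variable.data / 2. A small sketch:

import torch
from torch.autograd import Variable

variable = Variable(torch.FloatTensor([[1, 2], [3, 4]]), requires_grad=True)
v_out = torch.mean(variable * variable)
v_out.backward()

# mean over 4 elements: d(v_out)/d(variable) = 2 * variable / 4 = variable / 2
print(torch.allclose(variable.grad, variable.data / 2))   # True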
import torch
from torch.autograd import Variable

data = ([1, 2], [3, 4])
tensor = torch.FloatTensor(data)
variable = Variable(tensor, requires_grad=True)
t_out = torch.mean(tensor * tensor)
v_out = torch.mean(variable * variable)
v_out.backward()
print('\nvariable:', variable.grad)
print('\nvariable.data:', variable.data)
print('\nvariable.data.numpy:', variable.data.numpy())

D:\ptest>python test.py

variable: tensor([[0.5000, 1.0000],
        [1.5000, 2.0000]])
variable.data: tensor([[1., 2.],
        [3., 4.]])
variable.data.numpy: [[1. 2.]
 [3. 4.]]
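For completeness: in current PyTorch, .detach() is the recommended replacement for .data when converting to NumPy, since it interacts more safely with autograd. A minimal sketch:

import torch

x = torch.tensor([[1., 2.], [3., 4.]], requires_grad=True)

# .detach() returns a view that no longer requires grad,
# so .numpy() can be called on it directly
print(x.detach().numpy())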

Activation Functions

import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

x = torch.linspace(-5, 5, 200)
x = Variable(x)
x_np = x.data.numpy()

y_relu = F.relu(x).data.numpy()
y_sigmoid = F.sigmoid(x).data.numpy()
y_tanh = F.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()

plt.figure(1, figsize=(8, 6))
plt.subplot(221)
plt.plot(x_np, y_relu, c='red', label='relu')
plt.ylim((-1, 5))
plt.legend(loc='best')
plt.subplot(222)
plt.plot(x_np, y_sigmoid, c='red', label='sigmoid')
plt.ylim((-0.2, 1.2))
plt.legend(loc='best')
plt.subplot(223)
plt.plot(x_np, y_tanh, c='red', label='tanh')
plt.ylim((-1.2, 1.2))
plt.legend(loc='best')
plt.subplot(224)
plt.plot(x_np, y_softplus, c='red', label='softplus')
plt.ylim((-0.2, 6))
plt.legend(loc='best')
plt.show()

D:\ptest>python test.py
D:\Python\Python36\lib\site-packages\torch\nn\functional.py:1006: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.
  warnings.warn("nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.")
D:\Python\Python36\lib\site-packages\torch\nn\functional.py:995: UserWarning: nn.functional.tanh is deprecated. Use torch.tanh instead.
  warnings.warn("nn.functional.tanh is deprecated. Use torch.tanh instead.")
Switching to torch.relu / torch.sigmoid / torch.tanh (softplus stays in torch.nn.functional) silences the deprecation warnings:

import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

x = torch.linspace(-5, 5, 200)
x = Variable(x)
x_np = x.data.numpy()

y_relu = torch.relu(x).data.numpy()
y_sigmoid = torch.sigmoid(x).data.numpy()
y_tanh = torch.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()

plt.figure(1, figsize=(8, 6))
plt.subplot(221)
plt.plot(x_np, y_relu, c='red', label='relu')
plt.ylim((-1, 5))
plt.legend(loc='best')
plt.subplot(222)
plt.plot(x_np, y_sigmoid, c='red', label='sigmoid')
plt.ylim((-0.2, 1.2))
plt.legend(loc='best')
plt.subplot(223)
plt.plot(x_np, y_tanh, c='red', label='tanh')
plt.ylim((-1.2, 1.2))
plt.legend(loc='best')
plt.subplot(224)
plt.plot(x_np, y_softplus, c='red', label='softplus')
plt.ylim((-0.2, 6))
plt.legend(loc='best')
plt.show()

Regression (curve fitting)

import torch
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt

x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2 * torch.rand(x.size())
x, y = Variable(x), Variable(y)
# plt.scatter(x.data.numpy(), y.data.numpy())
# plt.show()

class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        x = torch.relu(self.hidden(x))
        x = self.predict(x)
        return x

net = Net(n_feature=1, n_hidden=10, n_output=1)
print(net)

optimizer = torch.optim.SGD(net.parameters(), lr=0.2)
loss_func = torch.nn.MSELoss()

plt.ion()
plt.show()

for t in range(200):
    prediction = net(x)
    loss = loss_func(prediction, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if t % 5 == 0:
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        plt.text(0.5, 0, 'loss = %.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()

Classification

import torch
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt

n_data = torch.ones(100, 2)
x0 = torch.normal(2 * n_data, 1)
y0 = torch.zeros(100)
x1 = torch.normal(-2 * n_data, 1)
y1 = torch.ones(100)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
y = torch.cat((y0, y1), 0).type(torch.LongTensor)
x, y = Variable(x), Variable(y)
# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy())
# plt.show()

class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        x = torch.relu(self.hidden(x))
        x = self.predict(x)
        return x

net = Net(n_feature=2, n_hidden=10, n_output=2)
print(net)

optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss()

plt.ion()
plt.show()

for t in range(100):
    out = net(x)
    loss = loss_func(out, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if t % 2 == 0:
        plt.cla()
        prediction = torch.max(out, 1)[1]
        pred_y = prediction.data.numpy()
        target_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
        accuracy = float((pred_y == target_y).astype(int).sum()) / float(target_y.size)
        plt.text(1.5, -4, 'Accuracy = %.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()

Quick model building with nn.Sequential

import torch
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt

n_data = torch.ones(100, 2)
x0 = torch.normal(2 * n_data, 1)
y0 = torch.zeros(100)
x1 = torch.normal(-2 * n_data, 1)
y1 = torch.ones(100)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
y = torch.cat((y0, y1), 0).type(torch.LongTensor)
x, y = Variable(x), Variable(y)

class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        x = torch.relu(self.hidden(x))
        x = self.predict(x)
        return x

net1 = Net(n_feature=2, n_hidden=10, n_output=2)
print(net1)

net2 = torch.nn.Sequential(
    torch.nn.Linear(2, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 2),
)
print(net2)

D:\ptest>python test.py
Net(
  (hidden): Linear(in_features=2, out_features=10, bias=True)
  (predict): Linear(in_features=10, out_features=2, bias=True)
)
Sequential(
  (0): Linear(in_features=2, out_features=10, bias=True)
  (1): ReLU()
  (2): Linear(in_features=10, out_features=2, bias=True)
)
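Note that net1 applies relu functionally inside forward(), so the activation never shows up in print(net1), while net2 registers ReLU() as a layer of its own. A small sketch of two conveniences of the Sequential form, indexing and a shape check:

import torch

net2 = torch.nn.Sequential(
    torch.nn.Linear(2, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 2),
)

# layers of a Sequential are addressable by position
print(net2[0].weight.shape)             # torch.Size([10, 2])

# (batch, 2) in -> (batch, 2) class scores out, same as net1
print(net2(torch.randn(4, 2)).shape)    # torch.Size([4, 2])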

Saving and loading models

import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt

# torch.manual_seed(1)

x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2 * torch.rand(x.size())
x, y = Variable(x, requires_grad=False), Variable(y, requires_grad=False)

def save():
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1),
    )
    print('\nnet1:', net1)
    optimizer = torch.optim.SGD(net1.parameters(), lr=0.5)
    loss_func = torch.nn.MSELoss()
    for t in range(100):
        prediction = net1(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    plt.figure(1, figsize=(10, 3))
    plt.subplot(131)
    plt.title('Net1')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
    torch.save(net1, 'net.pkl')                      # save the whole network
    torch.save(net1.state_dict(), 'net_params.pkl')  # save only the parameters

def restore_net():
    net2 = torch.load('net.pkl')
    prediction = net2(x)
    print('\nnet2:', net2)
    plt.subplot(132)
    plt.title('Net2')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)

def restore_params():
    net3 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1),
    )
    print('\nnet3:', net3)
    net3.load_state_dict(torch.load('net_params.pkl'))
    prediction = net3(x)
    plt.subplot(133)
    plt.title('Net3')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
    plt.show()

save()
restore_net()
restore_params()
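A practical note on restoring, not covered in the original: torch.load accepts a map_location argument, which matters when a model saved on GPU is loaded on a CPU-only machine. A minimal sketch, reusing the file names from the example above:

import torch

# force tensors onto CPU regardless of the device they were saved from
net2 = torch.load('net.pkl', map_location='cpu')
params = torch.load('net_params.pkl', map_location='cpu')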

Mini-batch training

import torch
import torch.utils.data as Data

BATCH_SIZE = 5

x = torch.linspace(1, 10, 10)
y = torch.linspace(10, 1, 10)

torch_dataset = Data.TensorDataset(x, y)  # formerly TensorDataset(data_tensor=x, target_tensor=y)
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,
    num_workers=2,
)

def show_batch():
    for epoch in range(3):
        for step, (batch_x, batch_y) in enumerate(loader):
            print('Epoch:', epoch, '| Step:', step, '| batch x:',
                  batch_x.numpy(), '| batch y:', batch_y.numpy())

if __name__ == '__main__':
    show_batch()

D:\ptest>python test.py
Epoch: 0 | Step: 0 | batch x: [ 3.  5.  6. 10.  8.] | batch y: [8. 6. 5. 1. 3.]
Epoch: 0 | Step: 1 | batch x: [1. 2. 7. 9. 4.] | batch y: [10.  9.  4.  2.  7.]
Epoch: 1 | Step: 0 | batch x: [2. 5. 1. 3. 8.] | batch y: [ 9.  6. 10.  8.  3.]
Epoch: 1 | Step: 1 | batch x: [ 6.  7.  4. 10.  9.] | batch y: [5. 4. 7. 1. 2.]
Epoch: 2 | Step: 0 | batch x: [10.  5.  9.  7.  4.] | batch y: [1. 6. 2. 4. 7.]
Epoch: 2 | Step: 1 | batch x: [1. 6. 2. 3. 8.] | batch y: [10.  5.  9.  8.  3.]
import torch
import torch.utils.data as Data

BATCH_SIZE = 8

x = torch.linspace(1, 10, 10)
y = torch.linspace(10, 1, 10)

torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,
    num_workers=2,
)

def show_batch():
    for epoch in range(3):
        for step, (batch_x, batch_y) in enumerate(loader):
            print('Epoch:', epoch, '| Step:', step, '| batch x:',
                  batch_x.numpy(), '| batch y:', batch_y.numpy())

if __name__ == '__main__':
    show_batch()

D:\ptest>python test.py
Epoch: 0 | Step: 0 | batch x: [ 2.  7.  8.  4.  1.  3. 10.  5.] | batch y: [ 9.  4.  3.  7. 10.  8.  1.  6.]
Epoch: 0 | Step: 1 | batch x: [9. 6.] | batch y: [2. 5.]
Epoch: 1 | Step: 0 | batch x: [6. 5. 7. 3. 8. 9. 4. 1.] | batch y: [ 5.  6.  4.  8.  3.  2.  7. 10.]
Epoch: 1 | Step: 1 | batch x: [ 2. 10.] | batch y: [9. 1.]
Epoch: 2 | Step: 0 | batch x: [ 3.  7.  6.  5. 10.  1.  8.  4.] | batch y: [ 8.  4.  5.  6.  1. 10.  3.  7.]
Epoch: 2 | Step: 1 | batch x: [2. 9.] | batch y: [9. 2.]
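With 10 samples and BATCH_SIZE = 8, the second step is left with only 2 samples, as the output shows. If such ragged tail batches are unwanted, DataLoader can discard them via its drop_last flag; a minimal sketch:

import torch
import torch.utils.data as Data

x = torch.linspace(1, 10, 10)
y = torch.linspace(10, 1, 10)
loader = Data.DataLoader(
    dataset=Data.TensorDataset(x, y),
    batch_size=8,
    shuffle=True,
    drop_last=True,   # discard the incomplete final batch of 2
)

if __name__ == '__main__':
    for batch_x, batch_y in loader:
        print(batch_x.numpy())   # only full batches of 8 are yielded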

Optimizers

import torch
import torch.utils.data as Data
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

LR = 0.01
BATCH_SIZE = 32
EPOCH = 12

x = torch.unsqueeze(torch.linspace(-1, 1, 10000), dim=1)
y = x.pow(2) + 0.1 * torch.normal(torch.zeros(*x.size()))

torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)
        self.predict = torch.nn.Linear(20, 1)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x

if __name__ == '__main__':
    net_SGD = Net()
    net_Momentum = Net()
    net_RMSprop = Net()
    net_Adam = Net()
    nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]

    opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
    opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
    opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
    opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
    optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]

    loss_func = torch.nn.MSELoss()
    losses_his = [[], [], [], []]

    for epoch in range(EPOCH):
        print(epoch)
        for step, (batch_x, batch_y) in enumerate(loader):
            b_x = Variable(batch_x)
            b_y = Variable(batch_y)
            for net, opt, l_his in zip(nets, optimizers, losses_his):
                output = net(b_x)
                loss = loss_func(output, b_y)
                opt.zero_grad()
                loss.backward()
                opt.step()
                l_his.append(loss.item())  # loss.data[0] no longer works after 0.4

    labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
    for i, l_his in enumerate(losses_his):
        plt.plot(l_his, label=labels[i])
    plt.legend(loc='best')
    plt.xlabel('Steps')
    plt.ylabel('Loss')
    plt.ylim((0, 0.2))
    plt.show()

CNN (Convolutional Neural Network)

import torch
import torch.utils.data as Data
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torchvision
import torch.nn as nn

EPOCH = 1
BATCH_SIZE = 50
LR = 0.001
DOWNLOAD_MNIST = True

train_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_MNIST)

print(train_data.train_data.size())
print(train_data.train_labels.size())
plt.imshow(train_data.train_data[0].numpy(), cmap='gray')
plt.title('%i' % train_data.train_labels[0])
plt.show()

D:\ptest>python test.py
torch.Size([60000, 28, 28])
torch.Size([60000])
import torch
import torch.utils.data as Data
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torchvision
import torch.nn as nn

EPOCH = 1
BATCH_SIZE = 50
LR = 0.001
DOWNLOAD_MNIST = True

train_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_MNIST)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)

test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
test_x = Variable(torch.unsqueeze(test_data.test_data, dim=1), volatile=True).type(torch.FloatTensor)[:2000] / 255.
test_y = test_data.test_labels[:2000]

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(          # input shape (1, 28, 28)
            nn.Conv2d(
                in_channels=1,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=2,),                 # if stride=1, padding=(kernel_size-1)/2=(5-1)/2
                                             # -> (16, 28, 28)
            nn.ReLU(),                       # -> (16, 28, 28)
            nn.MaxPool2d(kernel_size=2),)    # -> (16, 14, 14)
        self.conv2 = nn.Sequential(          # (16, 14, 14)
            nn.Conv2d(16, 32, 5, 1, 2),      # -> (32, 14, 14)
            nn.ReLU(),                       # -> (32, 14, 14)
            nn.MaxPool2d(2))                 # -> (32, 7, 7)
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)
        output = self.out(x)
        return output

cnn = CNN()
print(cnn)

D:\ptest>python test.py
test.py:22: UserWarning: volatile was removed and now has no effect. Use `with torch.no_grad():` instead.
  test_x = Variable(torch.unsqueeze(test_data.test_data, dim=1), volatile=True).type(torch.FloatTensor)[:2000] / 255.
CNN(
  (conv1): Sequential(
    (0): Conv2d(1, 16, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): ReLU()
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (conv2): Sequential(
    (0): Conv2d(16, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): ReLU()
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (out): Linear(in_features=1568, out_features=10, bias=True)
)
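The shape comments in conv1 and conv2 can be checked by pushing a dummy MNIST-sized batch through the network (this sketch assumes the CNN class defined above):

import torch

cnn = CNN()   # the class defined above

# (batch, channels, height, width) = one fake grayscale 28x28 image
dummy = torch.randn(1, 1, 28, 28)
print(cnn(dummy).shape)   # torch.Size([1, 10]): one score per digit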
The original post marked the following training code as broken: it calls optimizer and loss_func without ever defining them, and sum(pred_y == test_y) on a byte tensor can silently overflow. Both are fixed below; the two missing definitions are filled in as Adam and CrossEntropyLoss (an assumption consistent with the LR defined above):

import torch
import torch.utils.data as Data
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torchvision
import torch.nn as nn

EPOCH = 1
BATCH_SIZE = 50
LR = 0.001
DOWNLOAD_MNIST = True

train_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_MNIST)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)

test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
test_x = Variable(torch.unsqueeze(test_data.test_data, dim=1), volatile=True).type(torch.FloatTensor)[:2000] / 255.
test_y = test_data.test_labels[:2000]

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(          # (1, 28, 28)
            nn.Conv2d(
                in_channels=1,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=2,),                 # -> (16, 28, 28)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),)    # -> (16, 14, 14)
        self.conv2 = nn.Sequential(          # (16, 14, 14)
            nn.Conv2d(16, 32, 5, 1, 2),      # -> (32, 14, 14)
            nn.ReLU(),
            nn.MaxPool2d(2))                 # -> (32, 7, 7)
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)
        output = self.out(x)
        return output

cnn = CNN()
print(cnn)

optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)   # added: missing in the original
loss_func = nn.CrossEntropyLoss()                       # added: missing in the original

for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        b_x = Variable(x)
        b_y = Variable(y)
        output = cnn(b_x)
        loss = loss_func(output, b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 50 == 0:
            test_output = cnn(test_x)
            pred_y = torch.max(test_output, 1)[1].data.squeeze()
            accuracy = float((pred_y == test_y).sum()) / test_y.size(0)
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)

test_output = cnn(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')

Reposted from: https://my.oschina.net/hellopasswd/blog/2963657
