Tutorial: Implementing VGG16 with PyTorch

    北冥有鱼 教程大全 2020-06-26

    Without further ado, here is the code. Note that this implementation pads only the second and third convolutions of each block and compensates with padding on the max-pooling layers, so the feature-map sizes tracked in the comments differ slightly from the canonical VGG16 inside each block but match it again after every pooling step.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
     
    class VGG16(nn.Module):
      
      def __init__(self):
        super(VGG16, self).__init__()
        
        # 3 * 224 * 224
        self.conv1_1 = nn.Conv2d(3, 64, 3) # 64 * 222 * 222
        self.conv1_2 = nn.Conv2d(64, 64, 3, padding=(1, 1)) # 64 * 222 * 222
        self.maxpool1 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 64 * 112 * 112
        
        self.conv2_1 = nn.Conv2d(64, 128, 3) # 128 * 110 * 110
        self.conv2_2 = nn.Conv2d(128, 128, 3, padding=(1, 1)) # 128 * 110 * 110
        self.maxpool2 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 128 * 56 * 56
        
        self.conv3_1 = nn.Conv2d(128, 256, 3) # 256 * 54 * 54
        self.conv3_2 = nn.Conv2d(256, 256, 3, padding=(1, 1)) # 256 * 54 * 54
        self.conv3_3 = nn.Conv2d(256, 256, 3, padding=(1, 1)) # 256 * 54 * 54
        self.maxpool3 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 256 * 28 * 28
        
        self.conv4_1 = nn.Conv2d(256, 512, 3) # 512 * 26 * 26
        self.conv4_2 = nn.Conv2d(512, 512, 3, padding=(1, 1)) # 512 * 26 * 26
        self.conv4_3 = nn.Conv2d(512, 512, 3, padding=(1, 1)) # 512 * 26 * 26
        self.maxpool4 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 512 * 14 * 14
        
        self.conv5_1 = nn.Conv2d(512, 512, 3) # 512 * 12 * 12
        self.conv5_2 = nn.Conv2d(512, 512, 3, padding=(1, 1)) # 512 * 12 * 12
        self.conv5_3 = nn.Conv2d(512, 512, 3, padding=(1, 1)) # 512 * 12 * 12
        self.maxpool5 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 512 * 7 * 7
        
        # view
        
        self.fc1 = nn.Linear(512 * 7 * 7, 4096)
        self.fc2 = nn.Linear(4096, 4096)
        self.fc3 = nn.Linear(4096, 1000)
        # softmax 1 * 1 * 1000
        
      def forward(self, x):
        
        # x.size(0) is the batch size
        in_size = x.size(0)
        
        out = self.conv1_1(x) # 222
        out = F.relu(out)
        out = self.conv1_2(out) # 222
        out = F.relu(out)
        out = self.maxpool1(out) # 112
        
        out = self.conv2_1(out) # 110
        out = F.relu(out)
        out = self.conv2_2(out) # 110
        out = F.relu(out)
        out = self.maxpool2(out) # 56
        
        out = self.conv3_1(out) # 54
        out = F.relu(out)
        out = self.conv3_2(out) # 54
        out = F.relu(out)
        out = self.conv3_3(out) # 54
        out = F.relu(out)
        out = self.maxpool3(out) # 28
        
        out = self.conv4_1(out) # 26
        out = F.relu(out)
        out = self.conv4_2(out) # 26
        out = F.relu(out)
        out = self.conv4_3(out) # 26
        out = F.relu(out)
        out = self.maxpool4(out) # 14
        
        out = self.conv5_1(out) # 12
        out = F.relu(out)
        out = self.conv5_2(out) # 12
        out = F.relu(out)
        out = self.conv5_3(out) # 12
        out = F.relu(out)
        out = self.maxpool5(out) # 7
        
        # flatten to (batch_size, 512 * 7 * 7)
        out = out.view(in_size, -1)
        
        out = self.fc1(out)
        out = F.relu(out)
        out = self.fc2(out)
        out = F.relu(out)
        out = self.fc3(out)
        
        out = F.log_softmax(out, dim=1)
        return out
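
    As a quick sanity check, here is a minimal sketch (the batch size of 2 and the random tensor are purely illustrative) that instantiates the class above and confirms the forward pass returns log-probabilities over 1000 classes:

    # Hypothetical usage example: random input, just to verify the output shape.
    model = VGG16()
    dummy = torch.randn(2, 3, 224, 224)   # a batch of 2 RGB images of size 224 x 224
    log_probs = model(dummy)
    print(log_probs.shape)                # expected: torch.Size([2, 1000])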

    Supplementary: implementing VGG with PyTorch (GPU version)

    Here is the code:

    import torch
    from torch import nn
    from torch import optim
    
    from PIL import Image
    import numpy as np
    
    print(torch.cuda.is_available())
    device = torch.device('cuda:0')
    path="/content/drive/My Drive/Colab Notebooks/data/dog_vs_cat/"
    
    train_X=np.empty((2000,224,224,3),dtype="float32")
    train_Y=np.empty((2000,),dtype="int")
    train_XX=np.empty((2000,3,224,224),dtype="float32")
    
    for i in range(1000):
      file_path=path+"cat."+str(i)+".jpg"
      image=Image.open(file_path)
      # Image.ANTIALIAS was removed in newer Pillow releases; LANCZOS is the same filter.
      resized_image = image.resize((224, 224), Image.LANCZOS)
      img=np.array(resized_image)
      train_X[i,:,:,:]=img
      train_Y[i]=0
    
    for i in range(1000):
      file_path=path+"dog."+str(i)+".jpg"
      image = Image.open(file_path)
      resized_image = image.resize((224, 224), Image.LANCZOS)
      img = np.array(resized_image)
      train_X[i+1000, :, :, :] = img
      train_Y[i+1000] = 1
    
    train_X /= 255
    
    index = np.arange(2000)
    np.random.shuffle(index)
    
    train_X = train_X[index, :, :, :]
    train_Y = train_Y[index]
    
    # Move channels first: (N, H, W, C) -> (N, C, H, W)
    for i in range(3):
      train_XX[:,i,:,:]=train_X[:,:,:,i]
    
    # Build the network
    
    class Net(nn.Module):
    
      def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Sequential(
          nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1),
          nn.ReLU(),
          nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
          nn.ReLU(),
          nn.BatchNorm2d(num_features=64, eps=1e-05, momentum=0.1, affine=True),
          nn.MaxPool2d(kernel_size=2,stride=2)
        )
        self.conv2 = nn.Sequential(
          nn.Conv2d(in_channels=64,out_channels=128,kernel_size=3,stride=1,padding=1),
          nn.ReLU(),
          nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
          nn.ReLU(),
          nn.BatchNorm2d(128,eps=1e-5,momentum=0.1,affine=True),
          nn.MaxPool2d(kernel_size=2,stride=2)
        )
        self.conv3 = nn.Sequential(
          nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
          nn.ReLU(),
          nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
          nn.ReLU(),
          nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
          nn.ReLU(),
          nn.BatchNorm2d(256,eps=1e-5, momentum=0.1, affine=True),
          nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.conv4 = nn.Sequential(
          nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1),
          nn.ReLU(),
          nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
          nn.ReLU(),
          nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
          nn.ReLU(),
          nn.BatchNorm2d(512, eps=1e-5, momentum=0.1, affine=True),
          nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.conv5 = nn.Sequential(
          nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
          nn.ReLU(),
          nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
          nn.ReLU(),
          nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
          nn.ReLU(),
          nn.BatchNorm2d(512, eps=1e-5, momentum=0.1, affine=True),
          nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.dense1 = nn.Sequential(
          nn.Linear(7*7*512,4096),
          nn.ReLU(),
          nn.Linear(4096,4096),
          nn.ReLU(),
          nn.Linear(4096,2)
        )
    
    
      def forward(self, x):
        x=self.conv1(x)
        x=self.conv2(x)
        x=self.conv3(x)
        x=self.conv4(x)
        x=self.conv5(x)
        x=x.view(-1,7*7*512)
        x=self.dense1(x)
        return x
    
    batch_size=16
    net = Net().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0005)
    
    train_loss = []
    
    for epoch in range(10):
    
      for i in range(2000//batch_size):
        x=train_XX[i*batch_size:i*batch_size+batch_size]
        y=train_Y[i*batch_size:i*batch_size+batch_size]
    
        x = torch.from_numpy(x)    # (batch_size, 3, 224, 224)
        y = torch.from_numpy(y)    # (batch_size,) class indices, not one-hot
        x = x.cuda()
        y = y.long().cuda()
    
        out = net(x)
    
        loss = criterion(out, y)     # compute the loss between predictions and labels
        optimizer.zero_grad()        # clear gradients left over from the previous step
        loss.backward()              # backpropagate to compute the gradients
        optimizer.step()             # apply the updates to the network parameters
        train_loss.append(loss.item())
    
        print(epoch, i*batch_size, np.mean(train_loss))
        train_loss=[]
    
    # Accuracy is measured on the training images; net.eval() / torch.no_grad() are not used here.
    total_correct = 0
    for i in range(2000):
      x = train_XX[i].reshape(1,3,224,224)
      y = train_Y[i]
      x = torch.from_numpy(x)
    
      x = x.cuda()
      out = net(x).cpu()
      out = out.detach().numpy()
      pred=np.argmax(out)
      if pred==y:
        total_correct += 1
      print(total_correct)
    
    acc = total_correct / 2000.0
    print('test acc:', acc)
    
    torch.cuda.empty_cache()
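
    For reference, the manual slicing used to form mini-batches above can also be expressed with torch.utils.data; below is a minimal sketch under the assumption that train_XX, train_Y, net, criterion, optimizer, and device are the objects built in the script above:

    # Hypothetical alternative to the manual batching loop, using TensorDataset / DataLoader.
    from torch.utils.data import TensorDataset, DataLoader

    dataset = TensorDataset(torch.from_numpy(train_XX), torch.from_numpy(train_Y).long())
    loader = DataLoader(dataset, batch_size=16, shuffle=True)

    for epoch in range(10):
      for x, y in loader:
        x, y = x.to(device), y.to(device)
        out = net(x)
        loss = criterion(out, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    The DataLoader also reshuffles the data every epoch, which the index-slicing loop above does not.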
    

    Changing batch_size in the code above to 32 and training for 100 epochs yields a very high accuracy from the evaluation loop, which, note, measures the training images themselves.

    In other words, the model is overfitting.
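
    One standard mitigation, which is not part of the original post, is to add dropout to the fully connected head, as the reference VGG16 architecture does. Below is a minimal sketch of how the dense1 block above could be written with dropout (the rate of 0.5 is the value used in the original VGG paper):

    # Hypothetical variant of the classifier head with dropout; not in the original code.
    from torch import nn

    dense1_with_dropout = nn.Sequential(
      nn.Linear(7 * 7 * 512, 4096),
      nn.ReLU(),
      nn.Dropout(p=0.5),     # randomly zero half of the activations during training
      nn.Linear(4096, 4096),
      nn.ReLU(),
      nn.Dropout(p=0.5),
      nn.Linear(4096, 2)     # two classes: cat vs. dog
    )

    Other common options are data augmentation (random flips and crops) or simply holding out part of the 2000 images as a validation set instead of measuring accuracy on the training data.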

    That is everything in this tutorial on implementing VGG16 with PyTorch. I hope it serves as a useful reference, and thank you for supporting 北冥有鱼.

