Model-Building Exercise 2: Implementing the nn Module, optim, two_layer, and dynamic_net
Published: 2019-06-06


Implementing nn.Module with Variable

import torch
from torch.autograd import Variable

N, D_in, H, D_out = 64, 1000, 100, 10

x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)

model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)

loss_fn = torch.nn.MSELoss(size_average=False)

learning_rate = 1e-4
for t in range(2):
    # Forward pass
    y_pred = model(x)

    loss = loss_fn(y_pred, y)
    # Zero the gradients before running the backward pass.
    model.zero_grad()
    # Backward pass: compute gradient of the loss with respect to all the learnable
    # parameters of the model. Internally, the parameters of each Module are stored
    # in Variables with requires_grad=True, so this call will compute gradients for
    # all learnable parameters in the model.
    loss.backward()

    # Update the weights using gradient descent. Each parameter is a Variable.
    for param in model.parameters():
        param.data -= learning_rate * param.grad.data
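The Variable wrapper and the size_average argument come from older PyTorch releases. As a minimal sketch, assuming PyTorch 0.4 or later (not part of the original post), the same manual-update loop can be written with plain tensors, reduction='sum', and torch.no_grad():

import torch

N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
# reduction='sum' replaces the deprecated size_average=False
loss_fn = torch.nn.MSELoss(reduction='sum')

learning_rate = 1e-4
for t in range(2):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    model.zero_grad()
    loss.backward()
    # Update parameters in place without recording the update in autograd
    with torch.no_grad():
        for param in model.parameters():
            param -= learning_rate * param.grad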

Implementing optim

import torch
from torch.autograd import Variable

N, D_in, H, D_out = 64, 1000, 100, 10
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)

model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(size_average=False)

learning_rate = 1e-4
# Use the optim package to define an Optimizer that will update the weights of
# the model for us.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for t in range(500):
    # Forward pass: compute predicted y by passing x to the model.
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    # Before the backward pass, use the optimizer object to zero all of the
    # gradients for the variables it will update (which are the learnable weights
    # of the model).
    optimizer.zero_grad()
    # Backward pass: compute gradient of the loss with respect to model
    # parameters.
    loss.backward()
    # Calling the step function on an Optimizer makes an update to its
    # parameters.
    optimizer.step()
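Any optimizer from torch.optim plugs into the same loop. Below is a small sketch (assuming PyTorch 0.4+, where loss.item() returns the scalar value; the hyperparameters and print interval are illustrative, not from the original post) that swaps Adam for SGD with momentum and logs the loss periodically:

import torch

N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')

# Same training loop, different optimizer: SGD with momentum
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
for t in range(500):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    if t % 100 == 0:
        print(t, loss.item())  # .item() extracts the scalar loss value
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()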

Implementing the two_layer model

import torch
from torch.autograd import Variable

class TwoLayerNet(torch.nn.Module):
    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        # clamp(min=0) applies ReLU to the hidden layer
        h_relu = self.linear1(x).clamp(min=0)
        y_pred = self.linear2(h_relu)
        return y_pred

N, D_in, H, D_out = 64, 1000, 100, 10
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)

model = TwoLayerNet(D_in, H, D_out)
criterion = torch.nn.MSELoss(size_average=False)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(2):
    y_pred = model(x)
    loss = criterion(y_pred, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
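Once the network is a Module subclass, its weights can be persisted with the standard state_dict API. A minimal sketch continuing from the block above (the filename two_layer_net.pt is arbitrary and not part of the original post):

import torch

# Save only the learnable parameters (the usual way to persist a model)
torch.save(model.state_dict(), 'two_layer_net.pt')

# Later: rebuild the same architecture and load the saved weights
restored = TwoLayerNet(D_in, H, D_out)
restored.load_state_dict(torch.load('two_layer_net.pt'))
restored.eval()  # switch to evaluation mode before inference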

Implementing dynamic_net

import random
import torch
from torch.autograd import Variable

class DynamicNet(torch.nn.Module):
    def __init__(self, D_in, H, D_out):
        super(DynamicNet, self).__init__()
        self.input_linear = torch.nn.Linear(D_in, H)
        self.middle_linear = torch.nn.Linear(H, H)
        self.output_linear = torch.nn.Linear(H, D_out)

    def forward(self, x):
        # Reuse the same middle_linear module 0 to 3 times per forward pass;
        # the computation graph is rebuilt dynamically on every call.
        h_relu = self.input_linear(x).clamp(min=0)
        for _ in range(random.randint(0, 3)):
            h_relu = self.middle_linear(h_relu).clamp(min=0)
        y_pred = self.output_linear(h_relu)
        return y_pred

N, D_in, H, D_out = 64, 1000, 100, 10
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)
model = DynamicNet(D_in, H, D_out)

criterion = torch.nn.MSELoss(size_average=False)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
for t in range(2):
    y_pred = model(x)
    loss = criterion(y_pred, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
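A quick check, reusing model and x from the block above (this snippet is not part of the original post): because middle_linear is the same module on every repetition, its weights are shared across the repeated layer, so the parameter count is fixed no matter how deep a particular forward pass happens to be, while each call still picks a fresh random depth.

# Total parameters: Linear(1000,100) + Linear(100,100) + Linear(100,10), independent of depth
n_params = sum(p.numel() for p in model.parameters())
print(n_params)

# Each call rebuilds the graph with a random depth between 0 and 3;
# the output shape stays (N, D_out) regardless of the chosen depth.
for i in range(3):
    print(i, model(x).size())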

 

Reposted from: https://www.cnblogs.com/Joyce-song94/p/7477404.html
