### 03 PyTroch初试.py: first steps with PyTorch
### From yang-mindi/interesting-and-simple-neural-network ("Interesting and simple neural networks")
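# X and y are not defined in this file; presumably they are loaded in an earlier
# notebook of the series. A minimal standalone setup with synthetic data so the
# script runs on its own (the shapes here are assumptions; 100 samples matches
# the bar plot further down):
import torch
from torch import nn
import matplotlib.pyplot as plt

torch.manual_seed(0)                       # Reproducibility
X = torch.randn(100, 10)                   # 100 samples, 10 made-up features
y = (X[:, 0] > 0).float().reshape(-1, 1)   # Toy binary labels, shape (100, 1)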
### Class definition
n_input = X.shape[1]   # Must match the number of input features
n_hidden1 = 8          # Number of neurons in the 1st hidden layer
n_hidden2 = 4          # Number of neurons in the 2nd hidden layer
n_output = 1           # Number of output units (e.g. 1 for binary classification)
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        # Linear transformation into the 1st hidden layer
        self.hidden1 = nn.Linear(n_input, n_hidden1)
        # Linear transformation into the 2nd hidden layer
        self.hidden2 = nn.Linear(n_hidden1, n_hidden2)
        # Activation function for the hidden layers' outputs: ReLU
        self.relu = nn.ReLU()
        # Output layer linear transformation
        self.output = nn.Linear(n_hidden2, n_output)
        # Activation function for the output layer: sigmoid
        self.sigmoid = nn.Sigmoid()

    def forward(self, X, **kwargs):
        # Pass the input tensor through each of the defined operations
        X = self.hidden1(X)
        X = self.relu(X)
        X = self.hidden2(X)
        X = self.relu(X)
        X = self.output(X)
        X = self.sigmoid(X)
        return X
model = Network()
print(model)
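# A quick sanity check (not in the original): the forward pass should produce
# one probability per sample, squashed into (0, 1) by the sigmoid.
probs = model(X)                                # Calls forward() under the hood
print(probs.shape)                              # torch.Size([100, 1])
print(float(probs.min()), float(probs.max()))   # Both strictly between 0 and 1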
### Loss function, optimizer, and training
criterion = nn.BCELoss()   # Binary cross-entropy loss
output = model(X)          # Forward pass; the sigmoid makes these probabilities, not raw logits
loss = criterion(output, y)
from torch import optim

optimizer = optim.SGD(model.parameters(), lr=0.1)
# Reset the gradients, i.e. do not accumulate them across passes
optimizer.zero_grad()
# Forward pass
output = model(X)
# Calculate loss (the target is y, the labels, not the input X)
loss = criterion(output, y)
# Backward pass (autograd)
loss.backward()
# One step of the optimizer
optimizer.step()
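# Not in the original: after backward(), each parameter carries a .grad tensor,
# and step() moves the weights against that gradient. A small way to see it:
before = model.hidden1.weight.detach().clone()
optimizer.zero_grad()
loss = criterion(model(X), y)
loss.backward()
print(model.hidden1.weight.grad.shape)               # Same shape as the weight matrix, (8, n_input)
optimizer.step()
print(torch.allclose(before, model.hidden1.weight))  # False: the weights moved (assuming non-zero gradients)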
### Training for multiple epochs
epochs = 10
for i in range(epochs):
    optimizer.zero_grad()         # Reset the gradients
    output = model(X)             # Forward pass
    loss = criterion(output, y)   # Calculate loss against the labels y
    print(f"Epoch - {i+1}, Loss - {round(loss.item(), 3)}")  # Print loss
    loss.backward()               # Backpropagation
    optimizer.step()              # One optimizer step
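# Not in the original: at evaluation time, gradient tracking is usually disabled.
# A hedged sketch of computing training accuracy with a 0.5 decision threshold:
with torch.no_grad():
    preds = (model(X) > 0.5).float()
    accuracy = (preds == y).float().mean().item()
print(f"Training accuracy: {accuracy:.2%}")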
### Watching the output probabilities change over time
epochs = 100        # More epochs here, otherwise the (i+1) % 20 == 0 plot below never triggers
running_loss = []   # Track the loss per epoch
for i in range(epochs):
    optimizer.zero_grad()
    output = model(X)
    loss = criterion(output, y)
    # print(f"Epoch - {i+1}, Loss - {round(loss.item(), 3)}")
    loss.backward()
    optimizer.step()
    running_loss.append(loss.item())
    if i != 0 and (i+1) % 20 == 0:
        probs = model(X).detach().numpy().flatten()  # Probabilities, one per sample
        plt.figure(figsize=(15, 3))
        plt.title("Output probabilities after {} epochs".format(i+1))
        plt.bar(range(len(probs)), height=probs)
        plt.show()
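# Not in the original: since running_loss is being collected anyway, the training
# curve itself is one plt.plot away:
plt.figure(figsize=(15, 3))
plt.title("Training loss per epoch")
plt.xlabel("Epoch")
plt.ylabel("BCE loss")
plt.plot(running_loss)
plt.show()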
### Custom loss function
def mean_quartic_error(output, target):
    """
    Computes the mean 4th-power (quartic) loss.
    """
    loss = torch.mean((output - target) ** 4)
    return loss
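# A quick sanity check (not in the original): with output 2 and target 0,
# the quartic error is (2 - 0)**4 = 16.
print(mean_quartic_error(torch.tensor([2.0]), torch.tensor([0.0])))  # tensor(16.)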
# reg_model is presumably a regression network defined elsewhere in the series;
# note the optimizer would need reg_model.parameters() for this loop to train it
for i in range(epochs):
    optimizer.zero_grad()
    output = reg_model(X)
    loss = mean_quartic_error(output, y)   # Target is y, as in the earlier loops
    # print(f"Epoch - {i+1}, Loss - {round(loss.item(), 3)}")
    loss.backward()
    optimizer.step()
    running_loss.append(loss.item())
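# Not in the original: the same loss wrapped as an nn.Module, which is the more
# common pattern when a loss needs configuration or must slot in where a
# criterion object is expected:
class MeanQuarticError(nn.Module):
    def forward(self, output, target):
        return torch.mean((output - target) ** 4)

criterion_q = MeanQuarticError()
print(criterion_q(torch.tensor([2.0]), torch.tensor([0.0])))  # tensor(16.) again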