import torch
import torch.nn as nn
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# Training data: y follows the linear relationship y = 2x - 1
X = torch.tensor([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], device=device)
y = torch.tensor([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], device=device)
class Perceptron(nn.Module):
    """Single-layer perceptron: one linear unit mapping a 1-d input to a 1-d output."""
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(1, 1)

    def forward(self, x):
        y = self.fc(x)
        return y
class NetModel(nn.Module):
    """Small multi-layer network: 1 -> 2 -> 2 -> 1 with ReLU activations."""
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(1, 2)
        self.fc2 = nn.Linear(2, 2)
        self.fc3 = nn.Linear(2, 1)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        y = self.fc3(x)
        return y
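# Illustrative sanity check (not part of the original script): confirm that both
# architectures map a (batch, 1) input to a (batch, 1) output before training.
# The name _probe is introduced here only for this check.
_probe = torch.randn(4, 1)
assert Perceptron()(_probe).shape == (4, 1)
assert NetModel()(_probe).shape == (4, 1)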
model1 = Perceptron().to(device)
model2 = NetModel().to(device)
loss_fn = nn.MSELoss()
optimizer1 = torch.optim.SGD(model1.parameters(), lr=0.01)
optimizer2 = torch.optim.SGD(model2.parameters(), lr=0.01)
# Train the single-layer perceptron
print("Training Perceptron (model1):")
for epoch in range(500):
    y_pred = model1(X.unsqueeze(1))
    loss = loss_fn(y_pred, y.unsqueeze(1))
    optimizer1.zero_grad()
    loss.backward()
    optimizer1.step()
    if (epoch + 1) % 50 == 0:
        print(f'Epoch [{epoch+1}/500], Loss: {loss.item():.4f}')
perceptron_path = 'perceptron.pth'
torch.save(model1.state_dict(), perceptron_path)
# Train the multi-layer neural network
print("Training NetModel (model2):")
for epoch in range(500):
    y_pred = model2(X.unsqueeze(1))
    loss = loss_fn(y_pred, y.unsqueeze(1))
    optimizer2.zero_grad()
    loss.backward()
    optimizer2.step()
    if (epoch + 1) % 50 == 0:
        print(f'Epoch [{epoch+1}/500], Loss: {loss.item():.4f}')
net_model_path = 'net_model.pth'
torch.save(model2.state_dict(), net_model_path)
# Save model parameters (already done above after each training loop)
# perceptron_path = 'perceptron.pth'
# net_model_path = 'net_model.pth'
# torch.save(model1.state_dict(), perceptron_path)
# torch.save(model2.state_dict(), net_model_path)
print(f"模型参数已保存到: {perceptron_path} 和 {net_model_path}")