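# Low-rank neural operator baseline for the 1D Burgers' equation.
# The script trains a 4-layer low-rank kernel network on the
# burgers_data_R10.mat dataset and reports train/test relative L2 errors.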
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import numpy as np
import h5py
import scipy.io
import matplotlib.pyplot as plt
from timeit import default_timer
import sys
import math
import operator
from functools import reduce
from utilities3 import *
torch.manual_seed(0)
np.random.seed(0)
################################################################
# lowrank layer
################################################################
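# The layer approximates the integral operator
#     (Kv)(x) = \int kappa(x, y) v(y) dy
# with a rank-r factorization kappa(x, y) ~ sum_r phi_r(x) psi_r(y),
# where the factors phi and psi are learned pointwise by small dense
# networks (DenseNet comes from utilities3).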
class LowRank1d(nn.Module):
    def __init__(self, in_channels, out_channels, s, width, rank=1):
        super(LowRank1d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.s = s
        self.n = s
        self.rank = rank

        # phi and psi evaluate the kernel factors pointwise from (a(x), x)
        self.phi = DenseNet([2, 64, 128, 256, width*width*rank], torch.nn.ReLU)
        self.psi = DenseNet([2, 64, 128, 256, width*width*rank], torch.nn.ReLU)

    def forward(self, v, a):
        # a (batch, n, 2)
        # v (batch, n, f)
        batch_size = v.shape[0]

        phi_eval = self.phi(a).reshape(batch_size, self.n, self.out_channels, self.in_channels, self.rank)
        psi_eval = self.psi(a).reshape(batch_size, self.n, self.out_channels, self.in_channels, self.rank)

        # contract v with psi over the input grid, then expand with phi at
        # every output point; 1/n is a uniform quadrature weight for the integral
        v = torch.einsum('bnoir,bni,bmoir->bmo', psi_eval, v, phi_eval) / self.n

        return v
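# Quick shape check for LowRank1d (an illustrative sketch, not part of the
# training run; in_channels and out_channels must equal `width` for the
# reshape in forward() to work out):
#   layer = LowRank1d(in_channels=32, out_channels=32, s=128, width=32, rank=1)
#   v = torch.randn(4, 128, 32)   # (batch, n, in_channels)
#   a = torch.randn(4, 128, 2)    # (batch, n, (a(x), x))
#   assert layer(v, a).shape == (4, 128, 32)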
class MyNet(torch.nn.Module):
    def __init__(self, s, width=32, rank=4):
        super(MyNet, self).__init__()
        self.s = s
        self.width = width
        self.rank = rank

        # lift the (a(x), x) input pair to `width` channels
        self.fc0 = nn.Linear(2, self.width)

        # four low-rank integral layers, each paired with a pointwise
        # linear bypass (w1..w4) and batch norm
        self.net1 = LowRank1d(self.width, self.width, s, width, rank=self.rank)
        self.net2 = LowRank1d(self.width, self.width, s, width, rank=self.rank)
        self.net3 = LowRank1d(self.width, self.width, s, width, rank=self.rank)
        self.net4 = LowRank1d(self.width, self.width, s, width, rank=self.rank)
        self.w1 = nn.Linear(self.width, self.width)
        self.w2 = nn.Linear(self.width, self.width)
        self.w3 = nn.Linear(self.width, self.width)
        self.w4 = nn.Linear(self.width, self.width)

        self.bn1 = torch.nn.BatchNorm1d(self.width)
        self.bn2 = torch.nn.BatchNorm1d(self.width)
        self.bn3 = torch.nn.BatchNorm1d(self.width)
        self.bn4 = torch.nn.BatchNorm1d(self.width)

        # project back down to the scalar solution u(x)
        self.fc1 = nn.Linear(self.width, 128)
        self.fc2 = nn.Linear(128, 1)
    def forward(self, v):
        batch_size, n = v.shape[0], v.shape[1]
        a = v.clone()  # keep the raw (a(x), x) pairs for the kernel networks

        v = self.fc0(v)

        # four operator blocks: v <- ReLU(BN(LowRank(v) + W v))
        v1 = self.net1(v, a)
        v2 = self.w1(v)
        v = v1 + v2
        v = self.bn1(v.reshape(-1, self.width)).view(batch_size, n, self.width)
        v = F.relu(v)

        v1 = self.net2(v, a)
        v2 = self.w2(v)
        v = v1 + v2
        v = self.bn2(v.reshape(-1, self.width)).view(batch_size, n, self.width)
        v = F.relu(v)

        v1 = self.net3(v, a)
        v2 = self.w3(v)
        v = v1 + v2
        v = self.bn3(v.reshape(-1, self.width)).view(batch_size, n, self.width)
        v = F.relu(v)

        # the last block has no ReLU before the projection head
        v1 = self.net4(v, a)
        v2 = self.w4(v)
        v = v1 + v2
        v = self.bn4(v.reshape(-1, self.width)).view(batch_size, n, self.width)

        v = self.fc1(v)
        v = F.relu(v)
        v = self.fc2(v)

        return v.squeeze()

    def count_params(self):
        c = 0
        for p in self.parameters():
            c += reduce(operator.mul, list(p.size()))
        return c
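# Illustrative usage (a sketch; mirrors the configuration below):
#   model = MyNet(s=8192)          # width=32, rank=4 by default
#   x = torch.randn(5, 8192, 2)    # (batch, grid, (a(x), x))
#   u = model(x)                   # -> (5, 8192)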
################################################################
# configs
################################################################
ntrain = 1000
ntest = 200

sub = 1  # subsampling rate
h = 2**13 // sub  # grid resolution: 8192 points at sub=1
s = h

batch_size = 5
learning_rate = 0.001
################################################################
# reading data and normalization
################################################################
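# burgers_data_R10.mat holds pairs on a 2^13-point grid: 'a' is the input
# (initial condition) and 'u' the corresponding Burgers solution, as in the
# FNO paper's 1D benchmark. MatReader and the normalizers come from utilities3.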
dataloader = MatReader('data/burgers_data_R10.mat')
x_data = dataloader.read_field('a')[:,::sub]
y_data = dataloader.read_field('u')[:,::sub]
x_train = x_data[:ntrain,:]
y_train = y_data[:ntrain,:]
x_test = x_data[-ntest:,:]
y_test = y_data[-ntest:,:]
x_normalizer = UnitGaussianNormalizer(x_train)
x_train = x_normalizer.encode(x_train)
x_test = x_normalizer.encode(x_test)
y_normalizer = UnitGaussianNormalizer(y_train)
y_train = y_normalizer.encode(y_train)
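# Append the spatial coordinate as a second input channel, so each grid
# point carries the (a(x), x) pair that phi/psi expect.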
grid = np.linspace(0, 2*np.pi, s).reshape(1, s, 1)
grid = torch.tensor(grid, dtype=torch.float)
x_train = torch.cat([x_train.reshape(ntrain,s,1), grid.repeat(ntrain,1,1)], dim=2)
x_test = torch.cat([x_test.reshape(ntest,s,1), grid.repeat(ntest,1,1)], dim=2)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train), batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=batch_size, shuffle=False)
model = MyNet(s).cuda()
print(model.count_params())
################################################################
# training and evaluation
################################################################
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
epochs = 500
myloss = LpLoss(size_average=False)
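# LpLoss (from utilities3) with size_average=False returns the relative L2
# error summed over the batch, which is why the running totals are divided
# by ntrain / ntest below.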
y_normalizer.cuda()
for ep in range(epochs):
    model.train()
    t1 = default_timer()
    train_mse = 0
    train_l2 = 0
    for x, y in train_loader:
        x, y = x.cuda(), y.cuda()

        optimizer.zero_grad()
        out = model(x).reshape(batch_size, s)

        # MSE in normalized space is tracked for logging only
        mse = F.mse_loss(out.view(batch_size, -1), y.view(batch_size, -1), reduction='mean')

        # the relative L2 loss on the decoded (physical-scale) fields is optimized
        out = y_normalizer.decode(out)
        y = y_normalizer.decode(y)
        loss = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
        loss.backward()

        optimizer.step()
        train_mse += mse.item()
        train_l2 += loss.item()

    scheduler.step()
    model.eval()

    # y_test was never encoded, so only the model output needs decoding here
    test_l2 = 0.0
    with torch.no_grad():
        for x, y in test_loader:
            x, y = x.cuda(), y.cuda()

            out = model(x).reshape(batch_size, s)
            out = y_normalizer.decode(out)
            test_l2 += myloss(out.view(batch_size, -1), y.view(batch_size, -1)).item()

    train_mse /= len(train_loader)
    train_l2 /= ntrain
    test_l2 /= ntest

    t2 = default_timer()
    print(ep, t2-t1, train_mse, train_l2, test_l2)