In [35]:
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, random_split
from PIL import Image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
In [2]:
np.random.seed(42)
torch.random.manual_seed(42)
Out[2]:
<torch._C.Generator at 0x7f7419087810>
In [3]:
# Find the maximum pixel value across the dataset (used later for normalization)
def find_max_pixel_value(image_dir):
    max_pixel_value = 0.0
    for filename in os.listdir(image_dir):
        if filename.endswith('.npy'):
            image_path = os.path.join(image_dir, filename)
            image = np.load(image_path).astype(np.float32)
            max_pixel_value = max(max_pixel_value, image.max())
    return max_pixel_value

# Compute the maximum pixel value over the training images
image_dir = './2022data/new_train_2021/train/'
max_pixel_value = find_max_pixel_value(image_dir)
print(f"Maximum pixel value in the dataset: {max_pixel_value}")
Maximum pixel value in the dataset: 92.64960479736328
In [4]:
class NO2Dataset(Dataset):
    def __init__(self, image_dir, mask_dir):
        self.image_dir = image_dir
        self.mask_dir = mask_dir
        self.image_filenames = [f for f in os.listdir(image_dir) if f.endswith('.npy')]  # only load .npy files
        self.mask_filenames = [f for f in os.listdir(mask_dir) if f.endswith('.jpg')]    # only load .jpg files

    def __len__(self):
        return len(self.image_filenames)

    def __getitem__(self, idx):
        image_path = os.path.join(self.image_dir, self.image_filenames[idx])
        mask_idx = np.random.choice(self.mask_filenames)
        mask_path = os.path.join(self.mask_dir, mask_idx)

        # Load the image (.npy) and normalize by the dataset maximum; shape (96, 96, 1)
        image = np.expand_dims(np.load(image_path).astype(np.float32), axis=2) / max_pixel_value

        # Load the mask (.jpg); set all non-zero values to 1, keep zeros as 0
        mask = np.array(Image.open(mask_path).convert('L')).astype(np.float32)
        mask = np.where(mask != 0, 1.0, 0.0)

        # Keep the mask shape as (96, 96, 1)
        mask = mask[:, :, np.newaxis]

        # Apply the mask to hide part of the NO2 field
        masked_image = image.copy()
        masked_image[:, :, 0] = image[:, :, 0] * mask.squeeze()

        # Model input and target
        X = masked_image[:, :, :1]  # masked NO2 field, shape (96, 96, 1)
        y = image[:, :, 0:1]        # target NO2 field, shape (96, 96, 1)

        # Convert to (channels, height, width)
        X = np.transpose(X, (2, 0, 1))        # (1, 96, 96)
        y = np.transpose(y, (2, 0, 1))        # (1, 96, 96)
        mask = np.transpose(mask, (2, 0, 1))  # (1, 96, 96)

        return torch.tensor(X, dtype=torch.float32), torch.tensor(y, dtype=torch.float32), torch.tensor(mask, dtype=torch.float32)

# Instantiate the dataset and data loaders
image_dir = './2022data/new_train_2021/train/'
mask_dir = './2022data/new_train_2021/mask/20/'

print(f"checkpoint before Generator is OK")
checkpoint before Generator is OK
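Before building the loaders, a quick check on one sample (a hypothetical snippet, not part of the original notebook, assuming the directories defined above exist) confirms the (1, 96, 96) tensor shapes the model expects:

# Hypothetical sanity check on a single sample
sample_set = NO2Dataset(image_dir, mask_dir)
X0, y0, m0 = sample_set[0]
print(X0.shape, y0.shape, m0.shape)  # each expected to be torch.Size([1, 96, 96])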
In [5]:
train_set = NO2Dataset(image_dir, mask_dir)
train_loader = DataLoader(train_set, batch_size=64, shuffle=True, num_workers=8)

val_set = NO2Dataset('./2022data/new_train_2021/valid/', mask_dir)
val_loader = DataLoader(val_set, batch_size=64, shuffle=False, num_workers=4)

test_set = NO2Dataset('./2022data/new_train_2021/test/', mask_dir)
test_loader = DataLoader(test_set, batch_size=64, shuffle=False, num_workers=4)
In [6]:
# Visualize the original, masked, and reconstructed fields side by side
def visualize_feature(input_feature, masked_feature, output_feature, title):
    plt.figure(figsize=(12, 6))
    plt.subplot(1, 3, 1)
    plt.imshow(input_feature[0].cpu().numpy(), cmap='RdYlGn_r')
    plt.title(title + " Input")
    plt.subplot(1, 3, 2)
    plt.imshow(masked_feature[0].cpu().numpy(), cmap='RdYlGn_r')
    plt.title(title + " Masked")
    plt.subplot(1, 3, 3)
    plt.imshow(output_feature[0].detach().cpu().numpy(), cmap='RdYlGn_r')
    plt.title(title + " Recovery")
    plt.show()
In [7]:
class Conv(nn.Sequential):
    def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1, stride=1, bias=False):
        super(Conv, self).__init__(
            nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, bias=bias,
                      dilation=dilation, stride=stride,
                      padding=((stride - 1) + dilation * (kernel_size - 1)) // 2)
        )
In [8]:
class ConvBNReLU(nn.Sequential):
    def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1, stride=1,
                 norm_layer=nn.BatchNorm2d, bias=False):
        super(ConvBNReLU, self).__init__(
            nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, bias=bias,
                      dilation=dilation, stride=stride,
                      padding=((stride - 1) + dilation * (kernel_size - 1)) // 2),
            norm_layer(out_channels),
            nn.ReLU()
        )
In [9]:
class SeparableBNReLU(nn.Sequential):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1,
                 norm_layer=nn.BatchNorm2d):
        super(SeparableBNReLU, self).__init__(
            # Depthwise convolution: adjusts spatial information only
            nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride,
                      dilation=dilation,
                      padding=((stride - 1) + dilation * (kernel_size - 1)) // 2,
                      groups=in_channels, bias=False),
            norm_layer(in_channels),  # normalize over the input channels
            # Pointwise convolution: expands to the output channels
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            nn.ReLU6()
        )
In [10]:
class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)

        # If the input and output channels (or the stride) differ, downsample the identity path
        self.downsample = downsample
        if in_channels != out_channels or stride != 1:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )

    def forward(self, x):
        identity = x
        if self.downsample is not None:
            identity = self.downsample(x)

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        out += identity
        out = self.relu(out)
        return out
In [11]:
class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU6, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, 1, 1, 0, bias=True)
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, 1, 1, 0, bias=True)
        self.drop = nn.Dropout(drop, inplace=True)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
In [12]:
class MultiHeadAttentionBlock(nn.Module):
    def __init__(self, embed_dim, num_heads, dropout=0.1):
        super(MultiHeadAttentionBlock, self).__init__()
        self.attention = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout)
        self.norm = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # (B, C, H, W) -> (HW, B, C) for nn.MultiheadAttention compatibility
        B, C, H, W = x.shape
        x = x.view(B, C, H * W).permute(2, 0, 1)

        # Apply multi-head self-attention
        attn_output, _ = self.attention(x, x, x)

        # Apply normalization and dropout
        attn_output = self.norm(attn_output)
        attn_output = self.dropout(attn_output)

        # Reshape back to (B, C, H, W)
        attn_output = attn_output.permute(1, 2, 0).view(B, C, H, W)
        return attn_output
In [13]:
class SpatialAttentionBlock(nn.Module):
    def __init__(self):
        super(SpatialAttentionBlock, self).__init__()
        self.conv = nn.Conv2d(2, 1, kernel_size=7, padding=3, bias=False)

    def forward(self, x):                               # (B, C, H, W)
        avg_out = torch.mean(x, dim=1, keepdim=True)    # (B, 1, H, W)
        max_out, _ = torch.max(x, dim=1, keepdim=True)  # (B, 1, H, W)
        out = torch.cat([avg_out, max_out], dim=1)      # (B, 2, H, W)
        out = torch.sigmoid(self.conv(out))             # (B, 1, H, W)
        return x * out                                  # (B, C, H, W)
In [14]:
class DecoderAttentionBlock(nn.Module):
    def __init__(self, in_channels):
        super(DecoderAttentionBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, in_channels // 2, kernel_size=1)
        self.conv2 = nn.Conv2d(in_channels // 2, in_channels, kernel_size=1)
        self.spatial_attention = SpatialAttentionBlock()

    def forward(self, x):
        # Channel attention
        b, c, h, w = x.size()
        avg_pool = F.adaptive_avg_pool2d(x, 1)
        max_pool = F.adaptive_max_pool2d(x, 1)

        avg_out = self.conv1(avg_pool)
        max_out = self.conv1(max_pool)

        out = avg_out + max_out
        out = torch.sigmoid(self.conv2(out))

        # Followed by spatial attention
        out = x * out
        out = self.spatial_attention(out)
        return out
In [15]:
class SEBlock(nn.Module):
    def __init__(self, in_channels, reduced_dim):
        super(SEBlock, self).__init__()
        self.se = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),  # global average pooling
            nn.Conv2d(in_channels, reduced_dim, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_dim, in_channels, kernel_size=1),
            nn.Sigmoid()  # sigmoid yields normalized per-channel weights
        )

    def forward(self, x):
        return x * self.se(x)
In [16]:
def masked_mse_loss(preds, target, mask):
    loss = (preds - target) ** 2
    loss = loss.mean(dim=-1)                 # average over the last dimension
    loss = (loss * mask).sum() / mask.sum()  # keep only the loss where mask == 1
    return loss
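In effect, the function above restricts the squared reconstruction error to pixels where the mask equals 1 (after an intermediate average over the last dimension); schematically, for mask values $m_i \in \{0, 1\}$:

$$\mathcal{L}_{\text{masked}} = \frac{\sum_i m_i \,(\hat{y}_i - y_i)^2}{\sum_i m_i}$$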
In [17]:
# Masked Autoencoder model
class MaskedAutoencoder(nn.Module):
    def __init__(self):
        super(MaskedAutoencoder, self).__init__()
        self.encoder = nn.Sequential(
            Conv(1, 32, kernel_size=3, stride=2),
            nn.ReLU(),
            SEBlock(32, 32),
            ConvBNReLU(32, 64, kernel_size=3, stride=2),
            ResidualBlock(64, 64),
            SeparableBNReLU(64, 128, kernel_size=3, stride=2),
            MultiHeadAttentionBlock(embed_dim=128, num_heads=4),
            SEBlock(128, 128)
        )
        self.mlp = Mlp(in_features=128, hidden_features=256, out_features=128, act_layer=nn.ReLU6, drop=0.1)
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(128, 32, kernel_size=3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            DecoderAttentionBlock(32),
            nn.ConvTranspose2d(32, 16, kernel_size=3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            DecoderAttentionBlock(16),
            nn.ReLU(),
            nn.ConvTranspose2d(16, 1, kernel_size=3, stride=2, padding=1, output_padding=1),  # output_padding=1 restores 96x96
            nn.Sigmoid()
        )

    def forward(self, x):
        encoded = self.mlp(self.encoder(x))
        decoded = self.decoder(encoded)
        return decoded

# Instantiate the model, loss function, and optimizer
model = MaskedAutoencoder()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
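As a quick sanity check (a minimal sketch, not part of the original notebook, assuming the 96x96 single-channel inputs produced by NO2Dataset): the three stride-2 encoder stages reduce 96 -> 48 -> 24 -> 12, and the three transposed convolutions in the decoder mirror the map back to 96x96.

# Hypothetical shape check on a dummy batch
with torch.no_grad():
    dummy = torch.zeros(2, 1, 96, 96)   # batch of 2 masked NO2 fields
    latent = model.encoder(dummy)       # expected: torch.Size([2, 128, 12, 12])
    recon = model(dummy)                # expected: torch.Size([2, 1, 96, 96])
print(latent.shape, recon.shape)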
In [18]:
# Training loop for one epoch
def train_epoch(model, device, data_loader, criterion, optimizer):
    model.train()
    running_loss = 0.0
    for batch_idx, (X, y, mask) in enumerate(data_loader):
        X, y, mask = X.to(device), y.to(device), mask.to(device)
        optimizer.zero_grad()
        reconstructed = model(X)
        loss = masked_mse_loss(reconstructed, y, mask)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    return running_loss / (batch_idx + 1)
In [19]:
# Evaluation loop
def evaluate(model, device, data_loader, criterion):
    model.eval()
    running_loss = 0.0
    with torch.no_grad():
        for batch_idx, (X, y, mask) in enumerate(data_loader):
            X, y, mask = X.to(device), y.to(device), mask.to(device)
            reconstructed = model(X)
            if batch_idx == 8:
                rand_ind = np.random.randint(0, len(y))
                # visualize_feature(y[rand_ind], X[rand_ind], reconstructed[rand_ind], title='NO_2')
            loss = masked_mse_loss(reconstructed, y, mask)
            running_loss += loss.item()
    return running_loss / (batch_idx + 1)
In [20]:
# Device setup
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
cuda
In [21]:
model = model.to(device)

num_epochs = 150
train_losses = list()
val_losses = list()
for epoch in range(num_epochs):
    train_loss = train_epoch(model, device, train_loader, criterion, optimizer)
    train_losses.append(train_loss)
    val_loss = evaluate(model, device, val_loader, criterion)
    val_losses.append(val_loss)
    print(f'Epoch {epoch+1}, Train Loss: {train_loss}, Val Loss: {val_loss}')

# Evaluate on the test set
test_loss = evaluate(model, device, test_loader, criterion)
print(f'Test Loss: {test_loss}')
/root/miniconda3/envs/python38/lib/python3.8/site-packages/torch/nn/modules/conv.py:456: UserWarning: Applied workaround for CuDNN issue, install nvrtc.so (Triggered internally at /opt/conda/conda-bld/pytorch_1711403590347/work/aten/src/ATen/native/cudnn/Conv_v8.cpp:80.) return F.conv2d(input, weight, bias, self.stride,
Epoch 1, Train Loss: 5.541112303206351, Val Loss: 0.8067406771030832 Epoch 2, Train Loss: 0.33060450623344884, Val Loss: 0.14416445189333976 Epoch 3, Train Loss: 0.14130625223251922, Val Loss: 0.07359389453492265 Epoch 4, Train Loss: 0.10518970966866587, Val Loss: 0.054381779930058945 Epoch 5, Train Loss: 0.09058622275218148, Val Loss: 0.0465024342720813 Epoch 6, Train Loss: 0.08342431517521189, Val Loss: 0.042179942210303974 Epoch 7, Train Loss: 0.0774571797686868, Val Loss: 0.03831239916542743 Epoch 8, Train Loss: 0.0720803240385555, Val Loss: 0.03571732088606408 Epoch 9, Train Loss: 0.06799363104247413, Val Loss: 0.03523132728135332 Epoch 10, Train Loss: 0.06398953810597943, Val Loss: 0.03190892792128502 Epoch 11, Train Loss: 0.06091008874914639, Val Loss: 0.030253113742838515 Epoch 12, Train Loss: 0.058550740303718936, Val Loss: 0.03257738580887622 Epoch 13, Train Loss: 0.05582124731094085, Val Loss: 0.027309948182169426 Epoch 14, Train Loss: 0.05444160232369879, Val Loss: 0.03076436184346676 Epoch 15, Train Loss: 0.053529950248896195, Val Loss: 0.026180010566369018 Epoch 16, Train Loss: 0.05092262584375421, Val Loss: 0.02586523879398691 Epoch 17, Train Loss: 0.05036925500297265, Val Loss: 0.026086220715908295 Epoch 18, Train Loss: 0.04870546900922746, Val Loss: 0.025190358426659665 Epoch 19, Train Loss: 0.04829096387533312, Val Loss: 0.024286496195387332 Epoch 20, Train Loss: 0.047801207734552105, Val Loss: 0.024341319628218387 Epoch 21, Train Loss: 0.0463638727533958, Val Loss: 0.023777516439874122 Epoch 22, Train Loss: 0.04561143496505103, Val Loss: 0.02462407554242205 Epoch 23, Train Loss: 0.04455085273469444, Val Loss: 0.02330890230517438 Epoch 24, Train Loss: 0.04402760396489, Val Loss: 0.023676151687160453 Epoch 25, Train Loss: 0.04317896270294808, Val Loss: 0.02370590161769948 Epoch 26, Train Loss: 0.042474492900842764, Val Loss: 0.027188481287436284 Epoch 27, Train Loss: 0.0410688633324474, Val Loss: 0.022131468387360267 Epoch 28, Train Loss: 0.04015502951775504, Val Loss: 0.021191479004126913 Epoch 29, Train Loss: 0.039912018183190213, Val Loss: 0.02161621072507919 Epoch 30, Train Loss: 0.039861640131930685, Val Loss: 0.02177569658515301 Epoch 31, Train Loss: 0.03960100152038016, Val Loss: 0.02101492334870582 Epoch 32, Train Loss: 0.03872588457083632, Val Loss: 0.020859015748855916 Epoch 33, Train Loss: 0.038754463954045706, Val Loss: 0.02414150171457453 Epoch 34, Train Loss: 0.03809461849233394, Val Loss: 0.019819263804783212 Epoch 35, Train Loss: 0.03751421304952606, Val Loss: 0.021835624696092404 Epoch 36, Train Loss: 0.03734014398118915, Val Loss: 0.022214002510968674 Epoch 37, Train Loss: 0.03706552038260442, Val Loss: 0.019966345795608582 Epoch 38, Train Loss: 0.036659476251113376, Val Loss: 0.019636615555971227 Epoch 39, Train Loss: 0.036727246869586214, Val Loss: 0.01936723065978669 Epoch 40, Train Loss: 0.03633688813333666, Val Loss: 0.020286126339689216 Epoch 41, Train Loss: 0.035810444339186745, Val Loss: 0.019208339934653425 Epoch 42, Train Loss: 0.03550744545714694, Val Loss: 0.01972645398308622 Epoch 43, Train Loss: 0.035464368694651444, Val Loss: 0.019827014984602622 Epoch 44, Train Loss: 0.03506896948678128, Val Loss: 0.02004316175713184 Epoch 45, Train Loss: 0.03495936513298732, Val Loss: 0.019192911129682622 Epoch 46, Train Loss: 0.03483127771841038, Val Loss: 0.018953541115401908 Epoch 47, Train Loss: 0.03463402198171545, Val Loss: 0.018771914527454275 Epoch 48, Train Loss: 0.03408609382302712, Val Loss: 0.018758975068463923 Epoch 49, Train Loss: 
0.03452993459054502, Val Loss: 0.018336334998937363 Epoch 50, Train Loss: 0.034099031547441594, Val Loss: 0.019093293062549956 Epoch 51, Train Loss: 0.03445967665947644, Val Loss: 0.018671645683811067 Epoch 52, Train Loss: 0.03385696139263544, Val Loss: 0.017988349291238378 Epoch 53, Train Loss: 0.03406877570117997, Val Loss: 0.018068110510865425 Epoch 54, Train Loss: 0.03348344178721968, Val Loss: 0.018683044398401644 Epoch 55, Train Loss: 0.033462831668094196, Val Loss: 0.01905706045316889 Epoch 56, Train Loss: 0.033128469962637686, Val Loss: 0.01867042989172834 Epoch 57, Train Loss: 0.0332745431941607, Val Loss: 0.019846445504338183 Epoch 58, Train Loss: 0.03308211129081812, Val Loss: 0.01826892614840193 Epoch 59, Train Loss: 0.03278694228766415, Val Loss: 0.022516568488580115 Epoch 60, Train Loss: 0.03246014836659122, Val Loss: 0.01806999350640368 Epoch 61, Train Loss: 0.0331528295534814, Val Loss: 0.01772232149588935 Epoch 62, Train Loss: 0.03278059815674757, Val Loss: 0.01812060377461479 Epoch 63, Train Loss: 0.032278176842141994, Val Loss: 0.01805540711242468 Epoch 64, Train Loss: 0.03201383460521874, Val Loss: 0.018378542062449963 Epoch 65, Train Loss: 0.03193402631005003, Val Loss: 0.017855498166952994 Epoch 66, Train Loss: 0.03141010671326545, Val Loss: 0.01813684691219254 Epoch 67, Train Loss: 0.03162443816969528, Val Loss: 0.017312405214823308 Epoch 68, Train Loss: 0.03134946423997569, Val Loss: 0.017035803282038964 Epoch 69, Train Loss: 0.030821436257884565, Val Loss: 0.017176391457782148 Epoch 70, Train Loss: 0.030857550524241103, Val Loss: 0.01778144468652441 Epoch 71, Train Loss: 0.03145846045935927, Val Loss: 0.017036813350909567 Epoch 72, Train Loss: 0.03082356479425522, Val Loss: 0.01754499076211706 Epoch 73, Train Loss: 0.03057446662929997, Val Loss: 0.016873343847692013 Epoch 74, Train Loss: 0.030142722530482793, Val Loss: 0.017114325763380275 Epoch 75, Train Loss: 0.0297475472960764, Val Loss: 0.017896422284080626 Epoch 76, Train Loss: 0.02986417829462912, Val Loss: 0.016979403338058197 Epoch 77, Train Loss: 0.030155790255440722, Val Loss: 0.016632370690399027 Epoch 78, Train Loss: 0.02987812078698019, Val Loss: 0.017218250702036187 Epoch 79, Train Loss: 0.02965712085761855, Val Loss: 0.016456886016307994 Epoch 80, Train Loss: 0.029867385275068537, Val Loss: 0.016108868465303107 Epoch 81, Train Loss: 0.029616706633726054, Val Loss: 0.016850862830401735 Epoch 82, Train Loss: 0.02933939000190535, Val Loss: 0.017380977188177566 Epoch 83, Train Loss: 0.028856007063591024, Val Loss: 0.016677292380878266 Epoch 84, Train Loss: 0.029245234613793088, Val Loss: 0.016243027404267738 Epoch 85, Train Loss: 0.029124773610218438, Val Loss: 0.016707272605693088 Epoch 86, Train Loss: 0.02889745979731941, Val Loss: 0.01667517395888237 Epoch 87, Train Loss: 0.028780636237522143, Val Loss: 0.015974930111081042 Epoch 88, Train Loss: 0.0290858921784479, Val Loss: 0.01647984809143112 Epoch 89, Train Loss: 0.028605496862513125, Val Loss: 0.015814711419033244 Epoch 90, Train Loss: 0.02866147620092451, Val Loss: 0.01892404787321674 Epoch 91, Train Loss: 0.028418820038174107, Val Loss: 0.01616615823846548 Epoch 92, Train Loss: 0.028970944983637437, Val Loss: 0.015930495700462066 Epoch 93, Train Loss: 0.02812033420796767, Val Loss: 0.015577566691060016 Epoch 94, Train Loss: 0.027900781900042276, Val Loss: 0.016411741838810293 Epoch 95, Train Loss: 0.028156488249215756, Val Loss: 0.015642933785281282 Epoch 96, Train Loss: 0.027669002046495413, Val Loss: 0.01564073005810063 Epoch 97, Train Loss: 
0.02797757544084988, Val Loss: 0.01616466465465566 Epoch 98, Train Loss: 0.027837259815813517, Val Loss: 0.01699387704200567 Epoch 99, Train Loss: 0.02773604567291814, Val Loss: 0.015504092572534338 Epoch 100, Train Loss: 0.02741758727020746, Val Loss: 0.015247883136443634 Epoch 101, Train Loss: 0.02707562789562705, Val Loss: 0.015558899360451293 Epoch 102, Train Loss: 0.027159787612832578, Val Loss: 0.015182257392146487 Epoch 103, Train Loss: 0.027029822105239625, Val Loss: 0.014660503893615083 Epoch 104, Train Loss: 0.02699657593878497, Val Loss: 0.016841756120482658 Epoch 105, Train Loss: 0.026641362756051144, Val Loss: 0.015178967544690091 Epoch 106, Train Loss: 0.026524744587222385, Val Loss: 0.015554199926555157 Epoch 107, Train Loss: 0.026474817848289083, Val Loss: 0.015399079710403656 Epoch 108, Train Loss: 0.02636850485990269, Val Loss: 0.014777421396463476 Epoch 109, Train Loss: 0.02637453050322413, Val Loss: 0.015275213094626336 Epoch 110, Train Loss: 0.02607358055282659, Val Loss: 0.016890957614684357 Epoch 111, Train Loss: 0.026133586770709285, Val Loss: 0.015139183485286032 Epoch 112, Train Loss: 0.02617257334302924, Val Loss: 0.014704703016484038 Epoch 113, Train Loss: 0.026084138217840926, Val Loss: 0.014918764835183925 Epoch 114, Train Loss: 0.025832627078512777, Val Loss: 0.01494563212420078 Epoch 115, Train Loss: 0.02605823659307837, Val Loss: 0.014487974504207043 Epoch 116, Train Loss: 0.025865597622936103, Val Loss: 0.014469134779845147 Epoch 117, Train Loss: 0.025718001264166693, Val Loss: 0.013978753100208779 Epoch 118, Train Loss: 0.02561279770624233, Val Loss: 0.01455160214545879 Epoch 119, Train Loss: 0.025601031165295295, Val Loss: 0.015720585519646075 Epoch 120, Train Loss: 0.025754293742806685, Val Loss: 0.013814986822135906 Epoch 121, Train Loss: 0.02534578327408231, Val Loss: 0.014853738644655714 Epoch 122, Train Loss: 0.02561174121006752, Val Loss: 0.014788057021004088 Epoch 123, Train Loss: 0.02533768888859622, Val Loss: 0.014425865988782111 Epoch 124, Train Loss: 0.025395122024293847, Val Loss: 0.014166925221364549 Epoch 125, Train Loss: 0.025411863934940996, Val Loss: 0.014836331670905681 Epoch 126, Train Loss: 0.025214647187420048, Val Loss: 0.01417682920285362 Epoch 127, Train Loss: 0.024879908288079025, Val Loss: 0.014164314981787763 Epoch 128, Train Loss: 0.02494473186126501, Val Loss: 0.014208773448270685 Epoch 129, Train Loss: 0.02468084254381755, Val Loss: 0.013683844337913585 Epoch 130, Train Loss: 0.0248352900521066, Val Loss: 0.014833704508999561 Epoch 131, Train Loss: 0.024615347561231404, Val Loss: 0.016790931608448637 Epoch 132, Train Loss: 0.024628470901806445, Val Loss: 0.013669065913145846 Epoch 133, Train Loss: 0.024401855987433486, Val Loss: 0.014544136485362307 Epoch 134, Train Loss: 0.02425686465054311, Val Loss: 0.014493834742523254 Epoch 135, Train Loss: 0.02475559137552801, Val Loss: 0.013708425725394107 Epoch 136, Train Loss: 0.024078373256026818, Val Loss: 0.014549214828838693 Epoch 137, Train Loss: 0.024223965633891325, Val Loss: 0.013578887454214249 Epoch 138, Train Loss: 0.024396276563010383, Val Loss: 0.013251736344016612 Epoch 139, Train Loss: 0.024004749286161586, Val Loss: 0.013333805103568321 Epoch 140, Train Loss: 0.02389194364700697, Val Loss: 0.014107016430414737 Epoch 141, Train Loss: 0.023637132873005923, Val Loss: 0.013322851898029764 Epoch 142, Train Loss: 0.023719912169605582, Val Loss: 0.014070579683051464 Epoch 143, Train Loss: 0.02377868151418579, Val Loss: 0.013563529806251222 Epoch 144, Train Loss: 
0.02362075615619312, Val Loss: 0.014379620492616867 Epoch 145, Train Loss: 0.023822628134713236, Val Loss: 0.01308334250240884 Epoch 146, Train Loss: 0.02378806389406719, Val Loss: 0.013488665500536878 Epoch 147, Train Loss: 0.023415484050821767, Val Loss: 0.01323556466067725 Epoch 148, Train Loss: 0.023618425456889434, Val Loss: 0.013999837430867744 Epoch 149, Train Loss: 0.023620333203875563, Val Loss: 0.013482759567968388 Epoch 150, Train Loss: 0.02325812268969232, Val Loss: 0.012960578988682716 Test Loss: 0.028638894522660656
In [29]:
tr_ind = list(range(len(train_losses)))
val_ind = list(range(len(val_losses)))
plt.plot(train_losses[1:], label='train_loss')
plt.plot(val_losses[1:], label='val_loss')
plt.legend(loc='best')
Out[29]:
<matplotlib.legend.Legend at 0x7f70bbbcb3d0>
In [30]:
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_percentage_error, mean_absolute_error
In [31]:
def cal_ioa(y_true, y_pred):
    # Means of the observed and predicted values
    mean_observed = np.mean(y_true)
    mean_predicted = np.mean(y_pred)

    # Index of agreement (IoA)
    numerator = np.sum((y_true - y_pred) ** 2)
    denominator = 2 * np.sum((np.abs(y_true - mean_observed) + np.abs(y_pred - mean_predicted)) ** 2)
    IoA = 1 - (numerator / denominator)

    return IoA
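For reference, the commonly used Willmott index of agreement is

$$d = 1 - \frac{\sum_i (P_i - O_i)^2}{\sum_i \left(\lvert P_i - \bar{O}\rvert + \lvert O_i - \bar{O}\rvert\right)^2},$$

whereas the implementation above centers the predictions on their own mean and doubles the denominator sum, so the reported IoA values are a variant of $d$ rather than the textbook definition.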
In [32]:
eva_list = list()
device = 'cpu'
model = model.to(device)
with torch.no_grad():
    for batch_idx, (X, y, mask) in enumerate(test_loader):
        X, y, mask = X.to(device), y.to(device), mask.to(device)
        mask_rev = (torch.squeeze(mask, dim=1) == 0) * 1  # invert the mask to select the inpainted region
        reconstructed = model(X)
        rev_data = y * max_pixel_value
        rev_recon = reconstructed * max_pixel_value
        # Evaluate only the reconstructed (previously masked-out) pixels
        data_label = torch.squeeze(rev_data, dim=1) * mask_rev
        data_label = data_label[mask_rev == 1]
        recon_no2 = torch.squeeze(rev_recon, dim=1) * mask_rev
        recon_no2 = recon_no2[mask_rev == 1]
        mae = mean_absolute_error(data_label, recon_no2)
        rmse = np.sqrt(mean_squared_error(data_label, recon_no2))
        mape = mean_absolute_percentage_error(data_label, recon_no2)
        r2 = r2_score(data_label, recon_no2)
        ioa = cal_ioa(data_label.detach().numpy(), recon_no2.detach().numpy())
        r = np.corrcoef(data_label, recon_no2)[0, 1]
        eva_list.append([mae, rmse, mape, r2, ioa, r])
In [34]:
pd.DataFrame(eva_list, columns=['mae', 'rmse', 'mape', 'r2', 'ioa', 'r']).describe()
Out[34]:
|  | mae | rmse | mape | r2 | ioa | r |
| --- | --- | --- | --- | --- | --- | --- |
| count | 76.000000 | 76.000000 | 76.000000 | 76.000000 | 76.000000 | 76.000000 |
| mean | 1.839756 | 3.116629 | 0.182020 | 0.887434 | 0.984924 | 0.942480 |
| std | 0.141333 | 0.218296 | 0.009663 | 0.010754 | 0.001488 | 0.005507 |
| min | 1.518803 | 2.497997 | 0.160656 | 0.862124 | 0.981807 | 0.930738 |
| 25% | 1.725160 | 2.954316 | 0.175752 | 0.880888 | 0.984067 | 0.939023 |
| 50% | 1.819375 | 3.084015 | 0.180615 | 0.887742 | 0.984906 | 0.942333 |
| 75% | 1.973080 | 3.293117 | 0.189375 | 0.895522 | 0.986135 | 0.946845 |
| max | 2.156417 | 3.538444 | 0.205928 | 0.909979 | 0.988135 | 0.954369 |
In [45]:
eva_list = list()
device = 'cpu'
model = model.to(device)
with torch.no_grad():
    for batch_idx, (X, y, mask) in enumerate(test_loader):
        X, y, mask = X.to(device), y.to(device), mask.to(device)
        mask_rev = (torch.squeeze(mask, dim=1) == 0) * 1  # invert the mask to select the inpainted region
        reconstructed = model(X)
        rev_data = y * max_pixel_value
        rev_recon = reconstructed * max_pixel_value
        # Evaluate each sample individually, again only over the inpainted pixels
        for i, sample in enumerate(rev_data):
            used_mask = mask_rev[i]
            data_label = sample[0] * used_mask
            recon_no2 = rev_recon[i][0] * used_mask
            data_label = data_label[used_mask == 1]
            recon_no2 = recon_no2[used_mask == 1]
            mae = mean_absolute_error(data_label, recon_no2)
            rmse = np.sqrt(mean_squared_error(data_label, recon_no2))
            mape = mean_absolute_percentage_error(data_label, recon_no2)
            r2 = r2_score(data_label, recon_no2)
            ioa = cal_ioa(data_label.detach().numpy(), recon_no2.detach().numpy())
            r = np.corrcoef(data_label, recon_no2)[0, 1]
            eva_list.append([mae, rmse, mape, r2, ioa, r])
In [46]:
pd.DataFrame(eva_list, columns=['mae', 'rmse', 'mape', 'r2', 'ioa', 'r']).describe()
Out[46]:
|  | mae | rmse | mape | r2 | ioa | r |
| --- | --- | --- | --- | --- | --- | --- |
| count | 4839.000000 | 4839.000000 | 4839.000000 | 4839.000000 | 4839.000000 | 4839.000000 |
| mean | 1.833400 | 2.618025 | 0.181476 | 0.631467 | 0.937835 | 0.813992 |
| std | 1.185956 | 1.683402 | 0.071764 | 0.260356 | 0.053726 | 0.123230 |
| min | 0.248986 | 0.337919 | 0.075559 | -3.769637 | 0.103451 | 0.020267 |
| 25% | 0.843516 | 1.179310 | 0.138988 | 0.537750 | 0.924173 | 0.762655 |
| 50% | 1.335556 | 1.939605 | 0.165166 | 0.682016 | 0.951186 | 0.841513 |
| 75% | 2.759837 | 3.977746 | 0.202966 | 0.792008 | 0.970115 | 0.898809 |
| max | 9.474609 | 10.988250 | 1.344091 | 0.978264 | 0.997251 | 0.989095 |
In [ ]: