import org.apache.poi.hssf.usermodel.HSSFWorkbook;
import org.apache.poi.ss.usermodel.*;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

public class TakeFilePathAndName {
    public static void main(String[] args) throws IOException {
        // Directory whose file names we want to collect.
        // Backslashes in Java string literals must be escaped ("\\").
        String path = "E:\\lu\\dataset\\original\\lu\\dataset";
        File dir = new File(path);
        File[] array = dir.listFiles();
        if (array == null) {
            throw new IOException("Not a readable directory: " + path);
        }
        Workbook worktrain = new HSSFWorkbook();
        Sheet trainsheet = worktrain.createSheet("sheet1");
        int traincount = 0;
        for (File f : array) {
            String name = f.getName();
            // Keep only short .png names (e.g. "0001.png"); note && for logical AND.
            if (name.endsWith(".png") && name.length() < 9) {
                Row row = trainsheet.createRow(traincount);
                row.createCell(0).setCellValue(name);                              // image file
                row.createCell(1).setCellValue(name.replace(".png", "label.png")); // label file
                traincount++;
            }
        }
        // Write the workbook and close the stream via try-with-resources.
        File trainFile = new File("E:\\lu\\dataset\\original\\lu\\excell\\train.xls");
        try (FileOutputStream trainStream = new FileOutputStream(trainFile)) {
            worktrain.write(trainStream);
        }
        worktrain.close();
    }
}
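
The Java utility above writes each image file name and its matching label file name ("xxxx.png" / "xxxxlabel.png") into train.xls. The PyTorch script below, presumably reading that file list through dataloader.py, trains a ResNet-UNet for binary segmentation.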
import copy
import time
from collections import defaultdict

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torchvision import transforms

from dataloader import rgbSensorDataset, ToTensor
from loss import dice_loss
from net import ResNetUNet

def calc_loss(pred, target, metrics, bce_weight=0.5):
    # Weighted sum of BCE (computed on logits) and soft Dice (on probabilities).
    bce = F.binary_cross_entropy_with_logits(pred, target)
    pred = torch.sigmoid(pred)  # F.sigmoid is deprecated; use torch.sigmoid
    dice = dice_loss(pred, target)
    loss = bce * bce_weight + dice * (1 - bce_weight)
    # Accumulate per-batch sums so per-sample averages can be reported later.
    metrics['bce'] += bce.item() * target.size(0)
    metrics['dice'] += dice.item() * target.size(0)
    metrics['loss'] += loss.item() * target.size(0)
    return loss
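
# `dice_loss` comes from loss.py, which is not shown in this listing. For
# reference, a common soft-Dice formulation looks like the sketch below (an
# assumption -- the author's exact implementation may differ), kept commented
# out so the imported version is the one actually used:
#
# def dice_loss(pred, target, smooth=1.):
#     pred = pred.contiguous()
#     target = target.contiguous()
#     intersection = (pred * target).sum(dim=2).sum(dim=2)
#     denom = pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2)
#     return (1 - ((2. * intersection + smooth) / (denom + smooth))).mean()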

def print_metrics(metrics, epoch_samples, phase):
    outputs = []
    for k in metrics.keys():
        outputs.append("{}: {:.4f}".format(k, metrics[k] / epoch_samples))
    print("{}: {}".format(phase, ", ".join(outputs)))

def train_model(model, optimizer, scheduler, num_epochs=25):
    best_model_wts = copy.deepcopy(model.state_dict())
    best_loss = 1e10

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        since = time.time()

        # Each epoch has a training and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                for param_group in optimizer.param_groups:
                    print("LR", param_group['lr'])
                model.train()  # set model to training mode
            else:
                model.eval()   # set model to evaluation mode

            metrics = defaultdict(float)
            epoch_samples = 0

            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Zero the parameter gradients.
                optimizer.zero_grad()

                # Forward pass; track gradient history only in the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    loss = calc_loss(outputs, labels, metrics)

                    # Backward + optimize only in the training phase.
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Statistics.
                epoch_samples += inputs.size(0)

            if phase == 'train':
                # Since PyTorch 1.1 the scheduler should step after optimizer.step(),
                # so it is called once here, at the end of the training phase.
                scheduler.step()

            print_metrics(metrics, epoch_samples, phase)
            epoch_loss = metrics['loss'] / epoch_samples

            # Keep a deep copy of the best model (lowest validation loss).
            if phase == 'val' and epoch_loss < best_loss:
                print("saving best model")
                best_loss = epoch_loss
                best_model_wts = copy.deepcopy(model.state_dict())

        time_elapsed = time.time() - since
        print('{:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))

    print('Best val loss: {:.4f}'.format(best_loss))

    # Load the best model weights before returning.
    model.load_state_dict(best_model_wts)
    return model

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

num_class = 1
model = ResNetUNet(n_class=num_class)
model = nn.DataParallel(model)
model = model.to(device)

# Optionally freeze the backbone layers:
# for l in model.base_layers:
#     for param in l.parameters():
#         param.requires_grad = False

# Only parameters with requires_grad=True are passed to the optimizer.
optimizer_ft = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-4)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=30, gamma=0.1)
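
# The original listing never defines `dataloaders`, although train_model
# iterates over dataloaders['train'] and dataloaders['val']. Below is a
# minimal, assumed reconstruction: the rgbSensorDataset constructor arguments
# (xls_file, root_dir, transform) and the file 'val.xls' are hypothetical and
# must match the actual code in dataloader.py.
trans = transforms.Compose([ToTensor()])
train_set = rgbSensorDataset(xls_file='train.xls', root_dir='E:/lu/dataset/original/lu/dataset', transform=trans)
val_set = rgbSensorDataset(xls_file='val.xls', root_dir='E:/lu/dataset/original/lu/dataset', transform=trans)
dataloaders = {
    'train': DataLoader(train_set, batch_size=4, shuffle=True, num_workers=0),
    'val': DataLoader(val_set, batch_size=4, shuffle=False, num_workers=0),
}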
model = train_model(model, optimizer_ft, exp_lr_scheduler, num_epochs=60)
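
# Persist the best weights for later inference (the original script never
# saves them; the output file name here is an assumption).
torch.save(model.state_dict(), 'best_resnet_unet.pth')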