33 changes: 17 additions & 16 deletions cleaning/models/SmallUnet/unet.py
@@ -4,13 +4,14 @@
 import gc
 
 import torch
-import torch, torch.nn as nn
+import torch
+import torch.nn as nn
 import torch.functional as F
 from torch.autograd import Variable
 import numpy as np
 
 
-def CreateConvBnRelu(in_channels, out_channels, dilation=1):
+def create_conv_bn_relu(in_channels, out_channels, dilation=1):
     module = nn.Sequential()
     module.add_module('conv', nn.Conv2d(
         in_channels=in_channels,
@@ -30,35 +31,35 @@ def __init__(self):
         super(SmallUnet, self).__init__()
 
         # encoder
-        self.add_module('EncConvBnRelu1_1', CreateConvBnRelu(3, 64))
-        self.add_module('EncConvBnRelu1_2', CreateConvBnRelu(64, 64))
+        self.add_module('EncConvBnRelu1_1', create_conv_bn_relu(3, 64))
+        self.add_module('EncConvBnRelu1_2', create_conv_bn_relu(64, 64))
         self.add_module('EncMp1', nn.MaxPool2d(kernel_size=2))
 
-        self.add_module('EncConvBnRelu2_1', CreateConvBnRelu(64, 128))
-        self.add_module('EncConvBnRelu2_2', CreateConvBnRelu(128, 128))
+        self.add_module('EncConvBnRelu2_1', create_conv_bn_relu(64, 128))
+        self.add_module('EncConvBnRelu2_2', create_conv_bn_relu(128, 128))
         self.add_module('EncMp2', nn.MaxPool2d(kernel_size=2))
 
-        self.add_module('EncConvBnRelu3_1', CreateConvBnRelu(128, 256))
-        self.add_module('EncConvBnRelu3_2', CreateConvBnRelu(256, 256))
+        self.add_module('EncConvBnRelu3_1', create_conv_bn_relu(128, 256))
+        self.add_module('EncConvBnRelu3_2', create_conv_bn_relu(256, 256))
         self.add_module('EncMp3', nn.MaxPool2d(kernel_size=2))
 
         # lowest layer
-        self.add_module('ConvBnRelu4_1', CreateConvBnRelu(256, 512))
-        self.add_module('ConvBnRelu4_2', CreateConvBnRelu(512, 512))
+        self.add_module('ConvBnRelu4_1', create_conv_bn_relu(256, 512))
+        self.add_module('ConvBnRelu4_2', create_conv_bn_relu(512, 512))
         self.add_module('Us4', nn.Upsample(scale_factor=2))
 
         # decoder
-        self.add_module('DecConvBnRelu3_1', CreateConvBnRelu(512 + 256, 256))
-        self.add_module('DecConvBnRelu3_2', CreateConvBnRelu(256, 256))
+        self.add_module('DecConvBnRelu3_1', create_conv_bn_relu(512 + 256, 256))
+        self.add_module('DecConvBnRelu3_2', create_conv_bn_relu(256, 256))
         self.add_module('DecUs3', nn.Upsample(scale_factor=2))
 
-        self.add_module('DecConvBnRelu2_1', CreateConvBnRelu(256 + 128, 128))
-        self.add_module('DecConvBnRelu2_2', CreateConvBnRelu(128, 128))
+        self.add_module('DecConvBnRelu2_1', create_conv_bn_relu(256 + 128, 128))
+        self.add_module('DecConvBnRelu2_2', create_conv_bn_relu(128, 128))
         self.add_module('DecUs2', nn.Upsample(scale_factor=2))
 
         # prediction
-        self.add_module('PredConvBnRelu_1', CreateConvBnRelu(128 + 64, 64))
-        self.add_module('PredConvBnRelu_2', CreateConvBnRelu(64, 64))
+        self.add_module('PredConvBnRelu_1', create_conv_bn_relu(128 + 64, 64))
+        self.add_module('PredConvBnRelu_2', create_conv_bn_relu(64, 64))
         self.add_module('PredDense', nn.Conv2d(
             in_channels=64,
             out_channels=1,
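The renamed helper is only partially visible in the hunk above. A minimal sketch of the full Conv-BN-ReLU factory, assuming a 3x3 kernel with padding equal to dilation (so spatial size is preserved) and bias disabled before BatchNorm; the kernel size and the 'bn'/'relu' module names are assumptions, not taken from the diff:

import torch.nn as nn

def create_conv_bn_relu(in_channels, out_channels, dilation=1):
    # Conv -> BatchNorm -> ReLU block used throughout SmallUnet.
    module = nn.Sequential()
    module.add_module('conv', nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,        # assumed; not shown in the diff
        padding=dilation,     # keeps H x W unchanged for a 3x3 kernel
        dilation=dilation,
        bias=False))          # bias is redundant before BatchNorm
    module.add_module('bn', nn.BatchNorm2d(out_channels))
    module.add_module('relu', nn.ReLU(inplace=True))
    return module

The decoder's channel sums (512 + 256, 256 + 128, 128 + 64) imply concatenation skip connections. A hypothetical forward() consistent with those shapes (the real one lies outside this hunk):

import torch

def forward(self, x):
    # Encoder: each stage doubles channels, MaxPool halves resolution.
    e1 = self.EncConvBnRelu1_2(self.EncConvBnRelu1_1(x))
    e2 = self.EncConvBnRelu2_2(self.EncConvBnRelu2_1(self.EncMp1(e1)))
    e3 = self.EncConvBnRelu3_2(self.EncConvBnRelu3_1(self.EncMp2(e2)))
    b = self.ConvBnRelu4_2(self.ConvBnRelu4_1(self.EncMp3(e3)))
    # Decoder: upsample, then concatenate the matching encoder feature map.
    d3 = self.DecConvBnRelu3_2(self.DecConvBnRelu3_1(torch.cat([self.Us4(b), e3], dim=1)))
    d2 = self.DecConvBnRelu2_2(self.DecConvBnRelu2_1(torch.cat([self.DecUs3(d3), e2], dim=1)))
    p = self.PredConvBnRelu_2(self.PredConvBnRelu_1(torch.cat([self.DecUs2(d2), e1], dim=1)))
    return self.PredDense(p)  # single-channel prediction map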
17 changes: 10 additions & 7 deletions cleaning/scripts/fine_tuning.py
@@ -20,15 +20,15 @@
 
 MODEL_LOSS = {
     'UNET': {
-        'model': UNet(n_channels=3,n_classes=1,final_tanh=True),
+        'model': UNet(n_channels=3, n_classes=1, final_tanh=True),
         'loss': CleaningLoss(kind='BCE', with_restore=False)
     },
     'SmallUNET': {
         'model': SmallUnet(),
         'loss': CleaningLoss(kind='BCE', with_restore=False)
     },
     'UNET_MSE': {
-        'model': UNet(n_channels=3,n_classes=1),
+        'model': UNet(n_channels=3, n_classes=1),
         'loss': CleaningLoss(kind='MSE', with_restore=False)
     },
 
@@ -38,23 +38,26 @@
 def parse_args():
     parser = argparse.ArgumentParser()
 
-    parser.add_argument('--model', type=str, required=True, help='What model to use, one of [ "UNET,"UNET_MSE,"SmallUnet"]', default='UNET')
+    parser.add_argument('--model', type=str, required=True,
+                        help='What model to use, one of [ "UNET,"UNET_MSE,"SmallUnet"]', default='UNET')
     parser.add_argument('--n_epochs', type=int, help='Num of epochs for training', default=10)
     parser.add_argument('--datadir', type=str, help='Path to training dataset')
     parser.add_argument('--valdatadir', type=str, help='Path to validation dataset')
     parser.add_argument('--batch_size', type=int, default=8)
     parser.add_argument('--name', type=str, help='Name of the experiment')
     parser.add_argument('--model_path', type=str, default=None, help='Path to model checkpoint')
+    parser.add_argument('--tb_dir_prefix', type=str,
+                        default='/logs/tb_logs_article/fine_tuning_', help='Path to TB dir and prefix of name')
 
-    return parser.parse_args()
-
+    args = parser.parse_args()
+    return args
 
 def get_dataloaders(args):
     train_transform = transforms.Compose([
         transforms.ToTensor(),
         transforms.ColorJitter()
     ])
-    #TODO Make sure that this should be MakeDataSynt and not MakeData from dataloader.py
+    # TODO Make sure that this should be MakeDataSynt and not MakeData from dataloader.py
     dset_synt = MakeDataSynt(args.datadir, args.datadir, train_transform, 1)
     dset_val_synt = MakeDataSynt(args.valdatadir, args.valdatadir, train_transform)
 
@@ -121,7 +124,7 @@ def load_model(model, path):
 
 
 def main(args):
-    tb_dir = '/logs/tb_logs_article/fine_tuning_' + args.name
+    tb_dir = args.tb_dir_prefix + args.name
     tb = SummaryWriter(tb_dir)
 
     train_loader, val_loader = get_dataloaders(args)
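Two loose ends survive this hunk: --model keeps required=True alongside a default (the default can never fire), and the help string's quoting is broken in both the old and new versions. A tightened sketch using argparse's choices, offered as a suggestion rather than part of the PR; the choices list mirrors the MODEL_LOSS keys above:

# Suggested cleanup (not in the PR): let argparse enforce and document
# the valid values instead of a free-form, mis-quoted help string.
parser.add_argument('--model', type=str, required=True,
                    choices=['UNET', 'UNET_MSE', 'SmallUNET'],
                    help='Which model/loss pair from MODEL_LOSS to use')

With choices set, an invalid value fails at parse time with a clear message, and the help text stays correct if the registry keys ever change.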
38 changes: 14 additions & 24 deletions cleaning/scripts/fine_tuning_two_network_added_part.py
@@ -100,18 +100,15 @@ def validate(tb, val_loader, unet, gen, loss_func, global_step ):
 
         # Cleaning prediction
         logits_restor, logits_extract = None, unet(x_input)
-        #1 - Cleaning prediction
+        # 1 - Cleaning prediction
         logits_extract = 1 - logits_extract.unsqueeze(1)
 
         y_restor = 1. - y_restor.type(torch.FloatTensor).cuda().unsqueeze(1)
 
-        #generator prediction based on cleaning prediction
+        # generator prediction based on cleaning prediction
         logits_restore = gen.forward(logits_extract).unsqueeze(1)  # restoration + extraction
-
-
 
         loss = loss_func(1 - (logits_extract + logits_restore), None, 1- y_restor,None )
-
 
         val_loss_epoch.append(loss.cpu().data.numpy())
         iou_scr_without_gan = iou_score(1 - torch.round(logits_extract.squeeze(1)).cpu().long().numpy(),1 - torch.round(y_restor.squeeze(1)).cpu().long().numpy())
@@ -125,15 +122,13 @@ def validate(tb, val_loader, unet, gen, loss_func, global_step ):
     tb.add_scalar('val_iou_extract', np.mean(val_iou_extract), global_step=global_step)
     tb.add_scalar('val_loss', np.mean(val_loss_epoch), global_step=global_step)
     tb.add_scalar('val_iou_without_gan', np.mean(val_iou_without_gan), global_step=global_step)
-
 
     out_grid = torchvision.utils.make_grid(1.- torch.clamp(logits_extract + logits_restore, 0, 1).cpu())
     input_grid = torchvision.utils.make_grid(1. - logits_extract.cpu())
     true_grid = torchvision.utils.make_grid(1.- y_restor.cpu())
     input_clean_grid = torchvision.utils.make_grid(x_input.cpu())
 
     tb.add_image(tag='val_first_input', img_tensor=input_clean_grid, global_step=global_step)
-
     tb.add_image(tag='val_out_extract', img_tensor=out_grid, global_step=global_step)
     tb.add_image(tag='val_input', img_tensor=input_grid, global_step=global_step)
     tb.add_image(tag='val_true', img_tensor=true_grid, global_step=global_step)
@@ -156,16 +151,16 @@ def main(args):
     tb = SummaryWriter(tb_dir)
 
     train_loader, val_loader = get_dataloaders(args)
-
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
     if args.model not in ["UNET"]:
         raise Exception('Unsupported type of model, choose from [ "UNET"]')
 
     gen = MODEL_LOSS[args.model]['gen']
-    unet = MODEL_LOSS[args.model]['unet']
+    unet = MODEL_LOSS[args.model]['unet']
     loss_func = MODEL_LOSS[args.model]['loss']
 
-    gen = gen.cuda()
-    unet = unet.cuda()
+    gen = gen.to(device)
+    unet = unet.to(device)
 
     if 'gen_path' in args and args.gen_path is not None:
         gen = load_model(gen, args.gen_path)
@@ -187,44 +182,40 @@ def main(args):
     for x_input, y_extract, y_restor in tqdm(train_loader):
         # data reading
         # unet.train()
-        x_input = torch.FloatTensor(x_input).cuda()
-        y_extract = y_extract.type(torch.FloatTensor).cuda().unsqueeze(1)
-        y_restor = 1. - y_restor.type(torch.FloatTensor).cuda().unsqueeze(1)
+        x_input = torch.FloatTensor(x_input).to(device)
+        y_extract = y_extract.type(torch.FloatTensor).to(device).unsqueeze(1)
+        y_restor = 1. - y_restor.type(torch.FloatTensor).to(device).unsqueeze(1)
 
         unet.eval()
         with torch.no_grad():
-            logits_restor, logits_extract = None, unet(x_input)
+            logits_extract = unet(x_input)
             logits_extract = 1 - logits_extract.unsqueeze(1)
-
-
         logits_restore = gen.forward(logits_extract).unsqueeze(1)  # restoration + extraction
 
         # if Cleaning loss use this
-        gen_loss = loss_func(1 - (logits_extract + logits_restore), None, 1- y_restor,None )
+        gen_loss = loss_func(1 - (logits_extract + logits_restore), None, 1-y_restor, None )
         # else if with_restore =True use this
        # input_fake = torch.cat((logits_extract + logits_restore,logits_extract),dim = 1)
        #
        # gen_loss = loss_func(logits_extract, input_fake, y_extract, y_restor)
 
-
         gen_opt.zero_grad()
         gen_loss.backward()
         gen_opt.step()
 
         gen_step += 1
 
-        if(np.random.random() <=0.5):
-            disc_step+=1
+        if np.random.random() <= 0.5:
+            disc_step += 1
 
         global_step += 1
 
         if global_step <= 1:
             continue
 
-        tb.add_scalar('gen_vectran_loss',gen_loss.item(), global_step=global_step)
+        tb.add_scalar('gen_vectran_loss', gen_loss.item(), global_step=global_step)
         # tb.add_scalar('train_loss', loss.cpu().data.numpy(), global_step=global_step)
 
-
         if global_step % 100 == 0 or global_step <= 2:
             out_grid = torchvision.utils.make_grid(1. - torch.clamp(logits_extract + logits_restore, 0, 1).cpu())
             input_grid = torchvision.utils.make_grid(1. - logits_extract.cpu())
@@ -248,7 +239,6 @@ def main(args):
             save_model(unet, os.path.join(tb_dir, 'unet_it_%s.pth' % global_step))
 
 
-
 if __name__ == '__main__':
     args = parse_args()
     main(args)
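The recurring change across this file is replacing hard-coded .cuda() calls with a device variable, which lets the script run on CPU-only machines. A minimal sketch of the batch-preparation idiom; the prepare_batch helper name is hypothetical, and swapping the legacy torch.FloatTensor(...) constructor for .float() is a suggested further cleanup, not part of the PR (it assumes the loader already yields tensors):

import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'

def prepare_batch(x_input, y_extract, y_restor):
    # Hypothetical helper: .float().to(device) is the modern equivalent of
    # torch.FloatTensor(x).cuda() and degrades gracefully to CPU.
    x_input = x_input.float().to(device)
    y_extract = y_extract.float().to(device).unsqueeze(1)
    y_restor = 1. - y_restor.float().to(device).unsqueeze(1)
    return x_input, y_extract, y_restor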
3 changes: 2 additions & 1 deletion cleaning/scripts/run.py
@@ -112,7 +112,8 @@ def main(options):
     skio.imsave(options.cleaned_filename, cleaned_rgb)
 
     if options.vectorize:
-        vector_model = load_vector_model(options.vector_model_filename)
+        # TODO: implement or import load_vector_model() func
+        vector_model = load_vector_model(options.vector_model_filename)
         if options.use_patches:
             patches_rgb, patches_offsets = split_to_patches(cleaned_rgb, options.patch_size)
             #patches_vector = []
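The new TODO flags that load_vector_model() is neither defined nor imported in run.py, so the vectorize branch currently raises a NameError. A hypothetical minimal implementation, assuming the checkpoint stores a state_dict for a model class defined elsewhere; VectorModel is a placeholder name, not a class from this repository:

import torch

def load_vector_model(filename):
    # Hypothetical stub for the TODO above.
    model = VectorModel()  # placeholder: the real class is not in this diff
    model.load_state_dict(torch.load(filename, map_location='cpu'))
    model.eval()           # inference mode for vectorization
    return model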
3 changes: 1 addition & 2 deletions merging/merging_for_curves.py
@@ -5,13 +5,12 @@
 
 import numpy as np
 
+sys.path.append('/code')
 from util_files.job_tuples.calculate_results_for_curves import job_tuples
 from util_files.simplification.join_qb import join_quad_beziers
 from util_files.optimization.optimizer.logging import Logger
 from util_files.rendering.cairo import PT_LINE, PT_QBEZIER
 from util_files.data.graphics.graphics import Path, VectorImage
 
-sys.path.append('/code')
-
 def main(options, width_percentile=90, fit_tol=.5, w_tol=np.inf, join_tol=.5):
     logger = Logger.prepare_logger(loglevel='info', logfile=None)
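The move matters because Python resolves a from ... import at the moment the statement executes: appending to sys.path after the util_files imports cannot help them succeed. A minimal illustration of the ordering constraint:

import sys

sys.path.append('/code')  # must run before the import below...
from util_files.optimization.optimizer.logging import Logger  # ...which is resolved via /code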
3 changes: 1 addition & 2 deletions merging/merging_for_lines.py
@@ -4,6 +4,7 @@
 import argparse
 import torch
 
+
 def postprocess(y_pred_render, patches_offsets, input_rgb, cleaned_image, it, options):
     '''
@@ -38,6 +39,4 @@ def postprocess(y_pred_render, patches_offsets, input_rgb, cleaned_image, it, options):
     save_svg(result_tuning, cleaned_image.shape, options.image_name[it], options.output_dir + 'iou_postprocess/')
     result_tuning = lines_matching(result_tuning, frac=0.07)
     save_svg(result_tuning, cleaned_image.shape, options.image_name[it], options.output_dir + 'lines_matching/')
-
-
     return result_tuning