Basic functions and classes useful for reproducing research papers.

Debugging

These are functions that are useful for debugging.

explode_types[source]

explode_types(o)

Return the type of o, potentially in nested dictionaries for things that are listy

def explode_types(o):
    '''Like fastcore explode_types, but only shows __name__ of type.'''
    if not is_listy(o): return type(o).__name__
    return {type(o).__name__: [explode_types(o_) for o_ in o]}

explode_lens[source]

explode_lens(o)

def explode_lens(o):
    if is_listy(o):
        if all(is_listy(o_) for o_ in o):
            return [explode_lens(o_) for o_ in o]
        else: return len(o)
test_eq(explode_lens([[1,4], [[5,6,7], [1]]]), [2, [3, 1]])

explode_shapes[source]

explode_shapes(o)

def explode_shapes(o):
    if not is_listy(o): return tuple(bind(getattr, arg0, 'shape')(o))
    return [explode_shapes(o_) for o_ in o]
test_eq(explode_shapes([tensor([1,4]), [tensor([[4,5],[7,8]]), tensor([6])]]), [(2,), [(2,2), (1,)]])

explode_ranges[source]

explode_ranges(o)

def explode_ranges(o):
    if not is_listy(o): return (float(o.min()), float(o.max()))
    return [explode_ranges(o_) for o_ in o]
explode_ranges([tensor([1,4]), [tensor([[4,5],[7,8]]), tensor([6])]])
[(1.0, 4.0), [(4.0, 8.0), (6.0, 6.0)]]

pexpt[source]

pexpt(o)

def pexpt(o): print(explode_types(o))

pexpl[source]

pexpl(o)

def pexpl(o): print(explode_lens(o))

pexps[source]

pexps(o)

def pexps(o): print(explode_shapes(o))

get_cudas[source]

get_cudas()

Returns the number of tensors on the CUDA device.
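
The implementation isn't shown here; below is a minimal sketch, assuming the usual approach of walking the objects tracked by the garbage collector (the helper name count_cuda_tensors is made up for illustration, not the library function itself).

import gc, torch

def count_cuda_tensors():
    '''Sketch: count live tensors currently resident on a CUDA device.'''
    n = 0
    for obj in gc.get_objects():
        try:
            if torch.is_tensor(obj) and obj.is_cuda: n += 1
        except Exception: pass  # some tracked objects raise on attribute access
    return n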

receptive_fields[source]

receptive_fields(model, nf, imsize, bs=64)

Returns the size of the receptive field for each feature output.
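
A common way to measure receptive fields is gradient-based: back-propagate from a single output location and see which input pixels receive a non-zero gradient. The sketch below illustrates that general technique for a fully convolutional model; it is an assumption, not the library implementation, and inactive ReLUs can make the measured field a bit smaller than the theoretical one.

import torch

def receptive_field_size(model, imsize=(3, 64, 64)):
    '''Sketch: estimate the receptive field of the centre output feature via input gradients.'''
    model.eval()
    x = torch.randn(1, *imsize, requires_grad=True)
    out = model(x)
    g = torch.zeros_like(out)
    g[0, 0, out.shape[-2]//2, out.shape[-1]//2] = 1.  # one-hot gradient at the centre feature
    out.backward(g)
    nz = x.grad[0].abs().sum(0).nonzero()             # input pixels that influence that output
    return (int(nz[:,0].max() - nz[:,0].min() + 1),
            int(nz[:,1].max() - nz[:,1].min() + 1))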

N Images Classes

These are classes to handle many-images-to-many-images generation

class ImageNTuple[source]

ImageNTuple(x=None, *rest) :: fastuple

A tuple with elementwise ops and more friendly init behavior

path = untar_data(URLs.PETS)
files = get_image_files(path/"images")

ImageNTuple will only show the images if all of them are the same size and all of them are tensors.

imt2 = ImageNTuple.create((files[0], files[1]))
explode_types(imt2)
{'ImageNTuple': ['PILImage', 'PILImage']}
imt2 = ImageNTuple.create((files[0], files[1]))
imt2 = Resize(224)(imt2)
imt2 = ToTensor()(imt2)
imt2.show();
imt3 = ImageNTuple.create((files[0], files[1], files[2]))
imt3 = Resize(224)(imt3)
imt3 = ToTensor()(imt3)
ax = imt3.show()
test_eq(len(imt2), 2)
test_eq(len(imt3), 3)

ImageTupleBlock[source]

ImageTupleBlock()

Like the fastai Siamese tutorial transform, but uses ImageNTuple.
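
A minimal sketch of what such a block could look like, following the pattern of the fastai Siamese tutorial (this definition is an assumption, not a copy of the source):

from fastai.vision.all import *

def ImageTupleBlock():
    '''Sketch: a TransformBlock that builds an ImageNTuple and converts ints to floats on the batch.'''
    return TransformBlock(type_tfms=ImageNTuple.create, batch_tfms=IntToFloatTensor)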

GAN Models

class ConditionalGenerator[source]

ConditionalGenerator(gen) :: Module

Wrapper around a GAN generator that returns the generated image along with the input.
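
A rough sketch of how such a wrapper can behave, judging from the tests below: concatenate the input images along the channel dimension, run the generator, and return the inputs plus the generated image as an ImageNTuple. The details (class name, exact ordering) are assumptions.

from fastai.vision.all import *

class ConditionalGeneratorSketch(Module):
    '''Sketch: run `gen` on the channel-concatenated inputs, return inputs + output as an ImageNTuple.'''
    def __init__(self, gen): self.gen = gen
    def forward(self, x):
        xs = list(x) if is_listy(x) else [x]
        out = self.gen(torch.cat(xs, dim=1))
        return ImageNTuple(*xs, out)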

Test that the ConditionalGenerator can generate images from one image

gen_base = basic_critic(32, 3)
gen_base = nn.Sequential(*list(gen_base.children())[:-2])
unet = DynamicUnet(gen_base, 3, (32, 32))
gen = ConditionalGenerator(unet)
out = gen(torch.rand(1, 3, 32, 32))
test_eq(explode_shapes(out), [(1, 3, 32, 32), (1, 3, 32, 32)])
test_eq(type(out), ImageNTuple)

Test that the ConditionalGenerator can generate an image from two images

gen_base = basic_critic(32, 6)
gen_base = nn.Sequential(*list(gen_base.children())[:-2])
unet = DynamicUnet(gen_base, 3, (32, 32))
gen = ConditionalGenerator(unet)
dl = DataLoader(dataset=([imt2]), bs=1, after_item=IntToFloatTensor())
b = first(dl)
out = gen(b)
test_eq(type(out), ImageNTuple)
test_eq(len(out), 3)

class SiameseCritic[source]

SiameseCritic(critic) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__
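
The docstring above is just the one inherited from fastai's Module. Given that the critic below is built with n_channels=6 for a pair of 3-channel images, a plausible sketch (an assumption, not the actual source) is:

from fastai.vision.all import *

class SiameseCriticSketch(Module):
    '''Sketch: concatenate the tuple of images along the channel dimension before calling the wrapped critic.'''
    def __init__(self, critic): self.critic = critic
    def forward(self, x):
        return self.critic(torch.cat(list(x), dim=1) if is_listy(x) else x)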

critic = gan_critic(n_channels=6, nf=64)
scritic = SiameseCritic(critic)

GAN Metrics

These are Metrics that work with GANLearner

class GenMetric[source]

GenMetric(func) :: AvgMetric

Average the values of func taking into account potential different batch sizes

class CriticMetric[source]

CriticMetric(func) :: AvgMetric

Average the values of func taking into account potential different batch sizes

Export GANLearner

To export a GANLearner we need to set the learner to gen_mode, and before recreating the optimization function we should call set_freeze_model to unfreeze all the parameters in the model so that the old optimizer state can be loaded properly.

GANLearner.export[source]

GANLearner.export(fname='export.pkl', pickle_protocol=2)

Export the content of self without the items and the optimizer state for inference
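
A hedged sketch of the procedure described above, using fastai's GANTrainer.switch and set_freeze_model; the exact calls and the helper name are assumptions, not the actual implementation.

from fastai.vision.all import *
from fastai.vision.gan import GANLearner, set_freeze_model

@patch
def export_sketch(self:GANLearner, fname='export.pkl', pickle_protocol=2):
    '''Sketch: switch to generator mode and unfreeze both sub-models before a plain Learner.export.'''
    self.gan_trainer.switch(gen_mode=True)
    set_freeze_model(self.model.generator, True)  # requires_grad=True so the old opt state loads cleanly
    set_freeze_model(self.model.critic, True)
    Learner.export(self, fname=fname, pickle_protocol=pickle_protocol)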

class ProgressImage[source]

ProgressImage(out_widget, save_img=False, folder='pred_imgs', conditional=False, ax=None, figsize=None, title=None, ctx=None, cmap=None, norm=None, aspect=None, interpolation=None, alpha=None, vmin=None, vmax=None, origin=None, extent=None, filternorm=True, filterrad=4.0, resample=None, url=None, data=None) :: Callback

Shows a sample of the generator after every epoch. It is a good idea to keep a human in the loop.

Show Results for ImageNTuple

Fix for tuple ys in Learner

Learner.show_results and Learner.predict throw an error if the ys are tuples. Let's fix that.

GANLearner.show_results[source]

GANLearner.show_results(ds_idx=1, dl=None, max_n=9, shuffle=True, **kwargs)

Show some predictions on ds_idx-th dataset or dl

# @patch
# def predict(self:GANLearner, item, rm_type_tfms=None, with_input=False):
#     dl = self.dls.test_dl([item], rm_type_tfms=rm_type_tfms, num_workers=0)
#     inp,preds,_,dec_preds = self.get_preds(dl=dl, with_input=True, with_decoded=True)
#     i = getattr(self.dls, 'n_inp', -1)
#     inp = (inp,) if i==1 else tuplify(inp)
#     n_out = len(self.dls.tls) - i
#     dec_preds = (dec_preds,) if n_out==1 else tuplify(dec_preds)
#     dec = self.dls.decode_batch(inp + dec_preds)[0]
#     dec_inp,dec_targ = map(detuplify, [dec[:i],dec[i:]])
#     res = dec_targ,dec_preds[0],preds[0]
#     if with_input: res = (dec_inp,) + res
#     return res
# def tuplify(o:ImageNTuple, *kwargs): return (o,)

Datasets

download_file_from_google_drive[source]

download_file_from_google_drive(file_id, destination, folder_name=None)

save_response_content[source]

save_response_content(response, destination)
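
Both helpers follow the usual requests-based Google Drive pattern, where large files need a confirmation token sent back in a second request. A self-contained sketch of that pattern (not necessarily the exact implementation):

import requests

def download_from_gdrive_sketch(file_id, destination):
    '''Sketch: download a (possibly large) file from Google Drive, handling the confirmation token.'''
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={'id': file_id}, stream=True)
    token = next((v for k, v in response.cookies.items() if k.startswith('download_warning')), None)
    if token:  # large files require a second request confirming the virus-scan warning
        response = session.get(URL, params={'id': file_id, 'confirm': token}, stream=True)
    with open(destination, 'wb') as f:
        for chunk in response.iter_content(32768):
            if chunk: f.write(chunk)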

Original Inception weights for FID, by mseitzer.

download_coco[source]

download_coco(force_download=False)

Renormalization stats

From [-1, 1] to imagenet_stats
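
These stats follow from solving for the ImageNet normalization in terms of the already normalized tensor: if xn = (x - 0.5)/0.5 then x = 0.5*xn + 0.5, so (x - m)/s = (xn - (2m - 1))/(2s). The re-normalization mean is therefore 2m - 1 and the std is 2s, which is what the cell below computes.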

renorm_stats = (2*torch.tensor(imagenet_stats[0])-1).tolist(), (2*torch.tensor(imagenet_stats[1])).tolist()
x = TensorImage(torch.rand(1, 3, 32, 32))
xn = Normalize(0.5, 0.5)(x)
xin = Normalize.from_stats(*imagenet_stats, cuda=False)(x)
xn2in = Normalize.from_stats(*renorm_stats, cuda=False)(xn)
test(x, xn, nequals)
test(x, xin, nequals)
test(xin, xn, nequals)
test(xin, xn2in, all_equal)

DataLoaders

Path.is_relative_to[source]

Path.is_relative_to(*other)

Return True if the path is relative to another path or False.

get_tuple_files_by_stem[source]

get_tuple_files_by_stem(paths, folders=None, recurse=True)

ParentsSplitter[source]

ParentsSplitter(train_name='train', valid_name='valid')

Split items from the grand parent folder names (train_name and valid_name).
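
No source is shown and the docstring mirrors fastai's GrandparentSplitter; one plausible reading of the name, sketched purely as an assumption, is a splitter that looks at all parent folders of each item rather than only the grandparent:

from pathlib import Path

def parents_splitter_sketch(train_name='train', valid_name='valid'):
    '''Sketch: split items by whether any parent folder is named train_name or valid_name.'''
    def _inner(items):
        train = [i for i,o in enumerate(items) if train_name in Path(o).parent.parts]
        valid = [i for i,o in enumerate(items) if valid_name in Path(o).parent.parts]
        return train, valid
    return _inner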

class FilterRelToPath[source]

FilterRelToPath(path)

class CGANDataLoaders[source]

CGANDataLoaders(*loaders, path='.', device=None) :: DataLoaders

Basic wrapper around several DataLoaders with factory methods for CGAN problems

path_base = untar_data(URLs.FACADES_BASE)
item_tfms = Resize(286, ResizeMethod.Squish, resamples=(Image.NEAREST, Image.NEAREST))
batch_tfms = [Normalize.from_stats(0.5*torch.ones(3), 0.5*torch.ones(3)),
              *aug_transforms(size=256, mult=0.0, max_lighting=0, p_lighting=0, mode='nearest')]

dls = CGANDataLoaders.from_path_ext(path_base.parent, ['base', 'extended'], item_tfms=item_tfms, batch_tfms=batch_tfms, bs=1)
dls.show_batch()
item_tfms = Resize(286)
dls = CGANDataLoaders.from_paths('coco_under_water', 'coco', train='test2017', valid='val2017',
                                 item_tfms=item_tfms, bs=16, num_workers=16, n_inp=-1)
dls.show_batch(max_n=2)
# def basic_nested_repr(flds=None):
#     if isinstance(flds, str): flds = re.split(', *', flds)
#     flds = list(flds or [])
#     def _f(self):
#         sig = ', '.join(f'{o}={maybe_attr(nested_attr(self,o), "__name__")}' for o in flds)
#         return f'{self.__class__.__name__}({sig})'
#     return _f

basic_name[source]

basic_name(flds=None)

class GatherLogs[source]

GatherLogs(experiments='logs', save_after_fit=True) :: Callback

Gather logs from one or more experiments.

learn = synth_learner()
gl = GatherLogs()
with learn.added_cbs(gl):
    #gl.set_experiment_name('test1')
    learn.fit(2)
epoch train_loss valid_loss time
0 11.660902 11.212616 00:00
1 9.580758 7.239999 00:00
gl.df
epoch time experiment experiment_count stage loss
0 0 0.0 RegModel_BaseLoss 0 train 11.660902
1 0 0.0 RegModel_BaseLoss 0 valid 11.212616
2 1 0.0 RegModel_BaseLoss 0 train 9.580758
3 1 0.0 RegModel_BaseLoss 0 valid 7.239999
with learn.added_cbs(gl):
    gl.set_experiment_name('test2')
    learn.fit(2)
epoch train_loss valid_loss time
0 4.968184 4.440056 00:00
1 3.889592 2.660069 00:00
gl.df
epoch time experiment experiment_count stage loss
0 0 0.0 RegModel_BaseLoss 0 train 11.660902
1 0 0.0 RegModel_BaseLoss 0 valid 11.212616
2 1 0.0 RegModel_BaseLoss 0 train 9.580758
3 1 0.0 RegModel_BaseLoss 0 valid 7.239999
4 0 0.0 test2 1 train 4.968184
5 0 0.0 test2 1 valid 4.440056
6 1 0.0 test2 1 train 3.889592
7 1 0.0 test2 1 valid 2.660069
gl.plot_metric();
gl.plot_time();

RunNBatches Callback

class RunNBatches[source]

RunNBatches(n=2, no_valid=True) :: Callback

Basic class handling tweaks of the training loop by changing a Learner in various events
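
Judging from the runs below (only epoch 0 is reported, and with no_valid=True there is no valid_loss), here is a sketch in the style of fastai's ShortEpochCallback; the event choices and class name are assumptions.

from fastai.vision.all import *

class RunNBatchesSketch(Callback):
    '''Sketch: train for only `n` batches, optionally skip validation, then cancel the rest of the fit.'''
    def __init__(self, n=2, no_valid=True): store_attr()
    def after_batch(self):
        if self.training and self.iter + 1 >= self.n: raise CancelTrainException()
    def before_validate(self):
        if self.no_valid: raise CancelValidException()
    def after_epoch(self): raise CancelFitException()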

learn = synth_learner()
with learn.added_cbs(RunNBatches()):
    learn.fit(5)
epoch train_loss valid_loss time
0 4.231726 00:00
with learn.added_cbs(RunNBatches(n=1, no_valid=False)):
    learn.fit(5)
epoch train_loss valid_loss time
0 12.414106 12.805893 00:00