- loss.py:Function: logsumexp(x):
- loss.py:Function: flatten_check(out, targ):
- loss.py:Function: exp_rmspe(pred, targ):
- loss.py:Function: mean_squared_error(pred, targ):
- loss.py:Function: psnr(input, targs):
- loss.py:Function: explained_variance(pred, targ):
- loss.py:Function: r2_score(pred, targ):
- loss.py:Function: auc_roc_score(input, targ):
- loss.py:Function: roc_curve(input, targ):
- loss.py:Function: dice(input, targs, iou=False, eps=1e-8):
- loss.py:Function: WassersteinLoss(real, fake):
- loss.py:Function: fbeta(y_pred, y_true, thresh=0.2, beta=2, eps=1e-9, sigmoid=True):
- layers.py:Function: init_cnn(m):
- layers.py:Function: init_default(m: nn.Module, func=nn.init.kaiming_normal_) -> nn.Module:
- layers.py:Function: avgpoolflatten():
- layers.py:Class : flatten(nn.Module):
- layers.py:Class : GeneralRelu(nn.Module):
- layers.py:Class : AdaptiveConcatPool2d(nn.Module):
- tests.py:Function: test_eq(a, b):
- tests.py:Function: test_near(a, b):
- main.py:Function: find_modules(m, cond):
- main.py:Function: is_lin_layer(l):
- main.py:Function: clear_memory():
- main.py:Function: seed_everything(seed=42):
- main.py:Function: freeze_to(model, to=None):
- main.py:Function: unfreeze_to(model, to=None):
- main.py:Function: count_parameters(model, show_table=False):
- main.py:Function: total_layer_state(model):
- main.py:Function: open_image(fpath, size, convert_to="", to_tensor=False, perm=()):
- main.py:Function: pil_from_tensor(x):
- main.py:Function: pil_to_tensor(x):
- main.py:Function: visualize_model(model, inp_size=[1, 3, 64, 64], device="cuda:0"):
- general.py:Function: compose(x, funcs, *args, order_key="_order", **kwargs):
- general.py:Function: timeit(method):
- general.py:Function: parallel(func, arr: Collection, max_workers: int = None, leave=False):
- general.py:Function: download_and_check(url, fpath=".", name=""):
- general.py:Function: get_last_log(name, fpath="logs"):