From 3a76b2f09a537fff3b78b4ee2e67c94557eb09dc Mon Sep 17 00:00:00 2001
From: Jon Almazan <jon.almazan@gmail.com>
Date: Tue, 13 Aug 2019 10:41:55 +0100
Subject: [PATCH] clean unused functions

---
 dirtorch/utils/common.py | 50 +++++-----------------------------------
 1 file changed, 6 insertions(+), 44 deletions(-)

diff --git a/dirtorch/utils/common.py b/dirtorch/utils/common.py
index cf03917..a430349 100644
--- a/dirtorch/utils/common.py
+++ b/dirtorch/utils/common.py
@@ -189,12 +189,15 @@ def freeze_batch_norm(model, freeze=True, only_running=False):
 
     for m in model.modules():
         if isinstance(m, nn.BatchNorm2d):
-            m.eval()  # Eval mode freezes the running mean and std
+            # Eval mode freezes the running mean and std
+            m.eval()
             for param in m.named_parameters():
                 if only_running:
-                    param[1].requires_grad = True  # Weight and bias can be updated
+                    # Weight and bias can be updated
+                    param[1].requires_grad = True
                 else:
-                    param[1].requires_grad = False  # Freeze the weight and bias
+                    # Freeze the weight and bias
+                    param[1].requires_grad = False
 
 
 def variables(inputs, iscuda, not_on_gpu=[]):
@@ -213,47 +216,6 @@ def variables(inputs, iscuda, not_on_gpu=[]):
     return inputs_var
 
 
-def learn_pca(X, n_components=None, whiten=False, use_sklearn=True):
-    ''' Learn Principal Component Analysis
-
-    input:
-        X: input matrix with size samples x features
-        n_components: number of components to keep
-        whiten: applies feature whitening
-
-    output:
-        PCA: weights and means of the PCA learned
-    '''
-    if use_sklearn:
-        pca = sklearn.decomposition.PCA(n_components=n_components, svd_solver='full', whiten=whiten)
-        pca.fit(X)
-    else:
-        fudge = 1E-8
-        means = np.mean(X, axis=0)
-        X = X - means
-
-        # get the covariance matrix
-        Xcov = np.dot(X.T, X)
-
-        # eigenvalue decomposition of the covariance matrix
-        d, V = np.linalg.eigh(Xcov)
-        d[d < 0] = fudge
-
-        # a fudge factor can be used so that eigenvectors associated with
-        # small eigenvalues do not get overamplified.
-        D = np.diag(1. / np.sqrt(d+fudge))
-
-        # whitening matrix
-        W = np.dot(np.dot(V, D), V.T)
-
-        # multiply by the whitening matrix
-        X_white = np.dot(X, W)
-
-        pca = {'W': W, 'means': means}
-
-    return pca
-
-
 def transform(pca, X, whitenp=0.5, whitenv=None, whitenm=1.0, use_sklearn=True):
     if use_sklearn:
         # https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/base.py#L99
--
GitLab