diff --git a/vitookit/analyze/linear_part/eval.py b/vitookit/analyze/linear_part/eval.py
index 0e1e9a087c9632cdd0eb58f6f701a7d6b7595a6a..2a585f09cd3586eea05567318579153eb4f62aff 100644
--- a/vitookit/analyze/linear_part/eval.py
+++ b/vitookit/analyze/linear_part/eval.py
@@ -273,7 +273,8 @@ def train(model, linear_classifiers, optimizers, loader, epoch, permutes):
         torch.cuda.synchronize()
         for group, (pm, loss, optimizer) in enumerate(zip(permutes, losses, optimizers)):
             metric_logger.update(**{'loss{}'.format(group): loss.item()})
-            metric_logger.update(**{'lr{}'.format(group): optimizer.param_groups[0]["lr"]})
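+            # log the last param group's lr (assumption: with layer-wise lr decay, group 0 is the most-decayed group, while the last group reflects the base/head lr)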
+            metric_logger.update(**{'lr{}'.format(group): optimizer.param_groups[-1]["lr"]})
     # gather the stats from all processes
     metric_logger.synchronize_between_processes()
     print("Averaged stats:", metric_logger)
diff --git a/vitookit/datasets/build_dataset.py b/vitookit/datasets/build_dataset.py
index 36be70c4de5bc52010b8006f9e5f34b2b58a4601..34ae0357a72fd544fe4acc6a4b7db92dc978ac54 100644
--- a/vitookit/datasets/build_dataset.py
+++ b/vitookit/datasets/build_dataset.py
@@ -33,54 +33,51 @@ def build_dataset(args, is_train, trnsfrm=None,):
     if trnsfrm is None:
         trnsfrm = build_transform(is_train, args)
     
-    def _exNone_transform(img):
-        if img is None:
-            return None
-        else:
-            return trnsfrm(img)
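+    # the None-guard wrapper above is dropped; the transform is applied directly to the loader output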
+    tfm = trnsfrm
     
     if 'data_path' in args.__dict__:
         args.data_location = args.data_location
         
     if args.data_set == 'Pets':
         split = 'trainval' if is_train else 'test'
-        dataset = datasets.OxfordIIITPet(args.data_location, split=split, transform=_exNone_transform,download=True)
+        dataset = datasets.OxfordIIITPet(args.data_location, split=split, transform=tfm,download=True)
         nb_classes = 37
         
     elif args.data_set == 'Folder':
-        dataset = datasets.ImageFolder(args.data_location, transform=_exNone_transform,loader=img_loader)
+        dataset = datasets.ImageFolder(args.data_location, transform=tfm,loader=img_loader)
         nb_classes = len(dataset.classes)
     elif args.data_set in ['IN1K','IN100']:
         split = 'train' if is_train else 'validation'
-        dataset = datasets.ImageFolder(os.path.join(args.data_location,split), transform=_exNone_transform)
+        dataset = datasets.ImageFolder(os.path.join(args.data_location,split), transform=tfm)
         nb_classes = len(dataset.classes)
     elif args.data_set == 'CIFAR10':
-        dataset = datasets.CIFAR10(args.data_location,is_train,transform=_exNone_transform,download=True)
+        dataset = datasets.CIFAR10(args.data_location,is_train,transform=tfm,download=True)
         nb_classes = 10
         
     elif args.data_set == 'CIFAR100':
-        dataset = datasets.CIFAR100(args.data_location,is_train,transform=_exNone_transform,download=True)
+        dataset = datasets.CIFAR100(args.data_location,is_train,transform=tfm,download=True)
         nb_classes = 100
         
     elif args.data_set == 'Cars':
-        dataset = datasets.StanfordCars(args.data_location,'train' if is_train else 'test',transform=_exNone_transform,download=True)
+        dataset = datasets.StanfordCars(args.data_location,'train' if is_train else 'test',transform=tfm,download=True)
         nb_classes = 196
     
     elif args.data_set == 'Flowers':
-        dataset = datasets.Flowers102(args.data_location,'train' if is_train else 'test',transform=_exNone_transform,download=True)
+        dataset = datasets.Flowers102(args.data_location,'train' if is_train else 'test',transform=tfm,download=True)
         nb_classes = 102
     
     elif args.data_set == 'Aircraft':
-        dataset = datasets.FGVCAircraft(args.data_location,'trainval' if is_train else 'test',transform=_exNone_transform,download=True)
+        dataset = datasets.FGVCAircraft(args.data_location,'trainval' if is_train else 'test',transform=tfm,download=True)
         nb_classes = len(dataset.classes)
 
     elif args.data_set == 'STL':
         split = 'train' if is_train else 'test'
-        dataset = datasets.STL10(args.data_location,split,transform=_exNone_transform,download=True)
+        dataset = datasets.STL10(args.data_location,split,transform=tfm,download=True)
         nb_classes = 10
     elif args.data_set == 'ominiglot':
         trnsfrm.transforms.insert(-2,transforms.Grayscale(num_output_channels=3))
-        dataset = datasets.Omniglot(args.data_location,transform=_exNone_transform,download=True)
+        dataset = datasets.Omniglot(args.data_location,transform=tfm,download=True)
         nb_classes = 1623
             
     else:
diff --git a/vitookit/datasets/ffcv_transform.py b/vitookit/datasets/ffcv_transform.py
index c5144c1645bfb5a0af6e145ec56c0b8daf3e8bea..9541909c1b496a289742e4ced1d74b208f44d782 100644
--- a/vitookit/datasets/ffcv_transform.py
+++ b/vitookit/datasets/ffcv_transform.py
@@ -558,13 +558,14 @@ from torchvision.transforms import InterpolationMode
 
 @gin.configurable
 class RandDownSampling(nn.Module):
-    def __init__(self, r=(3/4,1)) -> None:
+    def __init__(self, r=(0.25,0.75)) -> None:
         super().__init__()
         self.r = r
     def forward(self,x):
         h, w = x.shape[-2:]
         r = random.uniform(*self.r)
-        if r == 1:
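+        # no-op guard: skip the resize when the sampled ratio is ~1 (unreachable with the new default range, kept for gin-configured ranges that include 1)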
+        if r >= 0.99:
             return x
         nh,hw = int(h*r),int(w*r)
         down = F.resize(x,(nh,hw),interpolation=InterpolationMode.BICUBIC)
diff --git a/vitookit/evaluation/eval_cls.py b/vitookit/evaluation/eval_cls.py
index 607c6ad64e2e5a6ec13e26583a6d133122752b5b..089f80a9f8479c0a26352a4bdcf02dcd559694c3 100644
--- a/vitookit/evaluation/eval_cls.py
+++ b/vitookit/evaluation/eval_cls.py
@@ -222,7 +222,7 @@ def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
         if wandb.run: 
             wandb.log({'train/loss':loss})
         metric_logger.update(loss=loss_value)
-        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
+        metric_logger.update(lr=optimizer.param_groups[-1]["lr"])
     # gather the stats from all processes
     metric_logger.synchronize_between_processes()
     print("Averaged stats:", metric_logger)
@@ -238,7 +238,8 @@ def evaluate(data_loader, model, device):
 
     # switch to evaluation mode
     model.eval()
-
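+    # accumulate per-batch argmax predictions on CPU; they are returned to the caller and are not gathered across processes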
+    preds = []
     for images, target in metric_logger.log_every(data_loader, 10, header):
         images = images.to(device, non_blocking=True)
         target = target.to(device, non_blocking=True, dtype=torch.long)
@@ -247,6 +247,7 @@ def evaluate(data_loader, model, device):
         with torch.cuda.amp.autocast():
             output = model(images)
             loss = criterion(output, target)
+        preds.append(output.argmax(1).cpu())
 
         acc1, acc5 = accuracy(output, target, topk=(1, 5))
 
@@ -258,8 +259,8 @@ def evaluate(data_loader, model, device):
     metric_logger.synchronize_between_processes()
     print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
           .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
-
-    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
+    preds = torch.cat(preds)
+    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}, preds
 
 
 def main(args):
@@ -391,11 +392,14 @@ def main(args):
         args.start_epoch = run_variables["epoch"] + 1
 
     if args.eval:
-        test_stats = evaluate(data_loader_val, model_without_ddp, device)
+        assert args.world_size == 1, "--eval supports single-process runs only"
+        test_stats, preds = evaluate(data_loader_val, model_without_ddp, device)
         print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
         if args.output_dir and misc.is_main_process():
             with (output_dir / "log.txt").open("a") as f:
                 f.write(json.dumps(test_stats) + "\n")
+            with (output_dir / "preds.txt").open("w") as f:
+                f.write(json.dumps(preds.tolist()) + "\n")
         exit(0)
 
     print(f"Start training for {args.epochs} epochs from {args.start_epoch}")
diff --git a/vitookit/evaluation/eval_cls_ffcv.py b/vitookit/evaluation/eval_cls_ffcv.py
index 1c148b019b5879f71a0df49d54b3f1d654e183c2..ad8d388c7904b78fb42dd8e561c0294584da5947 100644
--- a/vitookit/evaluation/eval_cls_ffcv.py
+++ b/vitookit/evaluation/eval_cls_ffcv.py
@@ -23,6 +23,8 @@ import sys
 import copy
 import scipy.io as scio
 from vitookit.datasets.ffcv_transform import *
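+# evaluate is shared with eval_cls and returns (stats, preds) instead of stats alone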
+from vitookit.evaluation.eval_cls import evaluate
 from vitookit.utils.helper import *
 from vitookit.utils import misc
 from vitookit.models.build_model import build_model
@@ -43,6 +44,7 @@ from timm.layers import trunc_normal_
 from ffcv import Loader
 from ffcv.loader import OrderOption
 
+
 def get_args_parser():
     parser = argparse.ArgumentParser('DeiT training and evaluation script', add_help=False)
     parser.add_argument('--batch_size', default=128, type=int,
@@ -214,46 +216,13 @@ def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
         if wandb.run: 
             wandb.log({'train/loss':loss})
         metric_logger.update(loss=loss_value)
-        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
+        metric_logger.update(lr=optimizer.param_groups[-1]["lr"])
     # gather the stats from all processes
     metric_logger.synchronize_between_processes()
     print("Averaged stats:", metric_logger)
     return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
 
 
-@torch.no_grad()
-def evaluate(data_loader, model, device):
-    criterion = torch.nn.CrossEntropyLoss()
-
-    metric_logger = misc.MetricLogger(delimiter="  ")
-    header = 'Test:'
-
-    # switch to evaluation mode
-    model.eval()
-
-    for images, target in metric_logger.log_every(data_loader, 10, header):
-        images = images.to(device, non_blocking=True)
-        target = target.to(device, non_blocking=True, dtype=torch.long)
-
-        # compute output
-        with torch.cuda.amp.autocast():
-            output = model(images)
-            loss = criterion(output, target)
-
-        acc1, acc5 = accuracy(output, target, topk=(1, 5))
-
-        batch_size = images.shape[0]
-        metric_logger.update(loss=loss.item())
-        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
-        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
-    # gather the stats from all processes
-    metric_logger.synchronize_between_processes()
-    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
-          .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
-
-    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
-
-
 def main(args):
     misc.init_distributed_mode(args)
 
@@ -347,11 +316,14 @@ def main(args):
         args.start_epoch = run_variables["epoch"] + 1
 
     if args.eval:
-        test_stats = evaluate(data_loader_val, model_without_ddp, device)
+        assert args.world_size == 1, "--eval supports single-process runs only"
+        test_stats, preds = evaluate(data_loader_val, model_without_ddp, device)
         print(f"Accuracy of the network on the test images: {test_stats['acc1']:.1f}%")
         if args.output_dir and misc.is_main_process():
             with (output_dir / "log.txt").open("a") as f:
                 f.write(json.dumps(test_stats) + "\n")
+            with (output_dir / "preds.txt").open("w") as f:
+                f.write(json.dumps(preds.tolist()) + "\n")
         exit(0)
 
     print(f"Start training for {args.epochs} epochs from {args.start_epoch}")
diff --git a/vitookit/evaluation/eval_cls1_ffcv.py b/vitookit/evaluation/eval_clsattn_ffcv.py
similarity index 99%
rename from vitookit/evaluation/eval_cls1_ffcv.py
rename to vitookit/evaluation/eval_clsattn_ffcv.py
index ddb2268a2d04701234e072875efd7b061a28f7d3..7f430b3177342b47dd950a1c86bbef8caa8ecf1a 100644
--- a/vitookit/evaluation/eval_cls1_ffcv.py
+++ b/vitookit/evaluation/eval_clsattn_ffcv.py
@@ -197,7 +197,7 @@ def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
         if wandb.run: 
             wandb.log({'train/loss':loss})
         metric_logger.update(loss=loss_value)
-        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
+        metric_logger.update(lr=optimizer.param_groups[-1]["lr"])
     # gather the stats from all processes
     metric_logger.synchronize_between_processes()
     print("Averaged stats:", metric_logger)
@@ -338,6 +338,7 @@ def main(args):
         args.start_epoch = run_variables["epoch"] + 1
 
     if args.eval:
+        assert args.world_size == 1, "--eval supports single-process runs only"
         test_stats = evaluate(data_loader_val, model_without_ddp, device)
         print(f"Accuracy of the network on the test images: {test_stats['acc1']:.1f}%")
         if args.output_dir and misc.is_main_process():
diff --git a/vitookit/evaluation/eval_distill.py b/vitookit/evaluation/eval_distill.py
index b4f163caf4567d9bc0981ee3ea7b5710eb2fc6b5..c51a76c11111a1b05e9412cff606bfa612d0470b 100644
--- a/vitookit/evaluation/eval_distill.py
+++ b/vitookit/evaluation/eval_distill.py
@@ -362,7 +362,7 @@ def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss,
         if wandb.run: 
             wandb.log({'train/loss':loss})
         metric_logger.update(loss=loss_value)
-        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
+        metric_logger.update(lr=optimizer.param_groups[-1]["lr"])
     # gather the stats from all processes
     metric_logger.synchronize_between_processes()
     print("Averaged stats:", metric_logger)
@@ -572,6 +572,7 @@ def main(args):
         args.start_epoch = run_variables["epoch"] + 1
 
     if args.eval:
+        assert args.world_size == 1, "--eval supports single-process runs only"
         test_stats = evaluate(data_loader_val, model_without_ddp, device)
         print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
         if args.output_dir and misc.is_main_process():
diff --git a/vitookit/evaluation/eval_linear.py b/vitookit/evaluation/eval_linear.py
index aeac274b79e0354d5abe18357364804a6db6f861..047a25d3a98c39a30ce642d7e962f0618ed4cb1d 100644
--- a/vitookit/evaluation/eval_linear.py
+++ b/vitookit/evaluation/eval_linear.py
@@ -258,6 +258,7 @@ def main(args):
         print("Resuming from epoch %d" % args.start_epoch)
 
     if args.eval:
+        assert args.world_size == 1, "--eval supports single-process runs only"
         test_stats = evaluate(data_loader_val, model, device)
         print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
         exit(0)
diff --git a/vitookit/evaluation/eval_linear_ffcv.py b/vitookit/evaluation/eval_linear_ffcv.py
index bbdb9f8e41afb6a674d807bdb2bfdf6f8a7419cf..57931516da40bec5670cdf394466980bc4263eaa 100644
--- a/vitookit/evaluation/eval_linear_ffcv.py
+++ b/vitookit/evaluation/eval_linear_ffcv.py
@@ -21,6 +21,7 @@ from pathlib import Path
 
 
 import torch.backends.cudnn as cudnn
+import torch
 import wandb
 from vitookit.models.build_model import build_model
 
@@ -212,16 +213,24 @@ def main(args):
         args.start_epoch = run_variables["epoch"] + 1
         print("resume from epoch %d" % args.start_epoch)
         
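+    # resolve output_dir before the eval-only branch so log.txt and preds.txt can be written there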
+    output_dir = Path(args.output_dir) if args.output_dir else None
+    
     if args.eval:
-        test_stats = evaluate(data_loader_val, model, device)
+        assert args.world_size == 1, "--eval supports single-process runs only"
+        test_stats, preds = evaluate(data_loader_val, model, device)
         print(f"Accuracy of the network on the test images: {test_stats['acc1']:.1f}%")
+        if args.output_dir and misc.is_main_process():
+            with (output_dir / "log.txt").open("a") as f:
+                f.write(json.dumps(test_stats) + "\n")
+            with (output_dir / "preds.txt").open("w") as f:
+                f.write(json.dumps(preds.tolist()) + "\n")
         exit(0)
 
     print(f"Start training for {args.epochs} epochs")
     start_time = time.time()
     max_accuracy = 0.0
     
-    output_dir = Path(args.output_dir) if args.output_dir else None
     
     
     order = OrderOption.RANDOM if args.distributed else OrderOption.QUASI_RANDOM
diff --git a/vitookit/evaluation/eval_linear_multi.py b/vitookit/evaluation/eval_linear_multi.py
index b6ec51ab644dbf65cf1b31817724be59b705eccf..fc2fc66b7cdf48c47afd8e56a9922b0cbafa3570 100644
--- a/vitookit/evaluation/eval_linear_multi.py
+++ b/vitookit/evaluation/eval_linear_multi.py
@@ -16,6 +16,7 @@ import copy
 import itertools
 import torch
 import torch.backends.cudnn as cudnn
+from vitookit.models.build_model import build_model
 import vitookit.utils
 import vitookit.models
 
@@ -24,9 +25,11 @@ from torch import nn
 from torchvision import transforms as pth_transforms
 from torchvision.datasets import ImageFolder
 
+from vitookit.utils import misc
+
 def eval_linear(args):
     misc.init_distributed_mode(args)
-    print("git:\n  {}\n".format(utils.get_sha()))
+    print("git:\n  {}\n".format(misc.get_sha()))
     print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
     cudnn.benchmark = True
 
@@ -79,19 +82,8 @@ def eval_linear(args):
     print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")
 
     # ============ building network ... ============
-    if 'swin' in args.arch:
-        args.patch_size = 4
-        model = models.__dict__[args.arch](
-            window_size=args.window_size,
-            patch_size=args.patch_size,
-            num_classes=0)
-        embed_dim = model.num_features
-    else:
-        model = models.__dict__[args.arch](
-            patch_size=args.patch_size, 
-            num_classes=0,
-            use_mean_pooling=args.avgpool_patchtokens==1)
-        embed_dim = model.embed_dim
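+    # assumption: build_model is gin-configured with the target architecture, replacing the per-arch branches removed above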
+    model = build_model(num_classes=args.nb_classes, drop_path_rate=args.drop_path)
     model.cuda()
     print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
     # load weights to evaluate
@@ -261,7 +252,7 @@ def train(model, linear_classifiers, optimizers, loader, epoch, n, avgpool, perm
         torch.cuda.synchronize()
         for group, (loss, optimizer) in enumerate(zip(losses, optimizers)):
             metric_logger.update(**{'loss{}'.format(group): loss.item()})
-            metric_logger.update(**{'lr{}'.format(group): optimizer.param_groups[0]["lr"]})
+            metric_logger.update(**{'lr{}'.format(group): optimizer.param_groups[-1]["lr"]})
     # gather the stats from all processes
     metric_logger.synchronize_between_processes()
     print("Averaged stats:", metric_logger)
diff --git a/vitookit/evaluation/object_detection/test.py b/vitookit/evaluation/object_detection/test.py
index 548240e0ff831ded584662dc69496c053b57d434..60f315fbe0fedd37c286023728f36b8ef9d079f8 100644
--- a/vitookit/evaluation/object_detection/test.py
+++ b/vitookit/evaluation/object_detection/test.py
@@ -214,6 +214,7 @@ def main():
         if args.format_only:
             dataset.format_results(outputs, **kwargs)
         if args.eval:
+            assert args.world_size == 1, "--eval supports single-process runs only"
             eval_kwargs = cfg.get('evaluation', {}).copy()
             # hard-code way to remove EvalHook args
             for key in [
diff --git a/vitookit/evaluation/semantic_segmentation/test.py b/vitookit/evaluation/semantic_segmentation/test.py
index af95aff64b67e7bdaa780ff0e572b9bb12be945c..9e7fb5f718b0e5984c767ea1429a54f945cb8529 100644
--- a/vitookit/evaluation/semantic_segmentation/test.py
+++ b/vitookit/evaluation/semantic_segmentation/test.py
@@ -152,6 +152,7 @@ def main():
         if args.format_only:
             dataset.format_results(outputs, **kwargs)
         if args.eval:
+            assert args.world_size == 1, "--eval supports single-process runs only"
             dataset.evaluate(outputs, args.eval, **kwargs)