From 3a8e018e14c30739537e4e79a240e635ea945495 Mon Sep 17 00:00:00 2001
From: Sanchit Verma <23355869+sanchitv7@users.noreply.github.com>
Date: Sun, 1 Sep 2024 02:29:51 +0100
Subject: [PATCH] committing a final-esque state

---
 discarded_src/balance_dataset.py              | 101 ++++
 discarded_src/sample_code.py                  | 260 ++++++++++
 discarded_src/sample_code_try_2.py            | 231 +++++++++
 discarded_src/test_run.py                     | 212 ++++++++
 ...ut.tfevents.1724612332.26610f3d33fa.1894.0 | Bin 0 -> 4110 bytes
 ...ut.tfevents.1724612413.26610f3d33fa.1894.1 | Bin 0 -> 88 bytes
 ...ut.tfevents.1724612452.26610f3d33fa.1894.2 | Bin 0 -> 88 bytes
 ...ut.tfevents.1724612515.26610f3d33fa.1894.3 | Bin 0 -> 88 bytes
 ...ut.tfevents.1724612582.26610f3d33fa.5666.0 | Bin 0 -> 4317 bytes
 ...ut.tfevents.1724612683.26610f3d33fa.5666.1 | Bin 0 -> 4110 bytes
 ...ut.tfevents.1724612737.26610f3d33fa.5666.2 | Bin 0 -> 30486 bytes
 .../run-2024-08-25--19-05-27/run_info.txt     | 138 +++++
 ...out.tfevents.1724630048.0826fd70f652.869.0 | Bin 0 -> 4112 bytes
 ...out.tfevents.1724630112.0826fd70f652.869.1 | Bin 0 -> 4111 bytes
 ...out.tfevents.1724630132.0826fd70f652.869.2 | Bin 0 -> 4111 bytes
 ...out.tfevents.1724630555.0826fd70f652.869.3 | Bin 0 -> 88 bytes
 ...ut.tfevents.1724630643.0826fd70f652.6853.0 | Bin 0 -> 65847 bytes
 ...ut.tfevents.1724612332.26610f3d33fa.1894.0 | Bin 0 -> 4110 bytes
 ...ut.tfevents.1724612413.26610f3d33fa.1894.1 | Bin 0 -> 88 bytes
 ...ut.tfevents.1724612452.26610f3d33fa.1894.2 | Bin 0 -> 88 bytes
 ...ut.tfevents.1724612515.26610f3d33fa.1894.3 | Bin 0 -> 88 bytes
 ...ut.tfevents.1724612582.26610f3d33fa.5666.0 | Bin 0 -> 4317 bytes
 ...ut.tfevents.1724612683.26610f3d33fa.5666.1 | Bin 0 -> 4110 bytes
 ...ut.tfevents.1724612737.26610f3d33fa.5666.2 | Bin 0 -> 30486 bytes
 .../run-2024-08-25--19-05-27/run_info.txt     | 138 +++++
 notebooks/data_prep.ipynb                     | 473 ++++++++++++++++++
 notebooks/subset_for_patching.ipynb           | 293 +++++++++++
 requirements.txt                              |  12 +
 src/__init__.py                               |   0
 src/data_prep_utils/__init__.py               |   0
 src/data_prep_utils/data_setup.py             |  31 ++
 src/data_prep_utils/preprocess.py             |  92 ++++
 src/data_prep_utils/resize_bvi_artefact.py    | 108 ++++
 src/data_prep_utils/split_dataset.py          |  92 ++++
 src/data_prep_utils/subset_and_process.py     | 274 ++++++++++
 src/data_prep_utils/subset_data.py            | 158 ++++++
 .../subset_processed_dataset.py               | 113 +++++
 src/data_prep_utils/subset_random.py          |  82 +++
 src/plots.py                                  | 123 +++++
 39 files changed, 2931 insertions(+)
 create mode 100644 discarded_src/balance_dataset.py
 create mode 100644 discarded_src/sample_code.py
 create mode 100644 discarded_src/sample_code_try_2.py
 create mode 100644 discarded_src/test_run.py
 create mode 100644 logs/first_run_logs/run-2024-08-25--18-57-46/events.out.tfevents.1724612332.26610f3d33fa.1894.0
 create mode 100644 logs/first_run_logs/run-2024-08-25--18-57-46/events.out.tfevents.1724612413.26610f3d33fa.1894.1
 create mode 100644 logs/first_run_logs/run-2024-08-25--19-00-48/events.out.tfevents.1724612452.26610f3d33fa.1894.2
 create mode 100644 logs/first_run_logs/run-2024-08-25--19-00-48/events.out.tfevents.1724612515.26610f3d33fa.1894.3
 create mode 100644 logs/first_run_logs/run-2024-08-25--19-02-49/events.out.tfevents.1724612582.26610f3d33fa.5666.0
 create mode 100644 logs/first_run_logs/run-2024-08-25--19-02-49/events.out.tfevents.1724612683.26610f3d33fa.5666.1
 create mode 100644 logs/first_run_logs/run-2024-08-25--19-05-27/events.out.tfevents.1724612737.26610f3d33fa.5666.2
 create mode 100644 logs/first_run_logs/run-2024-08-25--19-05-27/run_info.txt
 create mode 100644 logs/logs_successful_run_2/run-2024-08-25--23-53-08/events.out.tfevents.1724630048.0826fd70f652.869.0
 create mode 100644 logs/logs_successful_run_2/run-2024-08-25--23-53-08/events.out.tfevents.1724630112.0826fd70f652.869.1
 create mode 100644 logs/logs_successful_run_2/run-2024-08-25--23-53-08/events.out.tfevents.1724630132.0826fd70f652.869.2
 create mode 100644 logs/logs_successful_run_2/run-2024-08-26--00-02-24/events.out.tfevents.1724630555.0826fd70f652.869.3
 create mode 100644 logs/logs_successful_run_2/run-2024-08-26--00-03-58/events.out.tfevents.1724630643.0826fd70f652.6853.0
 create mode 100644 logs/second_run_logs/run-2024-08-25--18-57-46/events.out.tfevents.1724612332.26610f3d33fa.1894.0
 create mode 100644 logs/second_run_logs/run-2024-08-25--18-57-46/events.out.tfevents.1724612413.26610f3d33fa.1894.1
 create mode 100644 logs/second_run_logs/run-2024-08-25--19-00-48/events.out.tfevents.1724612452.26610f3d33fa.1894.2
 create mode 100644 logs/second_run_logs/run-2024-08-25--19-00-48/events.out.tfevents.1724612515.26610f3d33fa.1894.3
 create mode 100644 logs/second_run_logs/run-2024-08-25--19-02-49/events.out.tfevents.1724612582.26610f3d33fa.5666.0
 create mode 100644 logs/second_run_logs/run-2024-08-25--19-02-49/events.out.tfevents.1724612683.26610f3d33fa.5666.1
 create mode 100644 logs/second_run_logs/run-2024-08-25--19-05-27/events.out.tfevents.1724612737.26610f3d33fa.5666.2
 create mode 100644 logs/second_run_logs/run-2024-08-25--19-05-27/run_info.txt
 create mode 100644 notebooks/data_prep.ipynb
 create mode 100644 notebooks/subset_for_patching.ipynb
 create mode 100644 requirements.txt
 create mode 100644 src/__init__.py
 create mode 100644 src/data_prep_utils/__init__.py
 create mode 100644 src/data_prep_utils/data_setup.py
 create mode 100644 src/data_prep_utils/preprocess.py
 create mode 100644 src/data_prep_utils/resize_bvi_artefact.py
 create mode 100644 src/data_prep_utils/split_dataset.py
 create mode 100644 src/data_prep_utils/subset_and_process.py
 create mode 100644 src/data_prep_utils/subset_data.py
 create mode 100644 src/data_prep_utils/subset_processed_dataset.py
 create mode 100644 src/data_prep_utils/subset_random.py
 create mode 100644 src/plots.py

diff --git a/discarded_src/balance_dataset.py b/discarded_src/balance_dataset.py
new file mode 100644
index 0000000..cb2bc02
--- /dev/null
+++ b/discarded_src/balance_dataset.py
@@ -0,0 +1,101 @@
+import argparse
+import json
+import os
+import random
+import shutil
+
+from tqdm import tqdm
+
+
+def load_labels(labels_path):
+    with open(labels_path, 'r') as f:
+        return json.load(f)
+
+
+def get_video_paths(input_dir):
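+    # Collect video filenames from the part1/ and part2/ subfolders of the processed dataset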
+    video_paths = {}
+    for part in ['part1', 'part2']:
+        part_dir = os.path.join(input_dir, part)
+        for video in os.listdir(part_dir):
+            video_paths[video] = os.path.join(part_dir, video)
+    return video_paths
+
+
+def get_maximum_balanced_subset(labels, video_paths):
+    artefacts = set()
+    for video_labels in labels.values():
+        artefacts.update(video_labels.keys())
+
+    balanced_subset = {}
+
+    for artefact in artefacts:
+        positive_videos = [video for video, video_labels in labels.items()
+                           if video in video_paths and video_labels.get(artefact, 0) == 1]
+        negative_videos = [video for video, video_labels in labels.items()
+                           if video in video_paths and video_labels.get(artefact, 0) == 0]
+
+        count_per_label = min(len(positive_videos), len(negative_videos))
+
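+        # Randomly down-sample the larger class so presence/absence counts match for this artefact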
+        selected_positive = set(random.sample(positive_videos, count_per_label))
+        selected_negative = set(random.sample(negative_videos, count_per_label))
+
+        for video in selected_positive.union(selected_negative):
+            if video not in balanced_subset:
+                # Copy so the original labels dict is never mutated in place
+                balanced_subset[video] = dict(labels[video])
+            balanced_subset[video][artefact] = 1 if video in selected_positive else 0
+
+    return balanced_subset
+
+
+def copy_videos(videos, video_paths, dst_dir):
+    os.makedirs(dst_dir, exist_ok=True)
+    for video in tqdm(videos, desc=f"Copying to {os.path.basename(dst_dir)}"):
+        src_path = video_paths[video]
+        dst_path = os.path.join(dst_dir, video)
+        shutil.copy2(src_path, dst_path)
+
+
+def create_subset_labels(balanced_subset):
+    # Currently a passthrough: the balanced subset already maps video name -> label dict
+    return balanced_subset
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Create a maximum balanced subset of videos for all artefacts and relocate them.")
+    parser.add_argument("--input_dir", type=str, required=True, help="Path to processed_BVIArtefact folder")
+    parser.add_argument("--output_dir", type=str, required=True, help="Path to output directory")
+    args = parser.parse_args()
+
+    labels_path = os.path.join(args.input_dir, 'processed_labels.json')
+    labels = load_labels(labels_path)
+
+    video_paths = get_video_paths(args.input_dir)
+
+    balanced_subset = get_maximum_balanced_subset(labels, video_paths)
+
+    copy_videos(balanced_subset.keys(), video_paths, args.output_dir)
+
+    # Create and save the subset labels.json
+    subset_labels = create_subset_labels(balanced_subset)
+    labels_json_path = os.path.join(args.output_dir, 'labels.json')
+    with open(labels_json_path, 'w') as f:
+        json.dump(subset_labels, f, indent=4)
+
+    print(f"Maximum balanced subset created in {args.output_dir}")
+    print(f"Total videos in subset: {len(balanced_subset)}")
+    print(f"Labels.json created at {labels_json_path}")
+
+    artefacts = set()
+    for video_labels in balanced_subset.values():
+        artefacts.update(video_labels.keys())
+
+    for artefact in sorted(artefacts):
+        presence_count = sum(1 for video_labels in balanced_subset.values() if video_labels.get(artefact, 0) == 1)
+        absence_count = sum(1 for video_labels in balanced_subset.values() if video_labels.get(artefact, 0) == 0)
+        print(f"{artefact}:")
+        print(f"  Presence count: {presence_count}")
+        print(f"  Absence count: {absence_count}")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/discarded_src/sample_code.py b/discarded_src/sample_code.py
new file mode 100644
index 0000000..975732c
--- /dev/null
+++ b/discarded_src/sample_code.py
@@ -0,0 +1,260 @@
+# dataset structure:
+'''
+data/graininess_100_balanced_subset_split
+├── test
+│   ├── BirdsInCage_1920x1080_30fps_8bit_420_Pristine_QP32_FBT_1.avi
+│   ├── Chimera1_4096x2160_60fps_10bit_420_graininess_QP32_FB_1.avi
+│   ├── Chimera3_4096x2160_24fps_10bit_420_graininess_QP32_FT_1.avi
+│   ├── ...
+│   └── labels.json
+├── train
+│   ├── labels.json
+│   ├── lamppost_1920x1080_120fps_8bit_420_Pristine_QP32_BT_3.avi
+│   ├── lamppost_1920x1080_120fps_8bit_420_Pristine_QP47_SF_3.avi
+│   ├── leaveswall_1920x1080_120fps_8bit_420_Motion_QP32_SB_1.avi
+│   ├── leaveswall_1920x1080_120fps_8bit_420_Motion_QP32_SFB_4.avi
+│   ├── library_1920x1080_120fps_8bit_420_aliasing_QP47_FT_1.avi
+│   ├── ...
+└── val
+    ├── Chimera2_4096x2160_60fps_10bit_420_Dark_QP32_BT_1.avi
+    ├── ...
+    ├── labels.json
+    ├── shields_1280x720_50fps_8bit_420_graininess_QP47_SFB_1.avi
+    ├── station_1920x1080_30fps_8bit_420_graininess_QP32_SB_1.avi
+    ├── svtmidnightsun_3840x2160_50fps_10bit_420_banding_QP47_SBT_3.avi
+    ├── svtmidnightsun_3840x2160_50fps_10bit_420_banding_QP47_SFT_1.avi
+    ├── svtsmokesauna_3840x2160_50fps_10bit_420_banding_QP32_F_4.avi
+    ├── svtwaterflyover_3840x2160_50fps_10bit_420_banding_QP32_T_3.avi
+    └── typing_1920x1080_120fps_8bit_420_aliasing_QP47_BT_4.avi
+
+4 directories, 103 files
+'''
+
+'''
+labels.json in each split is like:
+{
+  "Chimera1_4096x2160_60fps_10bit_420_graininess_QP47_FT_1.avi": {
+    "graininess": 1
+  },
+  "riverbed_1920x1080_25fps_8bit_420_banding_QP47_SBT_1.avi": {
+    "graininess": 0
+  },
+  "Meridian1_3840x2160_60fps_10bit_420_banding_QP47_SFT_1.avi": {
+    "graininess": 0
+  },
+  '''
+
+
+# Import necessary libraries
+import os
+import json
+import torch
+import numpy as np
+from transformers import VivitImageProcessor, VivitForVideoClassification, TrainingArguments, Trainer
+from datasets import Dataset, DatasetDict
+from torchvision.io import read_video
+from sklearn.metrics import accuracy_score, precision_recall_fscore_support
+from multiprocessing import Pool
+import functools
+
+
+def load_video(video_path):
+    # Read the video file
+    video, _, info = read_video(video_path, pts_unit='sec')
+
+    # Set the number of frames we want to sample
+    num_frames_to_sample = 32
+
+    # Get the total number of frames in the video
+    total_frames = video.shape[0]
+
+    # Calculate the sampling rate to evenly distribute frames
+    sampling_rate = max(total_frames // num_frames_to_sample, 1)
+
+    # Sample frames at the calculated rate
+    sampled_frames = video[::sampling_rate][:num_frames_to_sample]
+
+    # If we don't have enough frames, pad with zeros
+    if sampled_frames.shape[0] < num_frames_to_sample:
+        padding = torch.zeros(
+            (num_frames_to_sample - sampled_frames.shape[0], *sampled_frames.shape[1:]), dtype=sampled_frames.dtype)
+        sampled_frames = torch.cat([sampled_frames, padding], dim=0)
+
+    # Ensure we have exactly the number of frames we want
+    sampled_frames = sampled_frames[:num_frames_to_sample]
+
+    # Convert to numpy array and change to channel-first format (C, H, W)
+    return sampled_frames.permute(0, 3, 1, 2).numpy()
+
+
+def create_dataset(data_dir, split):
+    # Construct the path to the video directory and labels file
+    video_dir = os.path.join(data_dir, split)
+    json_path = os.path.join(video_dir, 'labels.json')
+
+    # Load the labels from the JSON file
+    with open(json_path, 'r') as f:
+        labels = json.load(f)
+
+    # Get all video files in the directory
+    video_files = [f for f in os.listdir(video_dir) if f.endswith('.avi')]
+
+    # Create a dataset with video paths and their corresponding labels
+    dataset = Dataset.from_dict({
+        'video_path': [os.path.join(video_dir, f) for f in video_files],
+        'label': [labels[f]['graininess'] for f in video_files]
+    })
+
+    return dataset
+
+
+# Load the ViViT image processor
+image_processor = VivitImageProcessor.from_pretrained(
+    "google/vivit-b-16x2-kinetics400")
+
+
+def preprocess_video(example, image_processor):
+    # Load the video
+    video = load_video(example['video_path'])
+
+    # Process the video frames using the ViViT image processor
+    inputs = image_processor(list(video), return_tensors="np")
+
+    # Add the processed inputs to the example dictionary
+    for k, v in inputs.items():
+        example[k] = v.squeeze()  # Remove batch dimension
+
+    return example
+
+
+def preprocess_dataset(dataset, num_proc=4):
+    # Use multiprocessing to preprocess the dataset in parallel
+    return dataset.map(
+        functools.partial(preprocess_video, image_processor=image_processor),
+        remove_columns=['video_path'],
+        num_proc=num_proc
+    )
+
+
+# Define the path to the dataset
+data_dir = 'graininess_100_balanced_subset_split'
+
+# Load the datasets for each split
+dataset = DatasetDict({
+    'train': create_dataset(data_dir, 'train'),
+    'validation': create_dataset(data_dir, 'val'),
+    'test': create_dataset(data_dir, 'test')
+})
+
+# Define the path where the preprocessed dataset will be saved
+preprocessed_path = './preprocessed_dataset'
+
+# Check if preprocessed dataset already exists
+if os.path.exists(preprocessed_path):
+    print("Loading preprocessed dataset...")
+    # Load the preprocessed dataset from disk
+    preprocessed_dataset = DatasetDict.load_from_disk(preprocessed_path)
+else:
+    print("Preprocessing dataset...")
+    # Preprocess each split of the dataset
+    preprocessed_dataset = DatasetDict({
+        split: preprocess_dataset(dataset[split])
+        for split in dataset.keys()
+    })
+    # Save the preprocessed dataset to disk
+    preprocessed_dataset.save_to_disk(preprocessed_path)
+    print("Preprocessed dataset saved to disk.")
+
+# Load the ViViT model
+model = VivitForVideoClassification.from_pretrained(
+    "google/vivit-b-16x2-kinetics400")
+
+# Modify the model for binary classification; keep the config in sync so the
+# model's cross-entropy loss path also sees two labels
+model.classifier = torch.nn.Linear(model.config.hidden_size, 2)
+model.config.num_labels = 2
+model.num_labels = 2
+
+# Set up training arguments
+training_args = TrainingArguments(
+    output_dir="./results",  # Directory to save the model checkpoints
+    num_train_epochs=3,  # Number of training epochs
+    per_device_train_batch_size=2,  # Batch size for training
+    per_device_eval_batch_size=2,  # Batch size for evaluation
+    warmup_steps=500,  # Number of warmup steps for learning rate scheduler
+    weight_decay=0.01,  # Strength of weight decay
+    logging_dir='./logs',  # Directory for storing logs
+    logging_steps=10,  # Log every X updates steps
+    evaluation_strategy="steps",  # Evaluate during training
+    eval_steps=100,  # Evaluate every X steps
+    save_steps=1000,  # Save checkpoint every X steps
+    # Load the best model when finished training (default metric is loss)
+    load_best_model_at_end=True,
+)
+
+# Define function to compute evaluation metrics
+
+
+def compute_metrics(eval_pred):
+    # Get the predictions and true labels
+    predictions = np.argmax(eval_pred.predictions, axis=1)
+    labels = eval_pred.label_ids
+
+    # Compute precision, recall, and F1 score
+    precision, recall, f1, _ = precision_recall_fscore_support(
+        labels, predictions, average='binary')
+
+    # Compute accuracy
+    accuracy = accuracy_score(labels, predictions)
+
+    # Return all metrics
+    return {
+        'accuracy': accuracy,
+        'f1': f1,
+        'precision': precision,
+        'recall': recall
+    }
+
+
+# Initialize the Trainer
+trainer = Trainer(
+    model=model,  # The instantiated model to be trained
+    args=training_args,  # Training arguments, defined above
+    train_dataset=preprocessed_dataset['train'],  # Training dataset
+    eval_dataset=preprocessed_dataset['validation'],  # Evaluation dataset
+    compute_metrics=compute_metrics,  # The function that computes metrics
+)
+
+# Train the model
+trainer.train()
+
+# Evaluate the model on the test set
+evaluation_results = trainer.evaluate(preprocessed_dataset['test'])
+print(evaluation_results)
+
+# Save the final model
+trainer.save_model("./vivit_binary_classifier")
+
+# Function to predict on new videos
+
+
+def predict_video(video_path):
+    # Load and preprocess the video
+    video = load_video(video_path)
+    inputs = image_processor(list(video), return_tensors="pt")
+
+    # Move inputs to the same device as the model (it may be on GPU after training)
+    inputs = {k: v.to(model.device) for k, v in inputs.items()}
+
+    # Make prediction
+    with torch.no_grad():
+        outputs = model(**inputs)
+
+    # Get probabilities and predicted class
+    probabilities = torch.softmax(outputs.logits, dim=1)
+    predicted_class = torch.argmax(probabilities, dim=1).item()
+
+    return predicted_class, probabilities[0][predicted_class].item()
+
+
+
+
+# Example usage of prediction function
+# video_path = "path/to/your/video.avi"
+# predicted_class, confidence = predict_video(video_path)
+# print(f"Predicted class: {predicted_class}, Confidence: {confidence:.2f}")
diff --git a/discarded_src/sample_code_try_2.py b/discarded_src/sample_code_try_2.py
new file mode 100644
index 0000000..2923397
--- /dev/null
+++ b/discarded_src/sample_code_try_2.py
@@ -0,0 +1,231 @@
+'''
+# dataset structure:
+data/graininess_100_balanced_subset_split
+├── test
+│   ├── BirdsInCage_1920x1080_30fps_8bit_420_Pristine_QP32_FBT_1.avi
+│   ├── Chimera1_4096x2160_60fps_10bit_420_graininess_QP32_FB_1.avi
+│   ├── Chimera3_4096x2160_24fps_10bit_420_graininess_QP32_FT_1.avi
+│   ├── ...
+│   └── labels.json
+├── train
+│   ├── labels.json
+│   ├── lamppost_1920x1080_120fps_8bit_420_Pristine_QP32_BT_3.avi
+│   ├── lamppost_1920x1080_120fps_8bit_420_Pristine_QP47_SF_3.avi
+│   ├── leaveswall_1920x1080_120fps_8bit_420_Motion_QP32_SB_1.avi
+│   ├── leaveswall_1920x1080_120fps_8bit_420_Motion_QP32_SFB_4.avi
+│   ├── library_1920x1080_120fps_8bit_420_aliasing_QP47_FT_1.avi
+│   ├── ...
+└── val
+    ├── Chimera2_4096x2160_60fps_10bit_420_Dark_QP32_BT_1.avi
+    ├── ...
+    ├── labels.json
+    ├── shields_1280x720_50fps_8bit_420_graininess_QP47_SFB_1.avi
+    ├── station_1920x1080_30fps_8bit_420_graininess_QP32_SB_1.avi
+    ├── svtmidnightsun_3840x2160_50fps_10bit_420_banding_QP47_SBT_3.avi
+    ├── svtmidnightsun_3840x2160_50fps_10bit_420_banding_QP47_SFT_1.avi
+    ├── svtsmokesauna_3840x2160_50fps_10bit_420_banding_QP32_F_4.avi
+    ├── svtwaterflyover_3840x2160_50fps_10bit_420_banding_QP32_T_3.avi
+    └── typing_1920x1080_120fps_8bit_420_aliasing_QP47_BT_4.avi
+
+4 directories, 103 files
+'''
+
+'''
+labels.json in each split is like:
+{
+  "Chimera1_4096x2160_60fps_10bit_420_graininess_QP47_FT_1.avi": {
+    "graininess": 1
+  },
+  "riverbed_1920x1080_25fps_8bit_420_banding_QP47_SBT_1.avi": {
+    "graininess": 0
+  },
+  "Meridian1_3840x2160_60fps_10bit_420_banding_QP47_SFT_1.avi": {
+    "graininess": 0
+  },
+  '''
+
+import os
+import json
+import torch
+import numpy as np
+from transformers import VivitImageProcessor, VivitForVideoClassification, TrainingArguments, Trainer
+from datasets import Dataset, DatasetDict
+from torchvision.io import read_video
+import torchvision.transforms as T
+from sklearn.metrics import accuracy_score, precision_recall_fscore_support
+import albumentations as A
+from albumentations.pytorch import ToTensorV2
+import cv2
+from functools import partial
+
+
+def get_augmentation():
+    # Spatial augmentations applied frame by frame; note that each frame draws its
+    # own random parameters, so the transforms are not temporally consistent.
+    return A.Compose([
+        A.HorizontalFlip(p=0.5),
+        A.VerticalFlip(p=0.5),
+        A.RandomRotate90(p=0.5),
+        A.Transpose(p=0.5),
+        A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=15, p=0.5),
+    ])
+
+
+def apply_augmentation(frames, augmentation):
+    # Albumentations expects numpy HWC images, so convert each frame, augment it,
+    # and stack the results back into a (T, H, W, C) uint8 tensor.
+    aug_frames = []
+    for frame in frames:
+        augmented = augmentation(image=frame.numpy())
+        aug_frames.append(torch.from_numpy(augmented['image']))
+    return torch.stack(aug_frames)
+
+
+def uniform_frame_sample(video, num_frames):
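+    # Pick evenly spaced frame indices across the clip; short clips are returned unchanged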
+    total_frames = len(video)
+    if total_frames <= num_frames:
+        return video
+
+    indices = np.linspace(0, total_frames - 1, num_frames, dtype=int)
+    return video[indices]
+
+
+def load_video(video_path, num_frames=32, augmentation=None):
+    video, _, info = read_video(video_path, pts_unit='sec')
+
+    # Uniform sampling
+    sampled_frames = uniform_frame_sample(video, num_frames)
+
+    if augmentation:
+        sampled_frames = apply_augmentation(sampled_frames, augmentation)
+
+    # Keep raw 0-255 pixel values in (T, C, H, W) layout; the ViViT image
+    # processor downstream takes care of rescaling and normalization.
+    return sampled_frames.permute(0, 3, 1, 2).float()
+
+
+def create_dataset(data_dir, split):
+    video_dir = os.path.join(data_dir, split)
+    json_path = os.path.join(video_dir, 'labels.json')
+    with open(json_path, 'r') as f:
+        labels = json.load(f)
+
+    video_files = [f for f in os.listdir(video_dir) if f.endswith('.avi')]
+
+    dataset = Dataset.from_dict({
+        'video_path': [os.path.join(video_dir, f) for f in video_files],
+        'label': [labels[f]['graininess'] for f in video_files]
+    })
+
+    return dataset
+
+
+# Load the image processor
+image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
+
+
+def preprocess_video(example, image_processor, augmentation=None):
+    video = load_video(example['video_path'], augmentation=augmentation)
+    inputs = image_processor(list(video), return_tensors="pt")
+    for k, v in inputs.items():
+        example[k] = v.squeeze()
+    return example
+
+
+def preprocess_dataset(dataset, augmentation=None):
+    return dataset.map(
+        partial(preprocess_video, image_processor=image_processor, augmentation=augmentation),
+        remove_columns=['video_path'],
+        num_proc=4
+    )
+
+
+# Load and preprocess the datasets
+data_dir = 'graininess_100_balanced_subset_split'
+dataset = DatasetDict({
+    'train': create_dataset(data_dir, 'train'),
+    'validation': create_dataset(data_dir, 'val'),
+    'test': create_dataset(data_dir, 'test')
+})
+
+augmentation = get_augmentation()
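+# Augmentation is applied only to the training split below; val/test remain deterministic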
+
+preprocessed_path = './preprocessed_dataset_augmented'
+if os.path.exists(preprocessed_path):
+    print("Loading preprocessed dataset...")
+    preprocessed_dataset = DatasetDict.load_from_disk(preprocessed_path)
+else:
+    print("Preprocessing dataset with augmentation...")
+    preprocessed_dataset = DatasetDict({
+        'train': preprocess_dataset(dataset['train'], augmentation),
+        'validation': preprocess_dataset(dataset['validation']),
+        'test': preprocess_dataset(dataset['test'])
+    })
+    preprocessed_dataset.save_to_disk(preprocessed_path)
+    print("Preprocessed dataset saved to disk.")
+
+# Load the model
+model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")
+model.classifier = torch.nn.Linear(model.config.hidden_size, 2)
+model.config.num_labels = 2  # keep the config in sync so the loss path sees two labels
+model.num_labels = 2
+
+# Set up training arguments
+training_args = TrainingArguments(
+    output_dir="./results",
+    num_train_epochs=5,
+    per_device_train_batch_size=4,
+    per_device_eval_batch_size=4,
+    warmup_steps=500,
+    weight_decay=0.01,
+    logging_dir='./logs',
+    logging_steps=10,
+    evaluation_strategy="steps",
+    eval_steps=100,
+    save_steps=1000,
+    load_best_model_at_end=True,
+    fp16=True,  # Enable mixed precision training
+    gradient_accumulation_steps=2,  # Accumulate gradients over 2 steps
+)
+
+
+def compute_metrics(eval_pred):
+    predictions = np.argmax(eval_pred.predictions, axis=1)
+    labels = eval_pred.label_ids
+    precision, recall, f1, _ = precision_recall_fscore_support(labels, predictions, average='binary')
+    accuracy = accuracy_score(labels, predictions)
+    return {
+        'accuracy': accuracy,
+        'f1': f1,
+        'precision': precision,
+        'recall': recall
+    }
+
+
+# Initialize Trainer
+trainer = Trainer(
+    model=model,
+    args=training_args,
+    train_dataset=preprocessed_dataset['train'],
+    eval_dataset=preprocessed_dataset['validation'],
+    compute_metrics=compute_metrics,
+)
+
+# Train the model
+trainer.train()
+
+# Evaluate the model
+evaluation_results = trainer.evaluate(preprocessed_dataset['test'])
+print(evaluation_results)
+
+# Save the model
+trainer.save_model("./vivit_binary_classifier_augmented")
+
+
+def predict_video(video_path):
+    video = load_video(video_path)
+    inputs = image_processor(list(video), return_tensors="pt")
+    # Move inputs to the same device as the (possibly GPU-resident) model
+    inputs = {k: v.to(model.device) for k, v in inputs.items()}
+    with torch.no_grad():
+        outputs = model(**inputs)
+    probabilities = torch.softmax(outputs.logits, dim=1)
+    predicted_class = torch.argmax(probabilities, dim=1).item()
+    return predicted_class, probabilities[0][predicted_class].item()
+
+# Example usage of prediction function
+# video_path = "path/to/your/video.avi"
+# predicted_class, confidence = predict_video(video_path)
+# print(f"Predicted class: {predicted_class}, Confidence: {confidence:.2f}")
diff --git a/discarded_src/test_run.py b/discarded_src/test_run.py
new file mode 100644
index 0000000..b2df458
--- /dev/null
+++ b/discarded_src/test_run.py
@@ -0,0 +1,212 @@
+import os
+import json
+import torch
+import torch.nn as nn
+import torch.optim as optim
+from torch.utils.data import Dataset, DataLoader
+from torchvision import transforms, models
+from torchvision.io import read_video
+from torchvision.models import ResNet50_Weights
+
+# Set device
+if torch.cuda.is_available():
+    device = torch.device("cuda")
+elif torch.backends.mps.is_available():
+    device = torch.device("mps")
+else:
+    device = torch.device("cpu")
+
+print(f"Using device: {device}")
+
+# Define paths
+data_path = "data/graininess_100_balanced_subset_split"
+train_path = os.path.join(data_path, "train")
+val_path = os.path.join(data_path, "val")
+test_path = os.path.join(data_path, "test")
+
+# Define artifact (can be extended for multi-task later)
+artifact = "graininess"
+
+
+# Helper function to load labels
+def load_labels(split_path):
+    with open(os.path.join(split_path, "labels.json"), "r") as f:
+        return json.load(f)
+
+
+# Custom dataset class
+class VideoDataset(Dataset):
+    def __init__(self, root_dir, labels, artifact):
+        self.root_dir = root_dir
+        self.labels = labels
+        self.artifact = artifact
+        self.video_files = [f for f in os.listdir(root_dir) if f.endswith('.avi')]
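+        # Per-channel normalization with generic video-model statistics (mean 0.45, std 0.225)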
+        self.transform = transforms.Compose([
+            transforms.ConvertImageDtype(torch.float32),
+            transforms.Normalize(mean=[0.45, 0.45, 0.45], std=[0.225, 0.225, 0.225])
+        ])
+
+    def __len__(self):
+        return len(self.video_files)
+
+    def __getitem__(self, idx):
+        video_name = self.video_files[idx]
+        video_path = os.path.join(self.root_dir, video_name)
+        label = self.labels[video_name][self.artifact]
+
+        # Load video using torchvision; frames come back as (T, H, W, C) uint8
+        video, _, _ = read_video(video_path, pts_unit='sec')
+
+        # Subsample 16 frames (guard against clips shorter than 16 frames)
+        step = max(video.shape[0] // 16, 1)
+        video = video[::step][:16]
+
+        # Move channels first so Normalize sees (T, C, H, W), then normalize
+        video = video.permute(0, 3, 1, 2)
+        video = self.transform(video)
+
+        # Rearrange dimensions to [C, T, H, W]
+        video = video.permute(1, 0, 2, 3)
+
+        return video, torch.tensor(label, dtype=torch.float32)
+
+
+# Create datasets
+train_labels = load_labels(train_path)
+val_labels = load_labels(val_path)
+test_labels = load_labels(test_path)
+
+train_dataset = VideoDataset(train_path, train_labels, artifact)
+val_dataset = VideoDataset(val_path, val_labels, artifact)
+test_dataset = VideoDataset(test_path, test_labels, artifact)
+
+# Create data loaders
+batch_size = 8
+train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
+val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
+test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
+
+
+# Define model
+class VideoClassifier(nn.Module):
+    def __init__(self, num_classes=1):
+        super(VideoClassifier, self).__init__()
+        # Frames are fed through the 2D ResNet one at a time, so the stem keeps
+        # its standard 3 input channels and the pretrained conv1 weights are reused
+        self.resnet = models.resnet50(weights=ResNet50_Weights.DEFAULT)
+        self.fc = nn.Linear(2048, num_classes)
+
+    def forward(self, x):
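+        # x: [B, C, T, H, W] -> fold time into the batch, run the ResNet trunk on
+        # each frame, then average the per-frame features over time before the head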
+        b, c, t, h, w = x.shape
+        x = x.transpose(1, 2).reshape(b * t, c, h, w)
+        x = self.resnet.conv1(x)
+        x = self.resnet.bn1(x)
+        x = self.resnet.relu(x)
+        x = self.resnet.maxpool(x)
+        x = self.resnet.layer1(x)
+        x = self.resnet.layer2(x)
+        x = self.resnet.layer3(x)
+        x = self.resnet.layer4(x)
+        x = self.resnet.avgpool(x)
+        x = x.reshape(b, t, -1).mean(1)
+        x = self.fc(x)
+        return torch.sigmoid(x)
+
+
+model = VideoClassifier().to(device)
+
+# Define loss function and optimizer
+criterion = nn.BCELoss()
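+# (the model already applies a sigmoid, so plain BCELoss is the matching criterion)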
+optimizer = optim.Adam(model.parameters(), lr=0.001)
+
+
+# Training function
+def train(model, train_loader, criterion, optimizer, device):
+    model.train()
+    running_loss = 0.0
+    correct = 0
+    total = 0
+
+    for videos, labels in train_loader:
+        videos, labels = videos.to(device), labels.to(device)
+
+        optimizer.zero_grad()
+        outputs = model(videos)
+        loss = criterion(outputs.squeeze(), labels)
+        loss.backward()
+        optimizer.step()
+
+        running_loss += loss.item()
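+        # Threshold the sigmoid outputs at 0.5 to track training accuracy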
+        predicted = (outputs.squeeze() > 0.5).float()
+        total += labels.size(0)
+        correct += (predicted == labels).sum().item()
+
+    epoch_loss = running_loss / len(train_loader)
+    epoch_acc = correct / total
+    return epoch_loss, epoch_acc
+
+
+# Validation function
+def validate(model, val_loader, criterion, device):
+    model.eval()
+    running_loss = 0.0
+    correct = 0
+    total = 0
+
+    with torch.no_grad():
+        for videos, labels in val_loader:
+            videos, labels = videos.to(device), labels.to(device)
+
+            outputs = model(videos)
+            loss = criterion(outputs.squeeze(), labels)
+
+            running_loss += loss.item()
+            predicted = (outputs.squeeze() > 0.5).float()
+            total += labels.size(0)
+            correct += (predicted == labels).sum().item()
+
+    epoch_loss = running_loss / len(val_loader)
+    epoch_acc = correct / total
+    return epoch_loss, epoch_acc
+
+
+# Training loop
+num_epochs = 10
+for epoch in range(num_epochs):
+    train_loss, train_acc = train(model, train_loader, criterion, optimizer, device)
+    val_loss, val_acc = validate(model, val_loader, criterion, device)
+
+    print(f"Epoch {epoch + 1}/{num_epochs}")
+    print(f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}")
+    print(f"Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}")
+    print()
+
+
+# Test function
+def test(model, test_loader, criterion, device):
+    model.eval()
+    running_loss = 0.0
+    correct = 0
+    total = 0
+
+    with torch.no_grad():
+        for videos, labels in test_loader:
+            videos, labels = videos.to(device), labels.to(device)
+
+            outputs = model(videos)
+            loss = criterion(outputs.squeeze(), labels)
+
+            running_loss += loss.item()
+            predicted = (outputs.squeeze() > 0.5).float()
+            total += labels.size(0)
+            correct += (predicted == labels).sum().item()
+
+    test_loss = running_loss / len(test_loader)
+    test_acc = correct / total
+    return test_loss, test_acc
+
+
+# Evaluate on test set
+test_loss, test_acc = test(model, test_loader, criterion, device)
+print(f"Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.4f}")
+
+# Save the model
+torch.save(model.state_dict(), f"video_classifier_{artifact}.pth")
+print(f"Model saved as video_classifier_{artifact}.pth")
diff --git a/logs/first_run_logs/run-2024-08-25--18-57-46/events.out.tfevents.1724612332.26610f3d33fa.1894.0 b/logs/first_run_logs/run-2024-08-25--18-57-46/events.out.tfevents.1724612332.26610f3d33fa.1894.0
new file mode 100644
index 0000000000000000000000000000000000000000..cd6b3acc3dd59de5924f5c744c6dfed965de0bc9
GIT binary patch
literal 4110
zcmai1&x;*J5LOAAEQpwk;2~M&D0wgYHoI{XL^Kds6pX037^LYp(=)HLJ>6q>&+fj2
z`~kt6Ac7#ddGR9N#J|IXC;bBikKV+ux@UT3HZO4xyKGf;b^ZA2tDeIb;?KWdAAWz~
z{^K`({p07~-+%MfrR7r3ZyyWoZ|;6F-@4+3cE&CZx5eDGb<OR1eqyB;b}q<@l~M^7
zC4c|;-jg3*i+=z7=E+wVe)}c*ee1i+Ti;&ZdY#+KUG?JBv(DA6e=ePW<>I-EFP(q!
z)?b$%Zf$R8ru9whSs`t9b32<~wZgT^yQ{X<v)!HDz1hyeY<GV)TO7>x-=6JV&#r`1
z$HImn*yMc4l@r0BFw9eKa5@o+W3C|cwoP`86@|?G#FEUIg9k6FH3jCpXk5IzvzJCQ
zWt?M1D>50bX@q5kIF`AHF|Z}~`HDIDK)`NsJp?#13YR_0rfS+vUUaKe@<TRHJ4q&$
z<%Oi)@jP$qR&h_U$Bb@n?*=#F_(pwl6HE%BIAyyNPAS2ymbzk8C`hk~+0K5@oCsO1
z&{mO0^ZCvq$a%qQwiKQ($jSW1RJa?3H%4NVT`EI;T)_E3u&?<kqokNNwkFLyhNIiM
ztCeU>zH(4q>r`~4&-h@ki{3JquSC%*G|aCX${<rxBdOSFH8wapAvnY2;iH%#w{_c4
z=F&jAGmi<vip2e0-A$%q3iCtI%Np*SJQm#*y@pxQD4BDzfDfwbV1afC4hU&eP>>0|
z%2XAVKTQt=|2I1QnP;H8SC`T{%q?J#3kPiX>OQ~Slwb%%bIqhKi6uZ8<4_$x7I3$5
zqB<Q-7@D~gT?U~y9)qZ4EonX@RHHR7MdT_vlL``aXR0*>-)YsF)hHx2W6*(jys?7i
zT&X3`j}R*`?O16|&47~ps4-GcSwb6@w*|Fq@Y6c@XxgV_b>lixn5di(1q6FBwSs%j
zI#HUt`_hMa!-!7~(_OL=rxW3_TwI?B$|eX>jp%=78b2WN&LTo>#Z}2bZ@F~D5L5=C
zDZGrg-b5R6&q}b*)8Pcin^nHNMzfuAQU-O#?C2v%W50`{8QqvHu^kxAb&lpxt0#(D
zcq?-%?6R$(9NN%J?tKx4l?tm2FNm3~X_(?}+R8v~J*fA)h=xNH!HKa)!cq*8g+@M6
zGr$o8TJjty8i$E#_ZW#HbZHhj0=tcAUI)H;{+=tO<4Y`D|7Af$H{@qUZ7@)+1_lZg
zN42${OoHM|p~5Ybs!<WZzlbS}0OOv;7AJVbnzGW^ksa>N$_^!!6_^amq$46fGv=}a
zCkqyVd?csjQM!TzT~FvgR!fuu#cjCdHANarA9T6VrKGmfLjg~P&9+dSs>uvS2@5`s
z33jP$V@+5fnCgTBToRc*90XHoha%L-6e|zEbLE&fc>8clBH<8AQ(%E`^c{VeM(jZ8
zKCb@&-f<vkZ4=%FVe?P0jWFr4H9FXL2O1iBqKJSC$Lj`{Bc73krrE`W9nlV%X21C<
zkX9QdmC8^E5ONdwDP<dB<!WR6iZ)LIe82xdERvv_(WXB^!EC;Y%n>xKTcXJ!%#x2(
zo~*J4OLVygN9oG1##v-h)L}l&QD+#Y9jnw>kGpa1Uc}DmU7JDDu>t^CVmA9R3LwN<
z2}7PD48+ue3O$ws5W9resnTMoa+>pHTb5@}4<yn6>ow-q(gUjRS2m^*oRuIUJk<uW
z8H(aCPn`q&3CuTkM8Hs@;`y35oSwAtiE5@p*tBj%I9|1s$l#qG-nH1U?^3Qf(Xoc4
zr7)3?H#@Lv*Nm2Jw^ne@8>|u>%HBD=dy9Q`=fm3{y*p?dE04h`^ai4y4MEYBT%GW>
zV-)v9m#{o+=QDr@jJlWa#Dq=o9uOmSuG#|IRO4d=OCPg5RCov{K7q0Ijn2WG!{nxJ
zh8?(Zanjkge|o29P%7f6EgOqG)*D-KEguAD<3@~ahJt^5_tQgEoSiLv-9*;y0iVTw
z(5jD9G5I7JiV!w@L8m~-(oOeLnD=6h?M97E*;x)i<feOK)AEgL83PEq5cr6L_2Qs=
z`UNK_a^2u#bJFvPX5({#_@+M^3ONqrIh>oF<?0lBkBv}K;1H|82_S%GV5mxOMawLp
zC_>&5S5Ln}wJ3e71s?Mf6U$YzqV1Kgi!|xMlOP-{X?&ebHhAnxK;knYiUok@s~uL`
TqYut+y}WgfzQrGZ{!{iJaFsSy

literal 0
HcmV?d00001

diff --git a/logs/first_run_logs/run-2024-08-25--18-57-46/events.out.tfevents.1724612413.26610f3d33fa.1894.1 b/logs/first_run_logs/run-2024-08-25--18-57-46/events.out.tfevents.1724612413.26610f3d33fa.1894.1
new file mode 100644
index 0000000000000000000000000000000000000000..1dcb305968855178b24dd6f7a555aee19ce2cfc5
GIT binary patch
literal 88
zcmeZZfPjCKJmzw~+3$1y{-&FbQoKn;iJ5tNu4SotC00g3dR#gssd>fuMM?RIMJam4
hrMbC@MU{HxMVTe3MS7_qRq<(=IjQjw5$P!#odAX1AyfbW

literal 0
HcmV?d00001

diff --git a/logs/first_run_logs/run-2024-08-25--19-00-48/events.out.tfevents.1724612452.26610f3d33fa.1894.2 b/logs/first_run_logs/run-2024-08-25--19-00-48/events.out.tfevents.1724612452.26610f3d33fa.1894.2
new file mode 100644
index 0000000000000000000000000000000000000000..3619b50087b5c9d5154ef9d48164fbb6b557b856
GIT binary patch
literal 88
zcmeZZfPjCKJmzx#+^T%@{-&FbQoKn;iJ5tNu4SotC00g3dR#gssd>fuMM?RIMJam4
hrMbC@MU{HxMVTe3MS7_qRq<(=IjQjw5&ur38vuSTA$R})

literal 0
HcmV?d00001

diff --git a/logs/first_run_logs/run-2024-08-25--19-00-48/events.out.tfevents.1724612515.26610f3d33fa.1894.3 b/logs/first_run_logs/run-2024-08-25--19-00-48/events.out.tfevents.1724612515.26610f3d33fa.1894.3
new file mode 100644
index 0000000000000000000000000000000000000000..bb878f89e4e3b7341f99e0c1014a936814064682
GIT binary patch
literal 88
zcmeZZfPjCKJmzxx@_v1Bf74AzDc+=_#LPTB*Rs^S5-X!1JuaP+)V$*SqNM!9q7=R2
h(%js{qDsB;qRf)iBE3|Qs`#|boYZ)T$Ug?vZU9^kAkhE-

literal 0
HcmV?d00001

diff --git a/logs/first_run_logs/run-2024-08-25--19-02-49/events.out.tfevents.1724612582.26610f3d33fa.5666.0 b/logs/first_run_logs/run-2024-08-25--19-02-49/events.out.tfevents.1724612582.26610f3d33fa.5666.0
new file mode 100644
index 0000000000000000000000000000000000000000..7588ca876fce4b56f15ca5f21534edb4b4f6fcc6
GIT binary patch
literal 4317
zcmai2&#NWH6&^)&-j$#;Iw1^3UoK?f;JtJ2n>Ul0AyFI<9tlKAbR|;O?XK=ydDT^&
zs_OUVI=BdA6S7Jm1Og%8A|NCnLdYWd2P6wOZbB9cfe4~;5s0`@^PQ^h>h3pp%q-sU
z&N+4J{P@mys^7t>`0w*y9{hIaFZ_*9-+lA%PoMwF%5tga&m0NuukZa}K6%0m?TlR+
zZu7Zos*2mq{Mbq_>|Br)E2I)EO78ypKi~TO1JUn$zxv(Jcb@)5^n2kqk4=94*yIei
zrMu|GP0w0aH$Hjv^nGXVIeYi%yKa2==&i}_?$k8CZamASonGIa&M#Ww8s*(Z+vwTe
zVsC%8xH{XrJew`A%@%vJ{cF=F!l@%+LlA6qzTnD<V2~T;DK|JB3B?gtka^oAyV{Ce
zW`1NzX3W8Z7v+Wmb6(Ug-rd?uqnR?!F{2fk^w-qFvRoX=OvD)2iu-KMoP0&VZh0jH
zI4}yA-N~k`n{8fnqg3)kHn%%TCZ*-Mq~7r?YpO<ZPqD|0uJ7&zH{tkJeR2~_a-le7
zy9lS0;8sgrGAb0Lm&9ywIcScBEZ1nO$fEgtu?%va^NOv6=Sy-jzcv=`Md7u97-g5r
zP#>3Yel^%v{3fHMm^QW|%{+#qo2spqs7<zZP+e+ObfnAp>V6x&WiDHbyisVF-_(@B
zR7s7bVyD&6;P9B>43k^0#tgZwnwm0~2GYemCI~AMcXzcnnUX2Y4?%BQbLZrdXs_rr
z%!*pcjFSa?P*r;iv`cV6NSmC3Oz2gnEUElydLa0Jqtl(a1G+nPA+5vQ0`|DD$9AXg
z^4m)ZhCnpeOzMJI0+cZf)$t<%cUvc_)zO5ZnTzN$2)*$bL?tUp^BJKUt$86LSJ9bN
zkf1wJtv>ixt2V4cA*mU?4!q-y6)fXQt$22bSb=HB3Tr9`lw^msk$TJ$+OVw2sb#&N
z#=%G3+$O7P*OJ0W<%Gx~*pI2@+;i57(%k(>`Vemz@zG(rOV;A%NVqDNS4M)O4uVu8
z`kk5D_lSIU8KJi3s$ihEOgdr+Dud7z-jp}qL>qF?3b4=Z;RMIqRlc}Hvz>BM26e{l
z=tD?jzw^8v+?Xt}9T?4ZhUQSKM~X^#D>EwWswtuD+t5i~d?^eo6;>IZ6Ej;=GsWGw
zm4VzkP+x8%>JL!_$HpECOEK)9^og1Qju_B_XF$;~Oia7SNaUeQv&a$HZA|kj@XeR+
zxm-HF!ou}G&WY&y{H&-92CC7(K#t<5rqZKHP<$m+xMfr|DgyWyF@+Ie+_Tu?1dmu#
zRv0_5!`;)OMM-G|Cc`r6h{#Wjxh%oSf<+)7$}xEqE+;|T6Z(x+5~V<KYi@Z(k;c*o
zU1oG4sjc)-z*AwnEfhD^Xa=K%1s_KQyHvKJCM*z4b;1EIiOe1jf~mAa5h`Sgm4~01
za?BgNeK;kNaEPTTus}Hajy{YdcA#`0*M9)-Fc7r14)21n`A67BnDp2h9PGLS4Gld}
zM8JjPRgKFL&%i>{>|(+WX@`un-~JRxtBsOMWyl2xxsLplvW>8El`(!zn<oK&(JopL
zizKK{Y11F0U^ZJv<_H?rF45!=X30k?&$P4#OLVmXN9oe9hgnRcsKtDmqt-A?CRVAj
z9(Uv1ixE46ckL9CmK6ZN0<+nTQ2-&<N*MAKVIZakROqoBfY=qhj+GWemC>B9nxZ&)
zx+jquSg$a*mL5=Dzor9f!kf@ZkPx0~J=yd{ahRvp0safjH*`e6P@?47hB%y_wDE~*
z#zR;)ZcR8|H<U>4ogUtm*s||Zt~k-MhNPu1k`FgKuxmGrmTkLMaLya75*(U-`{0Ee
z?D=QE_ssX7?6tL(N8l8C15wZVpy*1jj`_whihHC>Snjv;3BUtJ?aOy$!bW%xh><!|
zO^$7<@iBs>k6G?3+=mmNz}WgmXJF2La#J_M4&1sp>1@+Iz0)%&6>-p(twkQ{jV-yB
zuLNhqMhtEGf<O1d4-ZgrcCzqQ9a*;n{51B1R(+g`(I-h?gs|arIt4<OZoHSmyyqKi
zH)>?c&T;@EGwl<bmTz347(md4z(*Xcmsi`TUvPpVS2aF1M?D{Dwmuh#Z@QzQki#&(
zgmb4SxjMz(VIx!&IK(P&0tlcP=&RCM(J~7tija52)#I;FEehXgfycbW#Bx=yX?vyX
zB29YmBnSsf8eb=)4IaA^ka$OkVgca!YWvmp>UU31?w#C2-{R{0VZPh`HQ@1&UVm=q
zUmyJG-uLj=fCnGG@X+MUWD>rcFVd&&xeKpfdg}c@pV|3lYyZJtul#4H{3zLfZSvL0
z1JORLQ~Dk|_t`tIJ@xX>etPhDd+1wVuv0t#=np+IIX`)*y=3_Pc<zsX`1s1-K6`EV
m8|}fnZhiRQo#nsNZ4Xb*PVSEf!}r~}@4W4v{QRw-ocbTXzI?y{

literal 0
HcmV?d00001

diff --git a/logs/first_run_logs/run-2024-08-25--19-02-49/events.out.tfevents.1724612683.26610f3d33fa.5666.1 b/logs/first_run_logs/run-2024-08-25--19-02-49/events.out.tfevents.1724612683.26610f3d33fa.5666.1
new file mode 100644
index 0000000000000000000000000000000000000000..ef95393aca2d3ecbd709755be79cb946c7b69f49
GIT binary patch
literal 4110
zcmai1&x;&I6jlkEtcWB>56RL;$xLP@iR^|DFc4T2jHvMvq^Rkxo++ogs;#P?-AM@k
z0pdjvK@bmue}Mmux7@sV_a=U?s=K;-Gl_fHW#4=C>iziM_o`1`i$DMVaPrHg&E>zY
zJpbd*58r!dX}Q$X2V0^2oyBL<gX>;sXYA5&TTES3Row2Tr&fAlr-H0lDV1PR^7i+?
zo&NrA^!xnBU%$Kb>|XTy{?FGAe!6yWncJ1S;l-I}t*iV0T)p_#l?zwixcK`0zpp+$
zI6TZu<Lkz=LfY)kVK%*Cg=>^|H*BLPi`nAVWOg)J+@4J4$CKG&a_cy|9!_nA4MDKc
z`I0Lqf<a-Jr`+IlBote&AoI3KcC{6S%>Brc%$S1*FIGDW%z06}cz0_rjb_R?$Bb5F
z(qB^x%L=iTxri~aCHMK7Ir&7uZvH_Ca9|WJdzsCuZuWW6jZ(=E*}Ui^nXD`?B=wHx
zc~dotdx||~bmwpp+=SzM^~p^zDTLyb?JS&9f?F;1icz5;y(uQM+d*?G<Z6w!iaeT6
zXY(NE1+Umrcs?g5)8nyl5rxMCG0HBLp+3&x{3zI0{ESgjOdDH~W*WoMP1V*))Fxj$
zsBX3@I?`o)bgPZtGMBGK(I_;`?`p~*Q&J<T*l9I1*qjoaVe<4@%#hovsVQ@5Ae~KP
zg0LcScUOCpSuutAA?Rf_cTR3adqt;VR@6%7oGjpjs@hwiU4jEb+7uLILa#Eb6_r0t
z4+Q@=I^CI<pu1C-(mKp7V2=xXY<KD|zeP$g1fscSQkTRMpp0Rtj&B9r?VYGrM-zr-
z&Z5g8^u}Wlm8>MqXM}3B=B0>SMQ2h$g6>?k`ruow+OZ0Sq-OLw@QycDu$(Kk<oO1%
z0@IF_)>I5A$v3r;ddw2qu)Ha#Wxb!q!AIS^AggNElEO&kgeV}m6;ms?=d2Z_x%*oB
z5N{aq(P6qv*5Yg=T$b|>MuM^qf>a~=pPAbCh<rGYP+N0VGSFKt9WexzL1+pu<Bd1b
zhTO9f?DJwc!SQ~TFK^Opr<{~QoiRIl18MAcQPhJQlO?tTqq)w}9BTDQQ3-ElPK8}I
zD=7Ombdtwkg<++_D#Ht6W@~DuxEr@JkXr}p+igVsA&TJC*p09hLu8?WPt*)>#DJDO
z2a1MaV%j}Mq6l4@M2^62W13fiZ(hCU3hDR~3)g>B5YhGdSy34bRHK1`0>x2HrAL#X
z_)@5F%cyEp1n@6n3M0U{XR*Zz9<ipZG<INzyR))I$;t{$hGo(bk)IoLxdJB(7J<Bx
zWAZ3nL4vj?^dGAvN`d0m-13SdjinE|+~`tLTj`;Ir^0?)D9+Sq2BU-pA6tT5D%(&K
z76_&~;Q*IJW)BC!RNA2k6*9%j!|z-<<_+FHoRUa5#L^U4ARK*1AI1?oP`Z!nKY(`_
z2wGc*cR|?vBWxo~dTb32cHM!7hMp)Q;KK2$#^s1-V4-PtF<~3pA>-`#KLyfiqoh(9
z3IRf{BR{2VBdlCyj9=5{Nr0cVix$Kp391=w`co9l=Ih8DLBrZ5njFF``AFr-R@PvN
zE_dK4z4Gf}7FiUvm``)m8ir}dDmB*QZk&4@u`_trW{|Y3005Sl&2EeW2(ebekf#U(
zF*Tq<kL3WwF5z{ov>2+K=6u<d<@wV+iPXS)g}Jr#fa>~{4QT{tB}fQQwVrJHqBzV`
z>i~ZO^9>ylFqByFd`BEkPuloIHRB<y8@DDLuNz9F_f8M*O6=KpDOa3mSwqrN7|Dm5
z9oV%yM$5KcD>&y3RtXMecTXPOXJ0)0_`xUldTnjx7MwzFAnI8k6kW;HDc?CpagTHf
z%l&pf2YA4!eff?|*a+_dF;eHMDX>j7K1Q(gG0T00`*7kD7+c@y9L(8IZt7;(fqNGx
zoo%|OcX|e;A`aTJx5z`iu`90Sli+OFh@nki@J}Coeu9d#^M$YK$hsZiv)B(>^>Hdj
zpCo+|!iF#C6bM<m@m>n^UhJ^lsF5i<%K?bov`=hWzHu#M06`Z5A91jrAGJ@v-~>gk
zYJ6;tdOp(ZeJ&8+bVoxWhhe;ebF=eYonr5>5h@BCVih<61keohRq3o~nFSO@$UEZd
z@mHu8rEj#rV_srnxvJN+z0!4&COvo(go7oGuanURk6j5!d?`e+0PuXZ{c3ym(Zz!|
O4=&KR__x>Ye)%8r;xqgJ

literal 0
HcmV?d00001

diff --git a/logs/first_run_logs/run-2024-08-25--19-05-27/events.out.tfevents.1724612737.26610f3d33fa.5666.2 b/logs/first_run_logs/run-2024-08-25--19-05-27/events.out.tfevents.1724612737.26610f3d33fa.5666.2
new file mode 100644
index 0000000000000000000000000000000000000000..6ea8859409230e96f44bb013b856c7203d32cb1b
GIT binary patch
literal 30486
zcma)FcU%<b^FF(G5)>7siHInQf`X#h_64HEu0gTudB6du$K82F#a@YBEMGO2Xv7k0
z>>7JCMosLo#u{4)_8LXe-@AKzJMXYde&mnjliT-s=Y3}8otcNFv{gC(`82ofGIdRJ
zhk!fVuEqF#k!aN!Okr(?>P`0M5%FP~`gXm^X0|4pb=KrCn<G74XUzy3W;NLL)-XN4
zN}FOZ>b1^K65IcIe{Ff^zHNgp?Np!o&be>MlIog8)ioYEYpSh*T|eBe6}O6g=U=u=
z#gY|^mn|0i#DAnFARy4}uv;8<ZL+}{*gPOGtbtW;a~SQm23ChDG@@Zd<Isjtp%Iax
zq2bL!8%BmkG!3jTpoZ$L!XdN>pQ1C`^umH<v)0aYqnn}+^+R<=`kCG8kk(nO`eZ|r
z9j)Y-X>D`|yFN98KbXy~x7eJv#q|cWNozFQY+AF)$S(mi7QI!QtRHGf(mT(fP1M<w
z(zG_i2t8dF-b6Scvq&&j*qT&}LunV2!)TQD;n&DFk(Q)db;$-kcDke_N4mqPv-4+n
zGHM<WA#5YS6`xDn2uqUnMjg+#p@1^#bykzXl&a+&ML&(y(;x{?hUpEdX*8_*B<J$5
zhT+2Jx@29tHc@Zah4Y()HNzi9I6rL0d`#VLFq-)|hSSZXg!SpV;aZ-Q)?~J(^H0K@
z$1yq5MPKPH=A<+mebh*N#6w(+8%Kzzw`y%kY5HV`kp|43Vc{7B8Vx2&${A@RyI}Az
zKAq{3k)xdqb=Gu;g=cOs(@z_QISJBL{Kqa^q9t>x)=2LUpL&57oy}$#suxYTo@lLl
zi_wsz<5$o<c(1|=8ZNz#rjW^;%pX$9UZXiRm3O~%d*}!M-_>zp3dwHQ;}nC{M(-_6
zdukAJ+x6JR&*e(-ASRtjYcQqoRf3ib_NX@9P(9sF@kWWy_!~xp+0f}T;X}LGPA{U-
zkZ$1br!!TZt92=QXRYEfl0$@N1-u1^7oQq4wCU85d@$e{tvhzJRj*Cb8I6g$q`~y8
zG^cIa6stL1OS2?tu*GaJ;VNM=Ym*$we8^xQhmG!GamYSPx7fs@fF9ZO`egb+V<)v_
zon5CDKa%d<AcI{vuW-c?n73uH=!c^Z6H~&Qpbt_k!UvL%`2W|MEq2JrF5%8pOVb%s
zw6we>8Ekxo;N74%@y83)IqYWV8h(3i3avi!;|LooKeDGZ;_tS^$!Mm2rgz6Qn11Sv
zcXG0Yu_&#i-9gLW>m(W+KGf(zy54RzB=L?-bfnUU5C+#LeftZSm3OSstV`xAv(;?T
z8g(`tN};&9k~&g65rFs;3^Q8?>#ahHLbSw9sncja(i@thOQLDSo|9g8JG~RhLYP9G
zMS?artx1<I6rBHVbjb#rE|GRz`;cV5(g8uMKHW@jl*2@mAenl{=twuAt6<b6>WzXG
z<Tn1mv;fmH33ouNvum9(mh!{tRBejc%B-Wd15?B_Nww<f)v#&}HolS<xaWpcT9ox#
zXC@Ce;OdcLOXfcigTnuxHr+s7LA`6yS#|0BNu9nIo+X)0DF!~S{G>o<1?9E$!EhtG
zgw9X2!X88Uyp!C<J`p+yz1C79P$SOLE&yq%GTEpT(kWBg^XR{mj5e*^OotyGPeMvW
z+BNmGgXn17X&ms0ZM3G_o&8_PUiKg~v=(775L!P<ZhB09+G2pN=+Gzpt><fmkQX*x
zx`i62=V4Zu_{&SL*kHbiVD`$hfYjS~Qj!~z^|T^eoCQi^>r5+Ky4h?`<6DoE@D0UI
zODl_kPt`!a)eoZ%vnHiEONa18D|S(7L%K}bLvl}Gs?|)pXkrE}Ql?aU8p|Tk`H`6O
z(mfIv3fG<XQnS@gZBtxdXYMeAZ6N(bEDJONQ|R4vT_Yh0X<rd;xpYFIGC621wA1cD
zGd7X#iXU3(rAp%Od7>jFr63xrh=t~QI=#16eu8qvE0C#8bSShLXceNf8dMu_ijCeU
zaRd4vq4cpE@rhy7r|Ob2_#)2Fw9XmTj4z?ZVN2ulIL*Nmf!+DZJ6*5P@FcDjAyy3o
z-xbj3Y-^{vYt7K|-B#=?6lXWnUP3{EE!!r<YP)yo(55p7Pc2r%P+C*?A&8F|9Evxo
zGY->b*tGn)(K9KRp*<H!c$!Ay=!=$UQN9ZqV=yHd9m%wvny4C~U7y}%@Gu<5IRn$S
zZ#E^-$_Y1Dil)#66+0)7b+~44X%3Q{$b?1F$?S8pHC1Oaj1V?s1HrBVhj&fr)|Pr)
zThQ_87H8FUC48VW9(=EN-U_5izzIT&Pv%jyARBDBFA4WOIfHgLK9CYStBxi_l3ARw
z`R?oN%k%=$a-pXxj;;=m5@$bQ6YAu23ssva=IDvyyujBR7gTU0dyN06xq$_1wRCn@
zZU`Qwc}QD<O;0m`zXRYE*DAi530dS!c^hA=u?#ioQ|u0to=)=ydSi7)OB&z3By-L(
zX{U9P79857sXRd)+!+-i6AMjI+5qVMYK7J|x_wzqX-!F9jB^oVCI*OK4ygNNT~~Eq
z@5Z70mjfzP4ymN^&}anV+<@1%o<4_t$;w+}>(>(3=k9OvO5NL6TJNQ)qABlOFSJu$
zka-UFnny03i9c9Z1U3DvRjE${$WP;=sU#X<1*B)z-s4Rh&06kPLj)EZ`S`86mY-y;
zvZkV@oD(PrJkM5VZCsfriw6aW0D;c^nmd~bbn3|H9Ixv`{`qoY8APW}Eor?Xool;h
zkoHObeG#2G0QwI)Uqv+<+pQL&^EUt$=)B^l#_WGoU8M7Mht^)|EpEkI3v?nvDrzcd
z)O_Hb!3p>!fanHh1vC_w|L@H?tvarPw7jOKhQ>!&&I@}h-xm^%g0xAp(wy{co>i8-
zThnK6h=|`jbIu#J-9y4x)A(vC3;2|9j6B}mgZwz>?f!5PxoqOZf7CYt8Keo+_zFly
zO|Yi_byt(H7XR)F6E`^ad(S*|ql%L08k*`FFJS|!MyMjg<~OYOAp-}E-ht2)mCzFL
z6gpkfsg%zCo-=Q|lYMKOcCRDa{%onzUVQ=3z8Y^$MHg)b7ki^TiMl^Iwt<LC?_aH}
z+P{)?W;#}AybF+K;8#l)C-?Uj52-K0UvECuMm-4N)iu>LRanXlR;8*zX3rYxK#|@F
zScORQv!#J*tVE;3>KQTS12?kF@2gRyRh6ZqqevrU=ffIuW!mHxDAGgyB&1TL(FQB-
zyyDXPwYrUBbq_X>MH-=BJP#m!dXyd0M08r0o->}QBdST}>uQ2E0fN({NYnaYvZWj7
z*I4+k16ZwQt7nxa$GKE>`?|<|Ghi2RA5C-x{F;TExH{MRj%zBO;_sZ_daG{%K15ST
zQ=>rS6}E}Jx11ZaW6|Tr;x+>sRqdw^@t4R1Yieoy*$hzF<JQdsoNao1Ta-l(*aNai
za){1o(Anr<<oS1WELlABkXLQdzOBnWmHIGXy){)e6$Ja#4H9ZXNIv&%-kZ2k5!Etg
z+6T2;b?MXr8h=gI0^}udz>LPkG;8p7lz2S>42dV+a|yXO=Lk3Js}X1GiDol>k9JT`
z43f<HYy32xf?0`ZqRy1e^PGG-hCEm>-y2o=8vqrmyo;v`G@(Qp;&yW2YvkC4wIqAs
zSf-?j1`q!0Pd@GVP!)BEcYqZ-gkaEB%|zga^|{>cdSk|;k`h;2G72R{BAKkS(yvV=
z=}mf@r%E-DTn&ER2-(d7IM}5DWr3&j!^xb8%*p6NTn8Xr2q4Si1M<^IR?iLtP)^he
zmd*`1ArfZ6Dpdti*5<zjRRjxwp(046>1;tKO-h;P98I!j&cA@F_kBR4>doNVJvhpJ
zJ#|$x<mylLr6Yr@fieTXdE*9mtloywC_VawN^nRIkuif+skU%=5gR`t(`x|>rX|AZ
zW<k3N#k)<t`lL$f9~Ph}zXmvpG7C&d*vT1p=T%2hCSlT<m7>hz=LQ)_rG<|Up(sxS
zye!Hrc$ANhbIaHG)D-XUmgU2~R9^yc6-{N$7wG<aE}s=dLf%;NtBH_-4Q|&|*JvP}
zw}Qr9Q%1bE!s3o+Y|Vni#j*9qbyAyPaaE0{#$8w}#Z$l}O{@C>4P-w7Mi|JLdny2K
z-g+`1P&6{!qgB3oN@M9no|;M;Hz&Yh;&qJYk%i-9emtGo4f*U2Ai!tNuiUV5!gFej
zTWo%Udofh=LL((}Q1gVZR5D7$lSH~l|8>SK8&SIgP}!`Rsq-S8D|~Z0K%^#&&g+-8
zZZFK5EoGzg-ygh5$fi?gQBl4AowQzYbPn`cKpvjEGz6t?>~ab8pQCe)i(}>?Itu_O
zqhq?>gxvk^PAH-iJpDuuflgN$o&KLHk&1in{f6kQn<1@Nq!Th{DVaXyOmRfVW2OWu
zluq9FEu(%l_BxK}7yu}vLxl|)nvr8cbdLVGGD)D5ETeO!VsR3);lXl5=kKl3dPO=z
zZcHb`NBpx0(Mi}QfeNLQJT*M3LxWBKB03uZD5I09TSP{lI~;_B&8=J0Qbjrigw3MF
zATsLRl6t6c>F-F(p>RnB6A7D3^E`-l%Kph{>i7fjFm)6ZHs!wwBojRI>!EQs;V;Ru
z(zuIE=MHcu7dv{oq2{(4Hh|{l8U<Kcl#tMhe7sZ}x5gJrXo)zZ5?)>}Mdm#{)(N%7
zmUpGYqt?jaeoH7xn)mhVgZfh@;GjQArIdb^P|{P6(q3^Y)tI`dHJ$^wtTi%NmCA#R
zx;%9uinPx?>BtI^M*5&lJvWllQ2z!+ItlP7(g-=o_6t{I%$r~o=~Is+%Sw?(8~ont
z3vzkQ+6^eu;g6*aWRXVbg$ovNPZNCJB4INY&_xqA>w>en&l}yRBlq2fUBG?H!e;!|
zGu*h1&oYs)340=OD59_#o1e|S@9^hkBy2KZo1zGt{rk$1z9++vqAWTBdq5URsV`}T
ztA1_Cr5nrYAz`C_D$!6DHUpn|k+jr32T`k@4Jc^UkoJ<vCB_I+KRoFgO1x_ThQt%^
zxrDTzFGW_nb-#d`e3{qMVWG)OL|qz%7jP?*uC72;zCS<}g-y1fnw<J!T_SSqIsk!V
zfu_sgXs$Y0cJV~7y5f)^d?oJ<U}5xhRWni8xE+1YwcK}X1}Z7hZzPgXQY4bBM3{NI
zGnu{ZMFO%r9pGTMfUw!tp(fezKF5eI#6<wYg#faw;dfK~$kgbo87L>Jzm?7nIUy2e
z!5h|xkjW<ko}-G824JWN5@|*yWTmws*OvF#fvWd5K%?r-;1-X%#!dQqZxV9#-4D`{
z!PP*Sflv80kMp~bX+-G}^-+RDdWeh}tV(s5`@PwLE6DU@z=COsFe?$p>}yO?G^MMf
zD4zp3iZTnl*i1vF4&8PQMcF4`I<r!gS^U!pgmkExQXNG(1@N*cv*6`%SGg}vUYmr3
z%~k*_2%9tGgGlCxj50{rd;ka%HvD4Wd%n+*u=(nfL{3TA@R(g4r`<pU*>1oH0~sr9
zc)*J_k?u&?{8MteYao*gn{K5>#2mde_cQXDrIdsKpD75NfqIYF_|vsEqndXDz)<sK
z!e+m{QglhfArC~&x3pwean=kgGbt2}&bwZOID}bqxNLNu<#@q`rkwl&71i4>rS*!V
z^M&CfiK<1(0Vs9bzLG$Nj?SkowWC_OS09b&tN@^lj_LX?qP_3700DGMHjfwR%qS+~
zGkMf=ZmYEnZid^J0zi?_xBfGUXXk4(P(Sls2B<<AHSOgaRsY-FnJAU43{plZA>k4^
zwSQVQ6rsyquS^#xEhtv-J;#k_HOb13u{+SHHaZ^G!+n?P3>wM<&TGiFK`l$7^n1W;
z0NuKvAQ)u!AoE7v+8QBtz##7)kJT-@fo-KZ5gE@nlp`N}hAluvaw@X}v;|iaWhH@1
z<wg9OZLfjg7Zu?Wd1k;qGrlCLdN^(nO4RDz;rytrFhGC*Pu$AbD=JiDzh{6@+@Y^9
zc$Mlow|MPDPgF0SGk95JLEtjvuQIQ^P{dm%z^N4?j>hY9OG=QH<8Gfo5zk~cKoLjK
zMYk_;8?XH1jv}7fA8ac{9PJSMG>_|M$nK3o_K?{@7I6gc(`PUD<or)-(NG>a01i+z
zZLsg+AKbd<S*MW?$1~f250%Bi^v`FwOTYAd8YQNFcE;$Q>NCs+MN|h5`tRg&zF&4d
zQrzeAvHlO$-df;O6nWs`@q#<=@0*R%NzZHo=_FkPY2@a|$>{ezrUr{A*!S1gIQ13=
ztSk_=t_~wNyjJ~*=DGI_40;}10m<l)K8?thUJ>|<d2I#)ZAd}!-b>IKVIgF0i|xK>
zCcB~sDD+E-tZQU%p4FCUqJQp$v{1toz$%J_N#!b$DW9s>L+%~TpuoLA)n(GUxB=<*
zvcYE5JAPxpFvz(ooG25{zWT^bTD_t^Dl(p_a9AiZ5>a;Iz7v)}?)UBY1X&));K6bM
zq44#>T4a9deihK|SkEBgb^u}4w0md-*;(#Wca$7&m<=E~MA9spe}jEIq2(l0B1jq#
zhY}$XX9n-KJ7P)stxqbWsy>y0qpHpTQ`0YVNAk0Ck;B)fgI#bq&}Q)WHa+I*^jFnD
zdGd+DL!OAN8L&$A3%B!bjq%8Mdy|rJi8MQTPwf#!lG@h5U+noGgGbS3kt+t2BkA=Y
ze1oEWn?Wi?o892kjV@%f=k_mAwCkGT=(1?D=%3b}=W;`u&p{)-jX^7DhSf`h$+B6S
z)BMGnb?rvQ2=xI5gG2+rI%C;)*U=DL#scS4QVl#Pr)sA?XmabyfMl|PNBrDngB#L5
zeH>s$t{YUU5i#~3mcKxr+sZ(|a|*)2mgW(w>)vTJs)U~yG*m)G<?z#)poNHD+o5n+
z*<jjk?tOA#RJCeo*6xvAZLTnwu8_~#c~2g3HOIvzpkiBMH2@T6?PbF=Ny1laZlbES
zmO&LdYkyBFM5Pt4I}cH+um<eOD78BKiuB1Gj^8|}M}GWHptM6yspsRfT)^WCml36U
zqXD2uY0|=}q)fR!eG#Q4460B{<$_B^jqjb;8&P_}AZ3*12C1WU7w?`!Q6eoSZxAS*
zkW)Ih{U@$$glQ9^G_fTB6e%qmIEf6nQe^?6^q4^vO6ecJ*G-Fgt?Pv-#k2x@GD`Gi
zT=bjMvvLB&%Pzc(+wh^!k1k3DUXVQXuSYIT8mUM1Y*0R|hk7PeQY1>c{92wwA2?A9
z&By1N4PZDasEAsmRU-d;_V^1lAJ_f_ww2~%WW36@3M4KhMvI!?P-X{cey(|eRYdor
zs*ttSxtj=nR1q$bXS79wHzmomxCL8L?{xbN=SRJh0e-dn75DANerHho>cIe&dMATF
zGw&JqzjevssCO=B@Uq^?fK{q@T<;Gx!coL?8L&dck*e6!A)oWVezz2gcq3I`DaBC4
z5j4tjhx=h%N>3DVQWl^}5l1`ReEN**`KDDz6td;a4zh?N_#YQ8a9@x7@*Yw|j~IB-
z6w$&F+qn73=RA-P8>@i__)u999VdIZ+m}|)LyBlLvq2FR(Y2||xb{aqdm%-1oY|)+
zis;tOo7}jnTTY{Ns-OWjkWSJykQBz#_%7s<sdWfaM6(&NvLf14wIP}BS-&@G=6^9T
zXy!0JN=6$sqKc^Xk-aDd>z9KQLkf!bUV_F|4JE5*eI1PEfO!=F3Uh!&)+JAtlxRnM
zyLF$A%Ku{qtEh-3YdlGOtvxf5dz*WJMQ|@rb(y?9vH>|XI{5?Y9hnRmCRJC36BUtH
zW-+p^`RhMWkvYd8p~y%?Sz&Rge|r*DZOlVt*|#Db8!Q)4L>7lHiQn|L7`h!P3=(b!
z5N1s`4ya5z)c0|q<k-q=0LdYeX3@=N)g-fz{(Bach>r{!N`yq5(H851za*bGhm=88
zU0VrwqpHpT<8NQ%vU0pVki*A&0T3Jxv>E)j(X_PXR`o!6@+X6bJP}zlV3q0wxBcLh
zPsn(%w~}#*G%GCn=o%4wy?LKew1+Tw6m1syIl4SKbZcG*6zzQsQYqT(23@B#B8ht@
zr=n<=@`0nvqRpbe>h+YnpMGEhQbe5@w1OhK+c=0EO=$l&Qbdaw3{ph=>UKfBKOjZq
z?F;8qQbatc^X+1IzD{L8!sN!jNZ}DRkJM;_6w!<TFe6t)IfEQA?d%68BG27tAmBL#
zMRc%w<=C0ccCJE|ut5zl2$fK#h)86W=s7p)`J%ye1cOx^OqY}}yhgKj=fy+z3xnwm
z`K*2L=iS`s13vpvv7N940E)AAkGvUV#mG&)P#!;GP=(Ig2OiyQT6Ncyx`-0D6zs_;
zjrcA&I*pVYjwp2<`0j*2>A9Rz+^+4MH9M;pqBLCx07Xil`Ya)P=Kl5xQF_Us3Z=yV
z6ihr9Q4LXwO9Xo|O7xrGqQ@2s`wGd2<6Zpz5GduzUZ{Al4<k#}U*Z=k6TgP_aQUUO
zgC_VLM@o~r>mM6XC(mUzfKFaeK0G{BfgGwo!GVU!5%FMKX_!RDU-T+N0tRi`g*wIW
z%nr~gT+Nl056x!zlKT&apF(i&Zg2pAOXQhBd|hw}^49!11~r-#296pH18m{Q<!Ve=
zPzu%h9~q!hqhauma&x&~--aJUjpjXrmo*v&tWxE2aSK|cqjKN6JCIh0I2yr&`V=F1
z5es5a#Ah=bpok;r*sx>V#PM8p6!EqFz_wDv(GEwazv8s}wwO`KO818yAY@|15q!<+
z)m&Ui?h7Oz5*T>V<in;dKXGM>#V<xa{GQnce5foRxOXeKGgaPfM)Kh?vq2H%L$g`)
zxZhW|{1wTEr~%-PqR0mikFDIQRe8B6oyIepKsrg+KpMwmRwj^(2~$oY`EZH>E6a!b
z+r!Ak%}pMl&R0bX2ZzoFvzBCZ*sz{t|J$EhpcEX)pdkgtERdjkb1IV55?QBF(|ni=
zhM{RnWL>j7KT)?n`KBBy|E=|aRg@1`eLYFpho@H~_s(HZ;9j8WGTCQIB<UGlB@*?H
zTMQV+I#-2bUZ|W|l;@9%Oq~=sEEE}uC_9B$+|-qf9T;s!mTe3kEEkXunI&tGBbkG@
zq1$nYLBj0-!mR0s|I{SSbu+$4$x${Hjt<Eol4jBOqx{M5*B9Z}<nas|N`yq58N}Zt
zw<NpAwY!X}`Z5NNsyYLVI`9v-wQH|5<nW)3aB6Tk&}Q%<FRyX!YF0`^dE%1}c*qlx
zH3L?uK62eV&whf8r!ZhJE|F%Za9cn)S(mXFzqs4V;8C<$WV1_d#C!Fl%P86(7^G6P
z*$vhY4<qW230N$)F#&H`v{`hQnvc1YCF<`*@?kcER*(-nVuHw57naUN^5IVggX9Ch
zy4;<dGe|zvGQ;_l<O2_CmwnYAO>PzjB$E$3qW9v1`A9xYwu2eDe25)5JZ5xGubSwd
z++rZ$IR*Jpw_nBBR;7K{qDolL0S2KG%H+e?L6xKDYJbDu>EmFqvcWVbgNqI;<?e}Q
z?FBdYToVS<NyTN~ywt4CT5k5_ypyQdzPb$n#aa8#hQ8#7e-|D_RcqiKKovS`Cr{nl
zbjs!Yu_$7@7^I9+qE}qBs^6JVMCsY(=!XKOMRH1~#!lhZaKB$glp?MIK#|f(pRwfM
zaaYPCN{bm(p_H2TTGBM&&$v~H(t8Feqm+=45Pj>%PU8@z$19G#6ew+!Q)<{TlN)mN
zrxS?Mk(B^Yq_ktm67qJ#;ZcZEpKL%CN~v7%<fc*8dQ3o+b}&d8rFL_ZqkDDjR|P4e
zl__)Hx+oQRp%RzZjD!ZQeU0i_oitbv^-QXyNR)KzQkwiR<%k6hC$pFhU^pqLh^}^Z
zC%Wkk_o4at9s^aHkCE|4jmnTVowJvr`M9wGP7lq`H7~G=h<~Bd>6zDU1fQS?m&h~P
zVr0Z8PJg{+In+DPGH}#88Q`Q&H@KlKLyw{M<u?e94ebkhCxbt8{0_Hd{^4b)ccwCU
zS?^@PD%Dl4;~oQ^=C?6mg@_|n!7Cy!-N2?O;`z)5DB=h@=i(jC^I6#)DB>rrKwK%}
zXoud<-*A@>-n5~R)wjV8vWO#i`SNGENbQS?ND+-<;6+nJM;{*MR`)2Mjc&~eW*hLK
zvLZ5E-osTJ{o^2{h$`EGMiCX!CiiTvnd)R7Qbby2pQ0!t`faJ)<`y5aP&%z=Hi2}K
zu7RX57LJG~@6T-<f)vqP2CS@zF4hVrF9v6JM9n<f0S<s>4p%@jY97&zy!l~m1C)X@
z7&N4yc<&|Xj4%zk&^@yvnggzl0K+f`NMv2|q+6FbLVtk#AS(aiBLS<Zh#o{%CTHJw
z?Tp+zjzNKYfvU^on$@*PNbY|8t4e-nz%Z%0Dx9c@61_^3Ul#iYqassv6dV?cj6{?b
z7W~f^HQ0CL8R}Yz3?3{OP(+Kr^(G6_66c`Xv57&#?Eu28>DPnH(|19pY?K`DnGGO0
zMA9reA)yxGyr*_RCF0A`KpaYhM4ZtUL49J#4>vcTL{)tz14mVz0rre~!fkF_bsTc|
zwh3St91gS@eBQWR&ewbCM3g6*iGYVZ5m_@}{=1H|%Jw*pjQ3)|U|b^23X7-l4al~T
z##2zVS2K7NZ5H{ay*u#<%=AXle$F73qRnm)vN4!cAJcmpigxo!z*`n=7G1Oczg*`#
zwe}%Jl*ynK6p_!*fuuuz#YadHontUa5%H@Be{$0zMO1AvoKHy+@t|^}whcs+TN(or
zCO7tl3XfRzD7zR^MD3@68Mz{Q`p6pN(Rd*)qAM8)cuqkP-CX4vTWQU$XQ&cBWzbLw
zWr}Fn!K%?043=m_FKRj*R&g-BJ>ZvkG;2Tjd49>i_+P+vSUziO7tP?bGe-P@imm%m
z04UDdlWIREe)$PTl*hvuRH3u>fk*wD{;~Y^C`9QrgOpLCKdT-c(Wmu!M5%1MfgS>-
zi*icaM_RdYTef3Gr0)O#MM@;pMtYBLWk8h9GpIr-(H~cD8u866KSZf^N3bWO<dr!-
zI`glCV~~8P9-ZMUP`XvT;7_Rh-I0)O!~83w%YPyU*2CqO$_|>~@9i!}x>cQc9d+`C
zv9JMj@`CaqXYUte-SPYF(J=W9163L(k#YVfnFsuOZxiYi7nvQPQ@ENdmJjaaMD>*)
z5j^lqI6J^4^2{J^bT3Kb^aluPG-d{l8Vv)i+wdRm?ZJ+JqFTS70Ya^ZM#JF09QK&Y
z9`bY=YBZ%=!?|URh5@To54l;#F8HB{$1`Ash@%ml|9-4iwkIo5#FsD|pok;r+s?PS
zBOUYJqlmk8f`cnX9PJQ%>KVs{-ra#hmd@+|Arm8x;4hw^<Hj3nc0lss1OqRce5lrT
z2Y2GiikrxXo}GaQ_)u9s%rVX3T1W1ii{yir*`SE>VO81`&Zmv;A(9V2F#8lmKJ4GO
zgbNydYd%V+7tAJ*PSQ1y#&KJ~*W}v_pQT7XH17hgP?irJE{2g0{VW-1W|+jlp!2~M
zkc|HLeM{0UYe_Cj!P5*HQc%nS3A#5Yko>i@d?ad`>I671G);-DYnFd`y$uO|K64f7
zXo(C~Q9e{ORVM9Ae0v?acRPau_X1UyN&el!#&vDoQ12+!1C9%0ovXr$@?pexWyojG
z`9Y}2bY+lGWF(^O6drKCJ*k_t{T;Hrgu#R50`ej1y)TLUwFmyqdk+{S+zuejnr>Xd
zlSE`a`x+%jgPuSdl0ziTqWSN~syn$-4k{7D88nm#i8wQeM~rDfvPX{TjH>z(29Bya
z1N`1Smoq&mI}bU0V?Q`GI2>p*_$KqOaDDwEcA`8f-XHLgCn9SGOuzk_{JQ@${=H~j
z888@^NV8M;`zI0PkVpIt6z!!99z~l)_RiLj?MpM%s21ihNTq1A8#Eu-l#D4}DgZ@0
zVgT@#MVm!WdT@)osyh50$%l~)x)AvgaHQQ`Bp;437@2$+cJ`_X$%pb<IA3A%q1Uu+
zL(t^bn*k}xhrdU^T7=}ov}7<Nmk*O4Ib!CfzF&_#_ke+b=M?0_<MWkbUthf47*)au
zJs5;aD3cG}?|DW?yt?)X(HqNPWrJx#!sO@=pI$CUv-Yc!`)de;X^wo>uG+RMS7&9f
zZm8ILmj;01ti2}ANRAzeT7&X<JcAPQnErZ7gQ)@f8!(>q$2pot?izXtQM$|^Wt8Ry
zO^rSiduc7AloJ&bCQ$k)r*!9FXKvBV&$SVyjOGAPq%?7yja;esH~z+0zZQThloEYM
z)THO2HYX6JkqlBs$?a%X^t}?*1|v!hTm9Nxpp;cY_S)-z`nH^|X2y6#DVqa;BBi)1
z14!+P&+&JEx)VVC2c_^P`|@fhB1(x2QbvjX2CwLN%RmiML>0Xkb1q5+UZ~9SiY3;5
ztyQR=W%|H+sAp0oMWTdPMD}^f>(FrWme~M?6D38|^P3p_EoPg2!M4(TjEwV&=+*u6
z?@;qA?gu+S^K;D$1r^bhYn_iGco#*uM4r(W{Lk#~e3$e$>Ya-jIO?4Y@X6!H+`f>J
z38;5IWPs4Vpm#F(-PfORrbk!sFMMoR4QR`HCj(Zg9&@^uAGf23k6^$G5l5<`mG@_E
z>ekM$QN)ij8=#0I=)|*^xh11j(MS<pt_25Iia6R~|LN!4sGg<VQOKfd!w#~DBlz9F
ze&!}_&K!jlQ5FL)nj(6Wyq~+)qhv+o!<)=D;6r6abYb{XuH)+Ufk+Y62?iQPR75jM
z&E>wBzx)nTM0RGMq9~%NQ-0to@A;xU8pRGVn?O2A*FaJjeRSQ(-r3y)kRsC50Vc|d
zsLY2@5}Z4z0&3>n85lHkxB`;VS3d2D*NsYjPztVK(2#=Sy_cX3_Ii;fk5=_XbHJxi
zI4{fr5?Pl#;a{lyTkraLRQ`K}0aj5FMdwx~FQ@!l7rFN!g97&gRhP;4-nGaJE)usx
zO#?6qld7x2F^cHV?Kcdl$aG_nP-G;ctgzsJ8AFEJQVz`)OBp;^E})2l{Q^j6zhm{$
z?YPe%;dTIF)^tvWKN<at&tE7xLK^~UNDh%Si}pKSgQVQOyc?B>;S3r|ghZUt7W^-7
zyWb)oza~Gzz)@9afH8-2x$QIB&qNN-kAzc$!+|z~Pn+|E8|7K`ca$dyO#lyhBC=+{
zDpd|=f3@KfGQN@lgK>#8D=hdIDoZAvpMawMl)<BDv&ho(+=w+`4}PK2yeS-8DcbA?
zBdgUVB!1ZsDB2U54P?<~(cH}E+^<<tZ;&Fo!k`rt(MNj_8Qf*;0HlZlqu|I$5%H`0
zeQ~!rQba~(wUQ#@L9=VwAEU`_Cj$~DH}-`Jk9bkce;-mr2U@|Q<cet2OMA@tm)`hy
z_Iq&v0naHYB3oMJ*y;5na#1BrV$e_tWs0cYH&vpGeILIG(c8pe6$jHlR({(H&DtHd
zcIYS!rmN($cJsL%xYAE=;P2<%zYPG2vv%}n8(DDen|3IVt=j=r=&Vh@rl4w-*XARl
zbeciRD5+E{qJLW#SsdBwcwE!TMTxN`XfQ4P8*;*LpWdfk(AqCJDjK!zZ6^RJv~8zu
zb0QVuH}P;pOo^bYbsF&}r~kOE4d|ghbrLqIrKzd$L!0n_9F_fT)HSwj<DTy>orRir
z=igupp?Qm22!G-+h5or2Pu|+zhCjz5*!P9Nc=0Lx0aj1Xy+3n{1BdQM9)7{V6g_-?
I*Ke`^2S<Phk^lez

literal 0
HcmV?d00001

diff --git a/logs/first_run_logs/run-2024-08-25--19-05-27/run_info.txt b/logs/first_run_logs/run-2024-08-25--19-05-27/run_info.txt
new file mode 100644
index 0000000..a22fb47
--- /dev/null
+++ b/logs/first_run_logs/run-2024-08-25--19-05-27/run_info.txt
@@ -0,0 +1,138 @@
+Run Name: run-2024-08-25--19-05-27
+Model: MultiTaskTimeSformer
+Training Arguments:
+  output_dir: ./results/run-2024-08-25--19-05-27
+  overwrite_output_dir: False
+  do_train: False
+  do_eval: True
+  do_predict: False
+  eval_strategy: steps
+  prediction_loss_only: False
+  per_device_train_batch_size: 16
+  per_device_eval_batch_size: 16
+  per_gpu_train_batch_size: None
+  per_gpu_eval_batch_size: None
+  gradient_accumulation_steps: 2
+  eval_accumulation_steps: None
+  eval_delay: 0
+  learning_rate: 5e-05
+  weight_decay: 0.01
+  adam_beta1: 0.9
+  adam_beta2: 0.999
+  adam_epsilon: 1e-08
+  max_grad_norm: 1.0
+  num_train_epochs: 3.0
+  max_steps: 420
+  lr_scheduler_type: linear
+  lr_scheduler_kwargs: {}
+  warmup_ratio: 0.1
+  warmup_steps: 0
+  log_level: passive
+  log_level_replica: warning
+  log_on_each_node: True
+  logging_dir: ./logs/run-2024-08-25--19-05-27
+  logging_strategy: steps
+  logging_first_step: False
+  logging_steps: 20
+  logging_nan_inf_filter: True
+  save_strategy: steps
+  save_steps: 100
+  save_total_limit: 2
+  save_safetensors: True
+  save_on_each_node: False
+  save_only_model: False
+  restore_callback_states_from_checkpoint: False
+  no_cuda: False
+  use_cpu: False
+  use_mps_device: False
+  seed: 42
+  data_seed: None
+  jit_mode_eval: False
+  use_ipex: False
+  bf16: False
+  fp16: True
+  fp16_opt_level: O1
+  half_precision_backend: auto
+  bf16_full_eval: False
+  fp16_full_eval: False
+  tf32: None
+  local_rank: 0
+  ddp_backend: None
+  tpu_num_cores: None
+  tpu_metrics_debug: False
+  debug: []
+  dataloader_drop_last: False
+  eval_steps: 50
+  dataloader_num_workers: 12
+  dataloader_prefetch_factor: None
+  past_index: -1
+  run_name: run-2024-08-25--19-05-27
+  disable_tqdm: False
+  remove_unused_columns: True
+  label_names: None
+  load_best_model_at_end: True
+  metric_for_best_model: f1
+  greater_is_better: True
+  ignore_data_skip: False
+  fsdp: []
+  fsdp_min_num_params: 0
+  fsdp_config: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
+  fsdp_transformer_layer_cls_to_wrap: None
+  accelerator_config: AcceleratorConfig(split_batches=False, dispatch_batches=None, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False)
+  deepspeed: None
+  label_smoothing_factor: 0.0
+  optim: adamw_torch
+  optim_args: None
+  adafactor: False
+  group_by_length: False
+  length_column_name: length
+  report_to: ['tensorboard']
+  ddp_find_unused_parameters: None
+  ddp_bucket_cap_mb: None
+  ddp_broadcast_buffers: None
+  dataloader_pin_memory: True
+  dataloader_persistent_workers: False
+  skip_memory_metrics: True
+  use_legacy_prediction_loop: False
+  push_to_hub: False
+  resume_from_checkpoint: None
+  hub_model_id: None
+  hub_strategy: every_save
+  hub_token: None
+  hub_private_repo: False
+  hub_always_push: False
+  gradient_checkpointing: False
+  gradient_checkpointing_kwargs: None
+  include_inputs_for_metrics: False
+  eval_do_concat_batches: True
+  fp16_backend: auto
+  evaluation_strategy: None
+  push_to_hub_model_id: None
+  push_to_hub_organization: None
+  push_to_hub_token: None
+  mp_parameters: 
+  auto_find_batch_size: False
+  full_determinism: False
+  torchdynamo: None
+  ray_scope: last
+  ddp_timeout: 1800
+  torch_compile: False
+  torch_compile_backend: None
+  torch_compile_mode: None
+  dispatch_batches: None
+  split_batches: None
+  include_tokens_per_second: False
+  include_num_input_tokens_seen: False
+  neftune_noise_alpha: None
+  optim_target_modules: None
+  batch_eval_metrics: False
+  eval_on_start: False
+  distributed_state: Distributed environment: NO
+    Num processes: 1
+    Process index: 0
+    Local process index: 0
+    Device: cuda
+
+  _n_gpu: 1
+  __cached__setup_devices: cuda:0
+  deepspeed_plugin: None
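
The run_info.txt added above is a dump of Hugging Face transformers TrainingArguments for the MultiTaskTimeSformer run. A minimal sketch of the same configuration in Python, assuming a recent transformers release (where eval_strategy replaces evaluation_strategy) and that the surrounding training script supplies the model, the datasets, and an F1-based compute_metrics:

    from transformers import TrainingArguments

    # Minimal sketch: values mirror the run_info.txt dump above. Paths and
    # run_name come from the logged run; the keyword names are the standard
    # TrainingArguments fields (eval_strategy requires transformers >= 4.41).
    training_args = TrainingArguments(
        output_dir="./results/run-2024-08-25--19-05-27",
        logging_dir="./logs/run-2024-08-25--19-05-27",
        run_name="run-2024-08-25--19-05-27",
        per_device_train_batch_size=16,
        per_device_eval_batch_size=16,
        gradient_accumulation_steps=2,   # effective batch size of 32
        learning_rate=5e-05,
        weight_decay=0.01,
        max_steps=420,
        warmup_ratio=0.1,
        eval_strategy="steps",
        eval_steps=50,
        logging_steps=20,
        save_strategy="steps",
        save_steps=100,
        save_total_limit=2,
        fp16=True,
        dataloader_num_workers=12,
        load_best_model_at_end=True,
        metric_for_best_model="f1",
        greater_is_better=True,
        report_to=["tensorboard"],
        seed=42,
    )

With load_best_model_at_end=True, transformers expects the eval and save strategies to match and save_steps to be a round multiple of eval_steps; the logged values (eval every 50 steps, save every 100) satisfy that.
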
diff --git a/logs/logs_successful_run_2/run-2024-08-25--23-53-08/events.out.tfevents.1724630048.0826fd70f652.869.0 b/logs/logs_successful_run_2/run-2024-08-25--23-53-08/events.out.tfevents.1724630048.0826fd70f652.869.0
new file mode 100644
index 0000000000000000000000000000000000000000..6011474c6cca20208705553f872d20e212a29edd
GIT binary patch
literal 4112
zcmai1O^+Nk5Cs%LvJyhckwdcd5oIT{vzsUh2#H9MR(u5k;SwRs)9vmt>$V%)J-d@A
z|9}$`2PC+22E>J5!I3|K#07~9;+5U)?%vFX95$M&a=Bdf>Q(j83-RB--yHpL>6gEE
zFaGt@AMd{Y>e6zlXZP1a`@4%zW;@rt(9YPU;kKB$wywF|%ucNI!p;O)u~I6*qU7@L
zZ~gk?Ytir5hxX@7AAayg@cZWX*LJ?UwsV==%H8nd)U(djy??G=eC5i8D=%Gq@!p?T
zpX}`JW~TK`>scXfc6T?M-LS&7%DWr3)q9Ki;&5+%dv9@bZ*Ot1ck=*0vg_g0TG$W-
zo18DXav~TMhIx7$oKA#d%@t(cw#lxsqL8_tSdtlY@Zd$Yp}?FMjf;18_R?skjC0Ir
zMJB^Fjj*f`Ynh7}16y*Rub7jM1?={3g#brJ;j;5=s;1rMMYl>NKV<W)lVnm^UP!7P
z&-1o!755Z-e9_(AMQ{_2Z`CI^!K4t1(`)D9loH%(sVhc_BI#^CpYI3NiICL_brpHE
zpLKFx@R}`!=lj9_PB2cwMHJo{iII1y3>9);p#H(W=BJEa#k8?CX=X7V-PT>QL}T)m
zgX*AD(UJbihl?(J%Ur$^MXOLTziH?RnUWf>iltU#gX0szGfbX5jW6W3ZX0^KG?30`
z-NU1}zpJ~+R7|0P2zyz>os(<PUD0cp6^)WPCkxy}X&o$3FTsH!Z3+r9!B?58qWq`!
zf#Cmor$2KJynA&itwZAi_qcEXcdzc>*Qc@zM03rgE{P_<8RJkLUyJivs?$-6p_ucS
zvmo@wqZ5^^B{gWoYP9C1h-5`)QbL06OtyyLJFVKV8i}N03_1{xH&(EmE4AeLF=7R%
z9V@M=8BmfRH%96yO=!dNwxE&?ep&|~P5X?jZd}I-6O|L9fZ#Abt>B)sPLvw&OX)+r
zp~okO=`Pub(}{3d?%$dS$|eX>j_5ZtjUN#CU_U}_#Z}3`Z@F|t5tIg@DZGrg-b5R6
z&q~nGv*85C+f}|ipx#bzQU-ZO@91MlW4()_8QqvHF&-H8b&ldtsV9nBcq?;C?6R$(
z9Lms39(@s-l@hBAFNm70X_(?}TFSs~J*aPX5e<hZf)itpg$)oLs?|9~(tt;FXvuTH
zXdEZn-J>UpP^P^|5}0mG^EwdD^Cw*)9baPP`mYLNy5WIV)CL{ZYJi|Xc2ryINh>J6
z6e`>@NgD+W0!B=s2N?G();QrKCY6=Oj_h!ER(3F{tUzm6CLJ;PnLd{lNLjFm<YPHy
zkJ1$+=!!zWSS^tXWVhj#*A!{YebD7bmy+sAs{)pV?Y2;ys!0<@2@5{fgu9fsu_jCq
zv~>akE{W704r2Oiha}W^DTW^Y=E^Z|u>Ej)L;@nlroaT@s5|N~jo5+HeOv$nddGpF
zv`yFy!t9@b8)4GAH9FW=2O8>n;)s9?$Lj`{Bc73krshS99n%n*zJB{MkY*b_Dy5+i
zVB{teRC;ZMm8*^MD;hls@EHD~)xa#0pqkNCKS9E5zKYZlG_0GV$sx>=kCdLQvIfI-
zxdBP(%CE++$fBsjeCnglFvNgaYOKfIIQJ-GXSCU7kaV;F0G8;@zKsG1F;_yDrw9Wx
zwctXJ=>Wto;dQFCXsVq0eA$-e+0z4yG{Aa|zO}SM_4Ud|+=Q*rNw5%>wSjGhq&W0b
z=K%i&>Ki*EU?@@Xd_yEoOKn_I&9n)d)~yJ~tCk)zc&F977Fz~hdMi$JtRZPCOyuKr
z0@$@1M$@*ND>&y3W(f{uZy!Cp$3A=T{{4^M8MKX+Yj6rJK~%FLD7uoX6TWeb;-2Ud
zmWT0t2JnDUxBE^^*aYtZF;eHMEig_sK1VS1(aS@Ihj8K&jInQY4(1#>H&rtX!L5^%
z&bIyXojxs65=Uj(n&h$G*otfUI5-;@Vr(-c{Nsn89wFoGY~t%CvThIfEY^c&ee8<K
zN6C<cFyae31wxi?I+sGf7aL4BDr9<{<p4x(x*eOQZ%8|}7U)9YGY;1Kx4YdhI6;!@
z2A`Xgnol%a_XXmc{%A<#IE?3TZg!TdQ|vu9LPmi@i~=Ws0P2AuE4>vBvw)%qc}HA5
z{SMWl^sN@y<|R6ot7b*hD_s|9(t{^KI2h9SJef@JSe1apb0LZWfc@1DqwVQ?7k6IX
Nxj<jzzkU08^B;8CGwuKY

literal 0
HcmV?d00001

diff --git a/logs/logs_successful_run_2/run-2024-08-25--23-53-08/events.out.tfevents.1724630112.0826fd70f652.869.1 b/logs/logs_successful_run_2/run-2024-08-25--23-53-08/events.out.tfevents.1724630112.0826fd70f652.869.1
new file mode 100644
index 0000000000000000000000000000000000000000..552bc0441b298fc0199417237135023b8a741954
GIT binary patch
literal 4111
zcmai1&#N3o5LOAAJp6$<iHGDdcv12;`}QRv2_hH>JPa67a|+V*&h*T-+tWSi?%DS?
zAqOuWJ$Mqtvv(nXiU<D)L2sVKuexV?W?nWihZnZ0y1KgRtFLN~Uy1+z{qgvh3y;2k
z^S!@+`{UF1-&$HO_3Z9OXn%Y0<!tYY7up%SG~5<5*VZ+++u4bgUf7u+D^^M+Sd{#r
zKK$#~ccS0p4}O1k;q7P9??*ph-uvnD-n-mZ?y483o^`J7{B!C28yC-AeC_<JcmBNe
zWN&{zGp%o0&kAX?+xyw<suiwP-d(k=K3L2b*AC`44;I%C4i-lT*N^ZcyAn=qgbhKk
z$@!8iCxSs?n5VbF=|m_tTtVh-o9r4Z3Yq(fC7Ce?4_;JT3e0)YxOjJGFO6o(ILC}u
zWHMaS2+Inwk-3O5uqF5TiaGgM!0zxy2ykQ+E_;zp)wH|3=vJxZhismAl1wVg3rV%(
zdEVBo;+|rUFS@<I2yVjho%-Y^m=r>BdhI-%Qi5A8b;T%AB%RIY^TVJz5wcpLt|E{2
zvrf(nUbCg}{4m(x3dTveh{9VVG4d{zp+X)C)IZqQ{FKqFm^QX1%`C>F+qz4ZXiUCx
zP#twDI?_M++M)~JGMBGJ(JEBTZyS0-rliKJVyV^GV0}V(hRKts@rB&hZ9`9&2GaSg
zdw3N0cXc<JiYYV@VJ~aAb8;iPD|!vHqERyEWPzI~t%C*XB{(poO+i5>_$pIXl>gK|
z5d2^7^k-fG?_OO>>(IEsJuV!;-K+ce^{MOv(OfgBOQH#I#yC{RH{wMt)#<3kP|SJE
zSrB^T(TPgdk{UE(HCpphM6#kYDIr03CR;=BomOpGjYLv01|5jU8!K4Om0I$AjaUI{
z$4YBz29)IM#z;M-32j*37F4pqPwU{LX`hqTjq6xpqH;nM5L}B-E4b&Z6Q#!cR{9Wc
z=<&&6x=S|VbRt}qhc_mIvI&BeBl^uu;|E0EKa5aYaaA(#TP__@1f@Y}3NPcWH_?XN
zvl8_4d^o}JZj~>OsJGLbltG@+J9-Uitani~qZ^YY#sj0i&QTmH^+Zt%Z)Hx2UA7gJ
zLm7I>!*4>fQeu_i1yQp#4O84rOBvX$2le$XqTvulaANFQSc+jNR_7Br102zyCC`DP
zahT|KkCrGxl@20DV7M{O>%ccJA9RIue2Izc9~DG&!~3kL4H~M|z(9fIsJ7OVPEdR)
zRJdi5H3}H~i<m+SFz#6_ae_z8DJzW~+2QW2>`+o!fzGf@IwJBjZ7wTtvS1O&YdIy4
z(iJ4=YC^wQEs+W&x8au86lqL-(B(##lFCY(0(OPnwosg^Ne@N|3qCdkyOg%ECJYdC
zb;1EIiOe1jV)$!^B-D5*W*+|L$}w-S{BU|i!Xc)nzyRT>JL)iv*n!f0oc{s5<3Lc_
zCM*VF^iQyjFlpNw9qg+E4K+PcM8JjPb%V<h&&Wbk@1n!jG()DZ-@OZ@(MFF-X($8;
zxrzLgUK?TMYGeG0CQkx9W`F235Q`+JW;E1KkT9FCB69={>xO7@2(#oPr6;Sb!E9Y_
z!BM*MtMMzcDC#hu`lvGuF<_J$>v1>EJ&f2HEw&jX9V-BUC3>@OqX0sTmC)oV!az(d
zsL*3L0I^GWohmJwDyKeQwq<$t^gtpFuwJ8YEp1SJy|NKCVJUPHB!pdUAe$j64*k?Q
zz<+`H#*PRWN>n`G5{J`H8+TMQZNjE?E5h-rrH2gOY4fhdj(L~fiW41cNE!+g`FNWE
zcI}qYu<gbQ&Uu4Tf<xKI#}DqXukYWx`}rq>wy|;pPN5x$YBmH#S8{d2w~kTV6J5gc
zFrCi;9x&=w--!vE;5{Hl>Rh!2rm4o)2!=j-dC2e(PTYYp^^MNKoI~fPYK9rOb8ynx
zw%@(emqkkAs4P2^Jk}dqaV;MQXX8SQZH9z@@!+dtWSpH%eBDIW?E#;~deEqkT`~D4
z8Ilktd_ku`$kI*cQt0<$i|Iy%Os}&XfXGd^V$<*qX{Xi#T?l-|!TRuKxB3MqNOIla
zYjaZbiDu`zKz!354T&6w@e<C>&T@5%y~jq#C~$~T-~<prJuqaYx1wPdP!u8Wh^wcc
zp<0x_)dI`BM8|T~tY~_r>mp5h@FWNaLmFQvlL;QH5|H>ph++U>eYL}Ad-~b=z1R27
M(Z~4uyL&hO1MrhF&Hw-a

literal 0
HcmV?d00001

diff --git a/logs/logs_successful_run_2/run-2024-08-25--23-53-08/events.out.tfevents.1724630132.0826fd70f652.869.2 b/logs/logs_successful_run_2/run-2024-08-25--23-53-08/events.out.tfevents.1724630132.0826fd70f652.869.2
new file mode 100644
index 0000000000000000000000000000000000000000..6b9adbf3fc6f38a81768f98e594fa1eae681dc1e
GIT binary patch
literal 4111
zcmai1&x;*J5LOAAEQmkwqF}NNo+R((?b{?IK?H*#i-Hj~ryxzgnVxy=_H>WkJ#Y6V
z<e%^)h-dGDNB;~#{|E(dp2V-ZXL@EfZ(|M{UR8B<b=6m2)f~ML|NZ;j(T|skx88pD
z@1Otp=$%(rmP<Xqw-MUkS$;9!yXu8@#;y#v#oV=Z&Fyx6Vx<>$F35_NQVA9%vtRXJ
zKfM<H9=-S5FPA>PAN{`n!<D`7uk5|SZRM_caq3y;>h3?6FTV2Tg*RWi_~P9^FF)Da
z-_K0zo7S^J+U(ALHos<tYn6A`Y^!I>#q#=Wacj1`F`F$9XEzS<BfA<-ZG;U$u*vz7
zD<^_MVVI}4!RbUOHe5mGZJX>GD+-zWi6xma2M=CUTMEp1(YSbbXD^Lr$~ec2R%9|<
z(+JB7v5~omF|ZZ)`I<TTSitV!W(aU(6fQf@rfS+<UUaKe@<TSyI!Pv#<%Oi$@jP$q
zR&h_U#~0n%Uj{ef_)dLt6HE%BIK6fePAS2ymbzk;D3Z<>i^V}uod{X2QCE>i`*|nl
z1+UpkczzJ<ZwKQfTt?yTkr;WG%1|K>1nM8`Yktb;RZJUOlV%>{(QVx&OEe~5JE#sj
z6&>lHe0|x4Z<)*2qG%N==C=(!AyZQ0Rk74+Y;b%+c!tT7r}2f{)@?&imj=?synA>Q
z_jh$SnTjbi5MeKCxN~wNx+{7Ov!YQl=VXDKD6NA9>LoZZq)kCVCip5-Rh0kKJ`nt0
z@APNRfp@PirFCdr;2swa;O^D^`}$ONfoQIo)FsgbIAa{D;~Q~aOLaPGF%)wVa~6c&
zcyywYwWJ1(SdG@a6p^gxOiD=5oypb^e5X}gRwI#Aj6nzD@x}_4bEQ^1KSrzowPU3<
zH3LfW<Hkrmr3r0V-WF7{!B6YpqiLU!)s5>|VWM(E6cAjGPb;|RtP`ci`$qZ@Z|L#K
zVY*8;;&dWhl?OK`g0cyMlq34hOydVcJ~)U_TXR)1@LMh&Q3RzyXbLamtvAtz+_Mt&
z^K3Z5@otqb52?4)o0LJG(L4GW(pc}JXht_CON<9beVwB?RO*SM7T(I761!?ED2Foi
zl1E>MW~Ia`!waHjYZ|7wo0c-LTMz0RT|~nnir~c9V__+Vp;(<y<P2~`gO)r8ipF80
z+dW#M2vwRzj=*qZn%9AEo<Hac>G%p0*MD0O(GBmjqBdx#Rs#bClB3#MPdY*Il~Cc9
zN!BP}@GoKtEx@>EvBU`;F{i9Fc4UXUv$8`;Wd%CJGU<rO&$PL$z{!F|ARo&qd6cdo
zL01#{#cGLEAh`{<yrxKF>Vqyfx|CE_+7z%W?6!sCR84v?N?7o*A=ss~jWuC_psN!O
za7kqLa1g^^J0zjTOEL5CH&>2%gXM?QBN7fVH3bF;N8M3}X~Yhc?&JIq;2j5o(l%i+
z2%~?3ZG=hN*63hg9cZZOi6R0n9IqQ(j(A2EntB%<c1$y5`ug3wKpJiIsFa36fRLNW
zPwBN0R<1V2uW9lmz+?7@P6M$>f@(%X{R9cK`8qO3(6DZZCWkOfK2mzJ${Nhp)fOD3
zE59DUB8#F9^Qn(I!w>^Tsj(h+<J_Z&ozY^OLDI1T09c|o`!)(7#8?SUo+1py)Pf2<
zh6510g4e0iqN#G~^Hp1xXHO3#(g5o<`qt70)z>Q<Q4^LzCqY8k)dsQ|lH$-$odf(A
zm~ZTefT2Xi^DS{W?X+=6HPa?+TDK+~uUmS^;GH(_TI`s2>8&`?v4*6fFp-b931HW5
z84cTRtl*qC7$rEAeQ@;fF8k`ir}sYlaL_hZZonzD15wR}py*1jPWaX_ihH6<SRSVH
z8NdTZ-Re6rVH3Ou#7Ld1w!k#i_!_~`M=uW<9>R$`Fs8oIIhb?k+*Hjl19uKiI@|WU
zclxqONgS1BXOhQyV=J!Z<KS#uh_TI(@XsH9d4!C!vx%>p$htk?vse!r^|31^-y}m4
z!h|pA6bM<m>0Ao^UTiVlsF3M(mIDyE=~iqSz9H?@TA&MouQ*sA-0D`p-~>so8+>g}
zYCh5ITo;IM`lBI{<1n7Xx!GB+PO<mc2pI(qF$$aj0;mUutn^ki%mRuc<Q;MK^fOe8
z(zjY*nV0BTu9`JXuXJ6cNe`X`;b2JP>tr&)V^snY&xI%k0M=JKjJBtrT-<wk?*e^{
KpT2(o_x}Lxm@=pU

literal 0
HcmV?d00001

diff --git a/logs/logs_successful_run_2/run-2024-08-26--00-02-24/events.out.tfevents.1724630555.0826fd70f652.869.3 b/logs/logs_successful_run_2/run-2024-08-26--00-02-24/events.out.tfevents.1724630555.0826fd70f652.869.3
new file mode 100644
index 0000000000000000000000000000000000000000..8ca1d6ad846d489b7b578fb916e39aeabbb94a96
GIT binary patch
literal 88
zcmeZZfPjCKJmzwKUHPf)<EERAQoKn;iJ5tNu4SotC00g3dR#gssd>fuMM?RIMJam4
hrMbC@MU{HxMVTe3MS7_qRq<(=IjQjwk^7x1VgaP+B7pz^

literal 0
HcmV?d00001

diff --git a/logs/logs_successful_run_2/run-2024-08-26--00-03-58/events.out.tfevents.1724630643.0826fd70f652.6853.0 b/logs/logs_successful_run_2/run-2024-08-26--00-03-58/events.out.tfevents.1724630643.0826fd70f652.6853.0
new file mode 100644
index 0000000000000000000000000000000000000000..52264f3345f36505026169c61baa8d7eee1185fe
GIT binary patch
literal 65847
zcma)lcU%+Q)^!v_2#OU%lp-Q1iXtdr$sDkvsMxS8MhF_E89)$w7u&V>f(3i;4I4i8
z-g{Rp*t=N1nUKuhJ2Si={&DZ`NoJjW*4cG2lO$M?_wVn_;3ZbAv_}_z+<rUAp?Dvi
zS{qxl=^#yPVtt?XHI+3IHL(eCx;}AgU1ZIKq?j1BZg9<Hoi<UUtEmyL3Xjr8Yr^wh
zS>E{P(u@*$>vpwlKgp`+g1mJ-7dR{DIx9=5b$t_RC2CR<!wpL{`sQR=w6uBY!j^>^
zy>S|%ba8QwOG=DSN(_(G>Rju)xYn$t(<CHCCnnU=CB=IAc>C1x@(%Fw@$>TX_V)7j
z^{N}-T0@T-q|xa&5l-+?>gWWG{({K3@I-+dzbJW8Ge{lHzm}*=60eKbX(F`|iR4P*
zvhW0cg+xu?!NSH85;gG&dA1GLYvW?WqvH}1!sBA2g-gIpyhaxusTrh=(B$nQypK9D
zqF;D|HdVu47oguCx<+p+pG*42C&{f5n-m=_E+bqcT|>O2uTCAQ6+EYoh)9Y_idH8I
zJI~`&-^EA2j2<rgzPOD3l1NRoTHxxfM@4JYx>#*&-*DlWgjZ{NdwbW`zm=@f_U*@e
zRTGhSeNDsb>PU4=cppuoy0-rMdismS7k%<xtcPCaPhA@wC%Cb;h7Se(^)c#{aDi2L
zY@9Aec&BFGeqxhi42PwOkBjJ+z`x~dcuRm7DF@UsY+o0i5YbN)nH0_YD{*kVK*2Ry
z8_ToGb96MmU_i1E&HN=phUGC->td4P1?t*3{#Eaq2I6@y8*LdZ#q|x3=F>-rJlA-2
zLV|XX#$dwuPPk4JAFYj03s>-#gkyy(cz<csd<4bDMGBh~gEu;^Z(rg3#pK~%`2S^&
z5tC1F8{dx7>Js?0<)fZA2%&9!+sMyHB&gp=tU5Ma8yh7Q3BF?BttO}kY4S@b!+S!4
z@y_(lTT=gGVq7Ahhtb*?t&pC1v6^?aI!cpQssxO9Ao}-AP75|~csF`*cnp7#f-~Sf
zzU(B%=``UH>geb`>WBgSUiqL-2#?am#f0-w5-}h?P8&;$L~LAmL{g;SGFT@mfnOs&
zN%C4ue1f4Skhc;vnn?bII(gJ0)rsnG!%Je~_17lq_p8rw0v2p(<25Pd#XeEB{m2VZ
z@%k6UGZOwgJT5*FA~LjgUa0j`M@NP8)h$AsAe0E<7&NiM_FUCTiE(+?2+M~@@#ROl
z9sR=cZzV?g3b`$EijL#Yna`cr0sO0Z{*H`{M;65^`Q{KV<aGq^9KqG(jTlX$P8%T{
zY@ejQ{F~qg<15{J>XTJC*yuQQq)?c3aq;2N>ICXaeRVSibzQ@b03_@nIZih~qZ9T3
z&N8gYpBW#He1b-)Blsx7+vKx7kxz+8y_dZ5Y6Rcf!eiAj`g-$!x4B4dg1QgiZxaVb
z3WW~%=`=BMd_pC~@-Yy}A5L^qOf1QSXmuY=wB8CiHDSYi{pI(hPl0fCVtAg5#qh{G
z)9|P`9lDOU?HXl>lD;|(pBcJvZGuq9P10Q3m#@j1@Vr1CpryqlDj`yM!Qd0&zrtg*
z{88|S8?V->V}zaN9lid&h`87&t>9K+PT(g6`D^(%Qld!?=DiZGUt^FEcH*(&ck~T}
z&uTFacq4h0T@U12e{2GO2r)cUzUT4(8xfrlo*2iEK5Bu47>9h<)bI^N&HJ7A1Kn`~
zU(yrvy1zbn@kV&p#_Pudedi~!&2LkfwGhzg9sV65>4g%ZkBbC#OgwL#-w(PXR>)mG
z!v+XFgtC{P1;o}SkP?p}Qo|SI_`Lcgvds&tgqXOv#C}5O5d)s@{rXJf3yW3=Rac>@
zC-VoUi|ChEIrQ&@8=9!NAb**-hIl-#eRXksZ|yUfuTimm6Z>Hnu6ZvRVqVN6!-e`i
z@U1jXm&n`ZaozJ`2aUC^{40j4zz1LypPR-s(gz{mR`f|O?oeNtlK4`X$TtT*V*Bu`
z(l>Q{Qbh=P-X|$4$`lPnB%Y7;7(Q=x!USdXmn$kw{7jgj<%<wMt3k1WLrLKC$*=(b
zfBMRY7ZL&^TGLk@F<7YK!bF=lp~lf9j897FC&Y2TB!LL5F3jCA8kv44a^(?*s-YE{
z0(l)z6MXFI28RpH*3edXTw)yGN_dcK!{D$+;T=OmnzrHK-FTgL5MNV-8A$LMY)UYx
zj!sq&P6!wFP2Lk@8M?Dc!1G~b7=6hl;UwPm5u=Tbh)#;+yJ;-1Mfj%A=Q5lyY$tC9
z=DU7eYy@99A#+7<>U*GU;}oz-#@Soc72+Wxx5yqcem_pvR~@TO)h~t}fv<rBZyVMj
zm_PAw(}RzR&#St|fOpOFhtTTtvVuqwI0Su%j}%aRLDnYFzNF9h$iaMf6C5eB)2aD@
zh=?<c*h2HwAG?rRe7Vr@8jioZc7S2@(=Wmwa!fq0HHpv3JF??~P;ZP-{Xybw{2w*f
z)wEWNdpE|0{zUmW<Xb_4hK~Rt2jEnUR|(BbA4Pc~pCHs~Dnes5QHe>h8h)79@)@g+
zj_)UQFVS3HnM~yCBwuj&Ce5o8(!uk*qL0LULzHg-{P?Pa);6rArP4xaE=ckDuGK!e
z82%Vg?d`g@RtaNntQP(lP_nFN8D%M@QZJip3Ch;ia^qsvqU}YBR54ut`k>!uE9H3c
zdV8gvvP9nX`gSVlG25h=E~*EXKYXhhK>q*26;}HJR8i@mEMqW&1*Glt{U`mce~fdg
zWB?W#lJm`~$pq0_S!HRZbskW!?`%~UKdUmlg5J9r0D3y_&JW(Ir&CozC$ZUVRlu@Z
zg9x4Ui{kaNbbi&+sJye)UkDx5B@y(0&}klUe|n4ggw9<6%Fs#RewcUN?rfmbE~JUQ
zRjJB02lRA&JWDG}Dy;;^=Q-!o+v%ze<4V@@He5dE>!on3`c=fsD=RB09rTwA!d@rz
zg+9@GWfP&}W777*D04Qx(yLyc2K?r!Grn3~0lcHKg0ie0A61(oqwDSE#yG9dsck?m
z88haUm8-jG*-h!Hte{6CF@a5=KUa)hd(_yfreT5q&UXG~H2@Z<q;yu=>lfg~2rtN}
z`Dsrla{fOCSrhbjIkZSTik~j|sgxi6ZG(3fW);8g&aG;&{V(3GrPX&pS5TH$mNwEx
zxa8!U+|qM@N7XXmVtP8ZwQA@glIF(>WqA|Q2p)U)1=qaAlwUOr@GqOsHMJT6aA&2X
z(hgHbScT#YrwLuvmw5UZU}Zcl%$8bSV)cnm(%JTUZN(J!=7))=&8v!AC!QwAA1VLi
zhJEeUoOt?^w}_PUG+7|?RdMG3aA`d8RcdW<0g0yxTA>K!DqZ-V>1Q~!(9TodTb%-Q
zHKm8rMSp0br}_F2n-HVr|HLBv89=9t)7e&!zQA?rG`j;izLGxT9^m-+K$pR{xMRa2
z8-4NfH|!$&<(aNl9RcsDtg5VJ;(3{6w!FN^ExI0fu#RDwUcTjeSj~fFJd{<GPB;Q&
z)@afsk~Nyq>=uclkFW+rk$52LXstSdAB=40rTVawo!6FiH`q@IeX6jE^c9bxyt16K
zq~1P%3?izOjat=hVx!+)22{hKNx4?n0p+4}QkF9zFM`9u!q|YqOMa2SQ`HeoLf{$l
zTtwE||FYri;mvZZ8_bTckQHKes=kPHQdU&j>dlHo`>10h1)iGUer(+p=ToH0_i7+Q
z^;O=;(+CQ=wwW8z+U+|z*o^>!gGD9HU~uUzD^|Spy+5QwSTz(4>N|wqps|`6fC|M(
zE-ZJ3f|Qh@0EChvl8jXA_&-fWXks-9wg;|tU}o=&JtB7fRWf#Yhhkt-eMeSjZP%kD
zA+`b#5(3C#eD3(VY``wdBoY(OoM;$g!ax{<H>_8(mB#|lkc!X`z)%rH()?_}PntaC
z1l#)4gW2BrOQT5j-Uw(?y%Em+_X5?q3ajZ+uWBN08IBq#BRKCV<`};!j)X_krXn1|
z!@!teT~wBLHufT3J_cAYEfS82(`z?<@jek(j!mqYafW!gZZpv|L>KR63=}darTD5L
z#LG(nDCcEUynUOL(!|S(=AvPVmrcRB7tNZG{A~<il)p(4w7su)v|{_bE(Z|8;{XbT
z)jSln$^V9%u~!SmBoLb?0Z46<Ml-5TF{W+3b|L9=MT14I(B}l3GKJYj_nUjCofhd~
zs8<IbZ);{%q^-DsgVJ7EDz9Gg3y5XzZ!Z_-I9gSL)Z}phgPJVZH5RLRIy>gHZ|rqa
zR&M|b%Bq0E$=CQ{2#Y<xzCSrz=XT;Y>Di(n-oN%IH*A8(2x2k{fYc<8gekKsu@>*2
z6{&1EnXSu`8(VDzq@A*?vN);awr;nbm}6>-v5p2v?^+KlTfGNJNu`alsG)-EFK*v&
zQL3Ba;zrd+wzjI%UZhk`X{)r+Uo2*J9;W7%Eo3xL28@0**JJY1Sq}&=Qum6J!N~1<
zrG2dS1E7?$gwisPL{fz90zX%lR+N=;HC&xiO7+L;To-Xmw#qWfVtLCa#R~e@wrjba
zLBqR_SVeNJQCATGxt8}AKP*FSE8jgGw7cPpL=yMY01R=j{{uHtuspOyU}$a!8*&gg
z04mWa<JI%IBd-HjxEQGEC09zy-@|&1(oiC~=KY(l+BS3l2GWer4HmDLm0W+u4pcQQ
z5tB=zw0Eiq(nl$ugxc6z<QAlD&e=HuMQU|7BXqU^P(mm6?r!c%=aI_@9ginpPwVNl
zmCzZuak6Sh|D7WUoru@s^|ExP6(6RWWBD|l(E0X81m#PI7sLS(iO-G^I%{%7po9)D
zh`9_)ZWp0*^x>zgdODF3Iu9RBR@Ezc^9P~h^H{uImd+VxovPS>VLu3+M*zx~&fR9`
z1Kw4w5JBiPd?H$t&>63`XM>_9RwaUXu+!?>20A8!_?_ebR35H2-$=u&<04)T4Nq*e
zL=b1aP%w*w1MZU=cLwlK<M>WXkiNbpnDSMVouqv@R1z&i`w(joU0_Ph!tDFy)mkzX
zMZf|u6d6ZIET?r9omoJ=wdDzVjT~Aej@st+Ss%Hu;$w@Dk?J*|$w-B8Hzu6melLqU
zP39UeSCKT#H9#7{|K7XERrVe8iHuZn0GEtZ2&+&W;=1p#4<Vl34p<palXl-t{h7OV
z;zw!X>92q%o+il5@r9XFnNziir}ub@q~$zK7Rd6>;l62Wjv~J5SzTN};%S0@^0*YM
z<uECi2;$9vE|?(ZYd3T0U%VALzOS$f93LO(GQ-a6F~_(P{Vm=TL0rFv$f1COcxuOU
zoXg<b8X|}<!ZHOB#0wYf<0_T7u!}@dD=%>?5Jh6>i^ErTU?bN2<84baeC+}(3}5^)
zh^V9NGxxarmkXp-+tm~;LaT<b7r_Vi*JtOSK5tC|uLpo3@C<n_BFpvM$xVIG)|)i>
zV|7Hc(Bws;#&PlI+tMs_`JCCL%D1g6LS+SUBKyw$cVl;1a<Gd51P2Q=jRq6%9N})9
zYWA9T2*ARWX{=@jL7e4XgZW;LjUgq)%}?AZloXL9o^HFTJFs`}&Ws~=`vDy6@(#to
z%pNYRP~5A%Bq25e5E25&V!Uo)EoS?3rY(tycYueOFc8Mzdeh3VJ9pdcBo)EiUnC6`
zK_ra^^Ior8uz>m}Vo3E)05qxI2v@VaANOVdzmnvrm#ajpaMVB<!KXb9U`sYlO(fyr
z%876Y4+CR_RVXg0tXBGNA*SO13#LWFSWwvcd9X=cr+*?|-UV>tWenUqEuE|NI5mNI
z`3C^yyo~WZ6Wg(1O+-K9<pxaTEb%f1zgXIgOD|_ujO6by0HgdBlpb#H-M9Yat{td0
zkq|xwP#|m~h_9sQa9x-EYfNlf2Z|e}Hc6v7GVwK+I{R}g(&t(O6#85~f;epBEiSm?
ziQYsIS8gWS&PNazy2DtR&^l(MCeH#G)MPA(7msseC--Uxkh1y!P*7G)1@WZGZJ8!`
zTX%A{?#;z*(z8WDyz#yh`_w6AJTW-{fYc<8g#XoWVO!!qnGr#}8IUr9_<zqGnWy>Z
z>qHQL1_%+v!o?xx6`YA6_74^*$q8Zs^YY21>tr-f1&n?)$AVY@Ow(0)Km>6n0H`1y
zHP)TAby<}~1aV9oaZ6G`+*`9dC~L<sEy=Z~fPh?+5yV$ppA51)oqL|dy?3Z+4&q)S
zh);$z56m68`6M}r6oATV6wS<7A0oM)?@NBJ*C;6x$yM#`R8{2lHf=~Vesw^+URH8F
zI66+{wWsVR5~cGGiXeTI$_ZjYLAp)6mq6$^XNo`xo!Gm6Y{WUW7Xbt=O@6PZGo_G(
zPe|ACs*Nwdt4YG%Sq1=ELTwtQs#*{7{6?}TCLK`uGHP1$SisH1Py0zIWg(=5l5i7Z
zQN`RX#6zyXTmRBinp?>9zUt#1Q&nFVy_iBupYvb19{PmXpGdtHZnssi-k(CkZ!=l|
zYJ;heR5?^;-;UNx_u)r$;h$UiPB~UC|G+lXU1-b1c!>=~nPbSFEYe)3p%q}9Fb-e1
z*PI;KigoX&J3!!nWZ@!tB+NR5TChpJ-xEoo_E)qK&zg)Q2<WhT5!dQcu~nouUqe6`
z3V=Am``v!V4Yuw3n$!!f5EzyWBnYffM03|t`yL`5UyHyp9w*&W@OXo7J1vOEOBM#}
z#Nz~-Ht{9rS^D4`;_;~B0G0DNS)q@I1yjX_RwgcUwSg5R9w%@wuk&2La%ImDF?R&P
z3#KWb?m5eq@i=>soS}aS*ae)Syr|q4X~`NqYpM&#i+sh(!NWRRtwReGP*pa4dxlHk
z)_1RKSm(xnJzrQ=C<z-Wh^$m7+On($79U7BO+kx5IEiT>O0|0#9DAoq;K=Cn4}oEb
z!NT%Vnc{4-h4WmZ1L8`-#-Zmy3W!D<{dHg-v6H8e5PX2p5Q2uh7oj~93$axlGmDd1
ze!T-2hJGoMHA=nsdG%T875{T1<-bk^z{-ltepeN&{S`%da=hCR3XT`38cnWVT8!Pf
z>RN{Mj$RePq`r5cVMV-ea(DMz?(Cffr$~``jF3=dM50)!y*}NA4g8w(lvoaRl(TFi
zFxPde#!}~udrq<=6CoixfG{@wcX}Y()<8X*1c$d1n1<kxk(j!Q<yd;p6ngh_DMCYu
z5Q(E%cjvKYY|GX$A4yera|Yw2sv}^BYiGG}_rp1I;0rwf2nP<d5&qnyLhMLVSQ?2F
z*Q$VrIFXZ?Yq{fD6WS2t^AH$}i=^?Kp4KUV^^F}omw4N{8W@K-<GqcM?hi_^&C%ui
z5^oPdNI7rg1uQ!@XEmz7m_WS!8Z994HbxJ1c+6!!8T5qYa~Dt8ILc>Jtr;CumOZS$
z*piSwfv`Z@L~QQ8XT`j4)xJ%v`c%))D(OLY2b5vY0-N3?O>hOmLKDnKZhE!4%T<in
z#>k&RzW9RmeDvm|zADzeYP(IOR*$L!IH=WFa1O5O%F5Jh6HBV?4+Mj1YpOU)9&XA4
zhWyo#6CO|(EYcH3*|65F8e4hP{yH)G5Fx2i91=d&sxjq%PPd3AZ0iR`WkhG6q$(`6
z#Fwo^;vGg9qB@1E6&pG<Bf6%RKUk2HodRg#be|Grm|u!O65S~vp6|Kwirf;i4gfP$
zc<%A?V%2OmxDc(~DG)A~Do@`X`-4gz%zaC8FB?H1_hh7Jv)#vno(y<Znq<M*ATS8I
zC()jjwl)f!v3tucavJp-0ahYyTFi`O%eJmxOVrx1ps0U(X_GEhYjGdOsj`Qc^Ca!M
zTVnvos<kC$&#3x+|4kp``iW5bkd@P)zm^{iSb53anot_q1nfyDsV*k4=6}i-CX}qt
zZnXL&Jfgf)N~wy)DAh{mVFw5$iwgjdrIg%!lB!bL<a#6v=Oa|Ul(yLJ3D{9{!8JnZ
z142qD&2&p+XW|&u)Hm$^E2F1$N=oTozmckSk(KWeO4A$wAWP|4ukor$$qlmzCC>_g
z%9m1P+O~ksNoyFPv;!d}l=#O0*ri=N)Gmf0TmKlqvw!L3jg(Ao&E6_JO%>9>!Gm<f
z4TYAA*F#4Xdohu<i_EOpuVZGrN$qP>7#4uq$2VgFogQV&($ZJtlIF4=L7}<85JFaX
z`^b{{ta);aD6B7N1sI>?HMJXCkv?@Yfj2ONy#rh%kJPE~oOV`v-LGW&8j0X!`a-~}
z$qHuns80(r0G&WU7=VB{!q4yUfjhfl`d~7BDa~QulIaV96$&NuUplWk@%S7Bmhm{5
z38p@@VQp3~Doi|XTLi2Vj}xd{>vfz%rG7EQ;~k0vRL<jMg}e3+Y)0b&Ux>>zHn0M?
z%;0eX*J`y~tk-~#L{sM=c)>KaLUDvEGa}cLoMAu-*ae&+AM`ReYVB$&SZSSmW1^`?
zq6G@5saFj@%FR6zsv?^D6k4Ysn)<<g6&q6~fj%|rTN2nnIEiT>YShC$TQa-iHM^2I
zZ3F_toW>u82>WAU&pw4d^(4*wKLmqj4k;j_Uv%eKpz97_5`tExV8;-GhA0rB&C31Z
zf<3wpBXdAXIe@|(Ad)o--0A(Ave>&1vq<^>g|M=kdicZdTu`yT*GRTDvj>ZCyg=1x
za%pxw#@_q>kMxcy2n<81vBDW2e$L*<Ir?e)ks@;$A)&~KM6s|fTDmps+i<iqv20r&
zwhfkf$71C2|0=PKs}D^g+0g|dAv=IDUf^?Qdsa5wWey3B#b^Nt4g+b7J~^-kyO|MU
zM@qy4goY9!5=X<lcAOXU`?~Bfsp>8cz?)Qc1l+XZ1XrSJtIFiSwUqz}2M)9mzWTaN
zT<FG8`$(KTKzN7~18W3UC|+{&E+79&j8}J+GcJ<GIz)Ij|3pa3$HdzM5uSJ(BjZO|
zv*c3QBZ;?nA*7tQ@dBk91+#VYu7(hA|3nK&yp7RoOWol9j1HJa^0`4}Add3cR8tSA
zW5YB7x0Vvp!x0upn`r7u_lvPY{nr*HR+Vn~StUKl@?b@_v1Q}sqzSe|SZIRzXllDZ
z$GIJ2PE{tFdaNf{&qq@)zuBC{EuCjWYV}WqgIbL>bzNsq_HXy!WKwNgRtIxXZA~@x
z<6>>u<15bwk`tbdp!9@MK$q)Tg$>9$^P3obfRNNE4vCsqJlNA_H+K_F?NI~vE2F8M
z+f-&<UyklgG<7_}5KS#yy?uLcOQNX{qpRgKwE+5&Iqw`9=54%STl!%h{dv?@kGMa!
zG||*u5rS&!z>8kY?0BazqNz9ef&r<f)(t)obUnd&56QiXbpQgnC!?ujvX2It75dtm
zWPui;A@?Mjdfc0!KsS5$isUr5A*`&l390?jn+VX3JBwG-OPhOAwRZbLvdZn!^KPVF
z@7WCiS+%w+aFEJQvA~~%ta=Zi^dT#!smuS_5a9l^+7v?RB|=Ilr7m5=N-i~fNhl5V
zDeSAK^g&8#nnk>7e1dlfq12%c0Awlsa9E%^*`%2}p|qebpz@`3K7MsT#@@NZ2&FFw
zDWSwaxXsf4#5^Se)Tz=1m7dZs$xYq=k|wJXqTkw+_R*{|To3I-Y+OVG?mT9}MmkPh
zMheAWv;Y(eQvv$RvN%(Z$tX!GaW6Np4V4%=HZh)kxhOl+OkJML8r#tdFl!jcGkjBb
z!}`)J)w?8p)~iqz*Z{yq@<__n`fAShMz`5cMwPY*PDT|3{I#nn+i2syk4y_o5KwMZ
zLHO^BK5=taJKiFr$}5DIj4BAMP&hERxkq;skN0&4(lQ<=4YX+Kx7_e=DYJ;jH=zZH
z#|boY^Gz=M!-@pr@h0A2Th8NTg@+FwaHChXEJi$j9<2Z_GkBc9KYqBx)oyz=k_gc9
zwP6DV6QEfyj&Ke4XuZi9_D0LV8OjULBF8LQ*{+R_5&@cl7AT+q9scMf_wHiG2_isW
zp>+x(K+`66VSR2_R+4b?@_|Eva1zr%6m-?2+pvt3X(}?*3`AfUYOnwe{aKj}9kb;c
z>3rJ}3_2g=zG$@M??Bcnp4(1B@Ebxy2pXb5gsz-y!;*gQaUf0esS3zK(-g@X)!>OZ
ze^$6i<Km?J&*K0qD?m$RTe0$MTh1WI`x2qxc!8?XWYf>)Y<>N8r%3PcVPFy_IAet~
z2+%JLOR#2f@%KoPnSqc{WJID^$Az8<WdnZSIY%ttL3priB0$?!sm#7E9yFe0hjSq8
z8L|TiW7DjsGi#DLss{;<NVEV1hk-Ok>neJ%3T@Y4CM9AuLPLoViKEH)UHed$apFZd
zsp>fhPO3TrR_}V6iwUgxg&eqVbD#|e4zv;8*VmVwSaR|&iIcSm4{>5(jlc@U6t3ir
z6fH6S8G*sLNE#3AKEoTZSNATs5pUNEhRqXiV`O02(yaChn}5XH!w^!=+js&0_Z?V=
zSHm2Mw~wF&B;Lm8vCkiHLr>&RBKcgn1#BGUv#9_*>tn$dS7`o*kZy{wK-xrrZcDIW
zP3L@lMy!rUNNSbzpnKn+b4j-z?k7#~6v9Fi%twIUn|_EpU(%-}5uiQW0_S`L=z;4U
znId;}HmTK@5e{lK7N8z=-I>#>{#!`3b!-RbpxT-W&@RO~u-|ph*^v{DKu~(ZD4;KA
z1h93FvilRGYY>te#UU}Ol@}ZM+*wToXbytP2+(iYm04WtZec`#4sH(|i2xO@w)VDm
zB?2@HT`ea-1(46S`oqaEZ`}b*NCc>WuxniGHW8p55P}NO+HJhqu;TaY69L+|D;SUp
z(Bs?o1w~Yz^Ni%)ZUlkclM$f*JC+r6sYIJpk_8ssz#wFSM1Zb6(Kyh(?S?MoG}<Ao
zMB2O@e2Be?w*F4k+I-umjr7uHY+=bGxGq5hRbl(qt|sj|xEugv)!P2m6I7$`j-&$g
z8bawqR!)G9ewH4PvGXduPpz>BdlE{0>`$=Or7I;7O7*UPYo(_&UrH&s?NF6-v-4jF
zr5E=AAWLb|xv?t0B27M!ENt)qQ2A2w@3JUhTb;A1#7id-QbH*#>^f84ogYUi)rl+7
zSx;%Bl+w1t(W+Y(q2CFmjqw1GrF7^@nks8S0DW4d@jyW3OG)@FbiGHe?Fgm$2q~e|
zV&*gUpn>-hqN&a5f9PSPWO7sYVT;MCsfR{fAsumiL%1F~qS%XxtmSVWv$j*uYf0@3
zRKWsJ`%E?ULtiWQU{~rE(p-)sC^Q!sLWuFnvkS2icFiV`@hOl4<1jwyWWI!|;>l9X
z;iK6!0^cqR7s(@adbsCn&TsdShh+M4Wni65UkK<o`zBXveZ)C308K|g7=VB{!mm7j
zk;`n*WF48liUfjT$@GQ53dJL?XuqV&#N(+5EaP!96U5JRU>76m&mkUvj}{;vC(z>a
z&Dfv!l@Ae*&us-8m-9GTq2mZ=mezkrJ>s$ptziX;#|c~@AG@;rKccC7w*h#;G_{S}
z9d6WBcQ0~=x6v|ihVq))Va;dm#`BKlh^B5G3i~LaroPhi1b6T9jZ~tkFQIh`qNyL;
zcVyWfe^-)lYT6by1K}j5fv8b~R)n#qj(z_}=CnNs409UR)Dfp^u{wLMEFjIiN;|L#
z%^XrdG+HgABg^fZwUdP4e1wJ&G~~SqO}uQ&YQ-sPkvX7qcQ6cdfJoLTaIep*&9>iL
z(2$gW>mGoW)zmAiD%s85h31jtorh3xyg=1xa`KiRT-Y;fdQ-P@PcR8Xsj<QtH1(dC
z3aoQ>UnMCr>k$%)j7Stq)62^4Z0*<<V@V^f)l1H@iKf2UtvZ|2zSKIB9orESvI7WX
z)Bm+;#aitxwV4D*-Ec4s!C@ec(ZXkO@1;HtAthovLPLoViKAiuR>fdeyI8P>RCT}J
zV4PHS1k5>pfvaO@X-f{AX#ofa4zv+ID$0TlIHI68b)EVH9^%Bn8i5sxwVWl}Ta_5U
zi@;!9B#m`Q=UWZgQ@bDw;_Yq&z&OMi?`@2%_tJ{(Dq5oq@%9CTl=C)Tpw;0-wteRg
z74bF~4W=dD#%SUBnnEM|f=NDaLui!GrkZ+hss;NKY8y^SJI8=!AZ?<li=O_(U7Y)^
zBe9x>kkl&aLG$KTWZ%pzu8}7A8)2ad=A)@?P9Nnw_d6CMn)*)?>^mP#J?d#Ywxn^;
zB~q(*4+0$2YOJXp7u00|y3X`tDJ&UaP;E^$_2Xi#*{bV3UXl~ugP`<;Q9$3|s<1j$
zw%jEpzH&-_MsY}t9bcUd&?##WO+6JsWi<8G|9P<cA&0a?Q+o~ub3{`MS5LHwu17TW
zMs&5DrWQcITJ{)6hWTo#U_w94<D0qy;z;2#4Tz@xgb-9y+h=*RZ~mPc5KV111`J3w
z^#{9yK}}WK$t3soA_(N3jHX`E^k`81>Xtf^1*)-N5VAm`sW&$d3hcG?P!KtdeF!Tn
zZNkmm=ofxPoV*gRmo`VFYR!4v09C-#zjH~uK70lMvTE(k-$^R-<F{IokR5UsQ2LOS
z)6~n^^njc)kr9N_AB2=p;=k*OoiEmA3K5_?y}zaEDP5DNl(k~QOhW0PF92jI?YEt!
zvg)Lwf0%Ds2T=J^>a=50K+qNUt%TANgp^RSA73aixmP1kB0x7**fUN~=|N%BPwHxB
zPFB6T_2Lw1AIlu!dT1YF<02Yx+<9ID7Mc8w6bgSQSOBUM-?<31V%lf!@W#$>NhQue
zP^iSvvB?VQ)y!GZfb`X5)_9LrfLX&hp5dFi6;D3q0(?qcA#guu*gL>Q@<__LTvoEE
zLK(lws4@(}$*6*W!c)P;Hoh26riEh&2-5-(NBI5Aa=CJEHqp=i6>))mOGXt0Rw!O@
ze~)ijNj%;Hfn_{S8mRE?&w&Bgwi1s|K?@L%6KKcL2f62C3RfW>e^U)MF6VKwLhRjd
z+*IEeNyKG3Pgnt5X7D(Hr=-m1%yx`vLj>qi1TUBXRh2)?jr_9HmYktgb=U=*As_TI
zPZfXsu$5b~WLhj0plE>t3ea0^mvR?g9#};L=ybGBK?G>p#8ceaw8^`PLb{F?fp8Mj
zKooSs7Y5lp+BTUCHRWo+QNU2cAB6}jn(M_jUKkriI$u`=gU$yjAfi7zv}AX-MbM8j
zEJA1qK|>UX&|3TL8CTt$BTX~J7xoKHQzUCtgBM0MVe4Egq>%Ejr~_D80V;g%;-bya
zwd8nPAru@hP&JxV@AF_^LOPcs?QjkP!vtrna0UUIx-^^n8SDR#6q!2+2}MREigldu
zmxb146MquRj&*@7Smqszkv{jTvy6&v?j$?Z2npE%gz*AJOM9`;y0s1@I5N-z5F7^5
z7`<V=C#&XS6-G)#OF!5+ln9YHntXo`YQZ`;&!t}#G#kN5RY$;jj?cN7b01G72mY`j
z*o6ZJ+6XUv2ln}U<G+$P>8t`g#EF450xJ~9Imf`bp2YZC1P0?GX*{$GA42b0$!9(B
z_E&@_-p0rV+sd%Gv3K7QZ!-?IE$3~#z^fG@EUxd7t;E|C&;k-~W3+l-HdkT#6&sSz
z7Z4idv#9_L|5%8vEAnRuAzhZi)`7H%0QJ~y!CYI<c~7i%Lr7|s^q{uyEtsFG=5x{n
zmm@4R!F&Yhn9oZ%cl+*rhycCZ4ECLm0G)j+jGbE_X+~;whvtBTT8#y0v}bkZF!A+K
z((u<H7*tzR0s1zf8{6cPnNCjl6N1tcMggtL@MCYvsJ)2M2EnjbY7~dW;neCZ^>WcU
zM1YP(P#FQ*q?!jCKBv-2B0$d}3=yEh)!e2>tBC+D+X6NxCqM;I`tFccWSH-cKoS8e
zAbvDj_lXG56$n8E=#Ti?JcrrWi2ywm3cHdD(8LpagDgt?u0nFpyDdN<_hbZU|AWVa
z=Fg9#pI0A>(2xZZ0owmylfdPH^R38fWFf3X+JuD_2@F$Cxj@vK>iOPDdTH}gs@5*7
z4^`ROv&*Di-+uuBS+#b$YPza+zlroSpxSIe=|fgdfLeQu3wSa7(tSedF+xfx&2+O0
zoLBPrK|*O)pL4VIlzvMo^?u$~)p^v1V}#P$aR88|6lk_w<s2H6M6z(>ctGV#X++%-
z0l!+WYfUI|696fpR4l7x;PY6^{e;qsfS*hCl%|_WZoP+B3sH@9UhYFE&8rUpSxQZ3
z&R2aI7T=Oks@?!l`BHjSF)?6LwSK1vr3{3WP~yKsInZo;))=Cx_YHiq%1FuNrtasy
zvsFtUuo|Qz{!D=Dp(BdDn8;e;Y2gc(ul6Oi?|33C0JYClQx{)enk`<u$dxpg$0-1X
z<^n?qG2SJm2pdpicU>|*JsS)w!1!dGII*VQu-=tz`sn|Gz&oeP!A0^&oraz$!OBek
z@||evqX<r>F9h7&^*6W9_E{7efINnPT^N9XIKr2@WX4)`pWK2>Uy~7DGJPSiLh*@n
z^m6(`JpLYmWjs!1g4d^iaONkbw<R8LHxzbHJWil?L0R15uLCO(kGC2RP&tp26{4bE
zaJMJ0MZ{%J6JQ02#|d1kUB~sSaH$p1)N>HLV46BRa0RD$9QTl%;XkwtoT0p?-c|iF
z7m>YpDACjr6M;qnHFaptDlRp0@kFAjkE3-8qN(4%vtea#rM4pB)HDq?1K}j5fv8av
zT-vec$84LDIc+ln!<>dSb?>$A>~*J+-AOYqH3@7&GlvuqjSim_!Y+<3vY3Qm0zyLw
z8uDI*HY-=0o%nUbjZDLpXM$mv14Ob$fx9<E#e9OZ%aHQF6k%mG^^ZH2tf-gIY;wF-
zv%n%8FHkj_ELEx$n|!fZWzsv&A}|c4#tLW9)cjvp+3=jUjYyI4oegH8$cRL-G}Vr4
z&AK&ennEnkMR>4mqNyFWy0L$8FJ_SJ_=S*=9Y7eHwzs!up`k06kl^Ss2euBuVIYms
z^``l<>elPiNQpR#&`=^o;%Jz6>lVc924+1bRlU+&FixsE0<Lqp%bi*C)s-B$#XbPS
zfdg%X?>r`(3t9iciNwjGOu$2&7+51P|0T<udH*q`iSemN0Sv}P(pZOV+FFm<WmbDY
zyd8ZU@Wk5~c{tmSbtswrnt1!|NkGbZ8!vEaKY!psox_N?Kb?UEB;Lm8$~C@opKj(T
zNj^JV05r;HQ%${Yx+QyG=BW!I-S9GCfwYOH-p2o(EVAY3@x<!%Yk;IyNe@aZlf!lB
z)s6mPdH7AhLKDnKQ!lHzn7ce^<P)N)ciKJ`hrE0=b?V!;%<pLXwWL;ew*ws1YOJYe
zg?qAYC9g~%)%HAsLA5p2)IB0QusVa2=`Sx#EC&|p38R4i)UhG+zW?k!G5Q4|sZksf
zRiAjXF{>K2Cz`sKJs6eI)H&x}*^17`D-unei!ek}3s-kKl=+%y>hSVlK~7T(peFx5
zeI&#D5d_i?^Z2H&fG~e+!4XYe$pOqzO}&8CWNj4tI}=Skv=Uq{)zp)mGlTw>X*`hR
zp28I%kb5$kx|RQtph5qdS0z~xgV2x#5>4$<J20?#$qf1k?sbHfl{V#^KNKfwE%f2Y
zZF*_5N~+ez9BHZg>p!9|Y1g)o0U)c^{J34JYyLHFk)p5_q4Xgur>Q%bYXh9dW>zK+
zuJZ)!Nhm24E`cmD;|QU&GjPEHJ*B-;O2VHBToV%a5=uUe03b{0c$+mU^@|Tv2&J<K
zl`o~tH$4MJ&l;0UD5W(9dlE{iOFaS~+<SbB2+*?w7M;*jI%Z~iQ`i2(JXO)_G4z|~
zb+K?gv=6ay5e+DOU(&%f(}GE%@QZ^5pir0!(3k!dSo@TLY9dEABPdj2=-9+~WTZLE
zsX8;B%o-Kr!8pts#_<db(0bDvu&HGiS`zpKS-40ZNx54UEm@(zu?xwl@&&=ksDglN
z$GziH*4dOH)53&-ux*$YfH=Zu5BSP$u5{I!j4Ig(FBw%3Sol(ri^uF*l7XS64vfoq
zoHWpA8*`=|yCIEud=pxLc$`4bUH!tfZNBRk@p#}+ur23tvcjL=zd5_5C#lDK4}%pX
z9w%@wueaQjA^{#mfZj&%f(g(p{*$^@`s}A)u-1P#>;leEUVxSh`^|+ueLIW@(3@z1
z0t(P6&5v;LRjt1g0ori{Y@{FpG&$Ly*`>Us-(|EHEdt>rrhzEv1f4S1v)>ys)HsZU
z9l%h71?c{luB`WkPxDFV8;W4i`5*;Eqhm{O?6UgCOA><j5gJ0!koO`~_@r)1>q%au
zY1W<y`-P?{k~OM9;gh<K*ZilG^1l&bWd&%0M!|Ny|K&`w)h-Pz!tnxCqsdz*i!i5>
zt%6DK7>2+w!5J%@L4XR6KbX7Mr@z|u7$Kp^h(xiDtNOGvdw5hq8*#Hqux+qxB0z6^
ztinIvnj1+f%qoP0>;S^pblbUsOr3jp3JDJL$zU3S!$2CNh0lugc2q`@s;)t3C=nuY
zH2Dfnb|1QG3#sZS5u8+Y1Uxh2CHLt5y7i>0znTfVh64xM2!BF*p3~ka8$#kFViw>b
zP7JIOSfQ|B)zZh&9~nD`z+hY?jfeIXwtg)9rUxT&<~<vX6K`YW)+j4hIV9SLczYH?
z%6S_v;M%1*Yv<;BlX&|pT0r7$jNZ5AGB?u9<0HxEusN`Cl+UID^g&MxcCp60F@*GP
zgay(j0`#+EAr|I!XA!Y#J2yY8qz5%WW6i#4H1|mpOhQ;_g82wgwq*-fZ?X6PhyaaW
z3fA)xpsJA_+18Y)QKVLXK{%+@Sb*+X>%(#t$}^<e_F4w!pxT-W(3TNR*^HrS?a2w>
zLr{9cD4^dq@L{u>Ta+Y5o2KVy6o<souxc#(oTfezplc9RMu1w_RA$%a?AlKRsB$@&
zBLY;o`q{@x7l;6jL08KOPyzI4BDaAI^Oq1vB0vSirt{(5i2(Ik0h^)%)N^1>_U`1h
zVnl#eT?dy-1!%feX3&#!;}4PCTZtf$dolv_YU?9GCtZD9NfubI2ZNA%5&@diE-<k3
z`&FIDX~ZL}MB40$sUB!=_OLrqYmOB&&+4VkIjLGZf3B6PURsrQq+KUF13*@-SyatX
zjWd5kKh|2>1yK5sl@p-uSK9|HDRIk%P@0U85=vhh)(UKBIoFa<3R?I3x}MS_DW!6I
z+Nt(e+SPzi+O{45vXmyi-k^$0IXseNVb}&h<x6S${^kKAKQKQ+X(K{PDDgjO6lniC
z*OySLKezcKJ*6*FN)6)Ms9YA?O(T^4%maWdr31TXs9YNkqu=s3aXz5(rSxlg!+?_!
zfBq0kzYtPF>E&Q9(EawGQbbet+|Io)QZl)zd&zdL$}MGQDbf+yPPiUAqS%XxtQ8)V
z_WRkf9jSfy(E?C#O*M7)FeUr*y~ah-T>9+-+t6HK2qDHDYnrh`pZZgU^%SiD<CAgX
z#F|?8_AA|$X!=!3-FL$V04|b8>U4AyYj#TEQJG9%R}h>`UkF$xBb&Q+VWtzAzJm6E
zT^N9XIKux-`ptbm;%-5vue}H_nZ6KMp}4_0Oiy<t9{1P_#$`NCW&&0De_TfPWd-s0
zLbL$!IDzh1w1ZnwCnTA8{PhvAE$4Bvf~GY87x7K?8<T<g$Wd4UTxRe%fq(okohx?a
z%6g)ygO35cV48Z<)_vUIwENe{8D^qo;0)z8^{nA*xQ-?JZX%jGAPe?UKuvwM_B`(I
zoT@#Ere2HIDTt;%c;EncYgN={5>C#?VKWd;Vj74VRd`VP+2H5X$ecC{fniR=np*f>
z)q$(OdXQ#r{vX(cW)3MJ8oiYn%UnKJctJvNFhWBJ8lpgi_DuZ9T}qEiCUd}y3t$-L
z0FkUw;6~dtVb%kOekJ9<;YGm8YU*0{rP$LZ3mTB)J%&(lyg=1x@_l_5Ry=+79MTR0
zE`doHN{toHps8ETG-vf!53wRe<~u?{kr9bvX)1iz^!N4|7l`GA%W{@YH1#W088##N
zX(-8#rw9qz0fe#X^ChfV(f$YOlHllh1x!P57)WFE$$=G_cAHf>QX)PeG?WODI2z`c
zO<~OYz|1$Ks%x%-aZ=S0(89McI~N>xgdBK{y8wg(2igc<cHm3ymEUnI5+_**4{>5(
zjlc>;SvKKCEBb|s4e!Yr7fE9sa^{B@vnn$CD)II{goiley^WFGHx^@GB8sgd-mZ2Z
zEX#QtFEFQW6}GDId-@GLOVI)nZ)0?g?_aoCEi>ug$m}10d6dtlnp!ckD66-3K@K53
z0bzl(sivM<n4RU`?<G~_H$qaYqz83&&gJ$xzW73#VEjYaF*Lz^H1(t!OSo0fN-QFp
zy7Eg{As<aWxnmgnm-CxGD18RupjKl|{W`ratI>YdW>Rh2z5;VlZA~?G<Bq|sM??sH
zp79!j(i27jeOc3x&GmZTlNfFHIzOX0Bv$^e!MM8{Q;4R%j-WD{+P01>>rv-yBciFh
zya97WQwvxBK2p0Z(bPB5)pD9z0HqvC*-eJ|upBU<ALj8*T>;T}*yWByQ(r|0s;RTj
zR%d<YS9(G;^~*0{K&q*4KRgulV~y2JQfViA1qkGxjHceII~26%!1@s+3#`5Y8nQs5
zsZS5$h03i?9m#2oL|9pA6CPY~Gf``8r<MMsmo^j4C6D0xyr`-2cd}ea+I7Vl0FYH{
z<>CjcTwZ1mB}L&ZLg_<RPE$vw)eb0gex@&>)Nv-*lThko-z+d@OQo`e((>z#f9okN
zmQqSt?5fKA>~BpdIot$*ET!%j)~eb+o79C+%0Q@mDV>jZ4~U-OypT{Te+%qMDBa!N
zI<V-YZ*zzMbspEF=x5<qnpT^e-qdy3n6CPr<4Wa-CJnBK_8~Sdq5*#ovSK@$PNlc_
zDola}pgQrLi$K3u`NUlv>d}c*;&cRsN(>#FtdQ(kg86k#d`M;u`^jJ&W)0(bh6QNF
zlb5+e&(nSo_;gvgNFGVKd8wt@7l(^$$*5vI1+0@%1p!+$R<PxcQ|V{AMj)WvsDkiz
zb&9g_y?0F|qskYAmy9Y1tWbR6CLTFPf1RV>RM@qQ$4LX-#Q*C3>A3DOq&2ushXsho
z3AE~jvs}3}e|zHba{B-(=W(*anolpeR)ZWC5|=$kD@Z&};JbG2<R-s4yo(6X=>4#P
zf(g*zoyWLM9~YG%L)R;`44fe!^n9_3O^DGZBxvJe!!<fxoX*xU?E<%Y>FohTfc7~6
z`zW9Q9a45Pw<~j8S0X@fp>+x(Kp))y!%cWT{|O1FHV0ud5Kdwmh=R^BxFcKfUvL@`
zpjikELk)ivBJB8ss?4+X`(vc@`DcPn=zNg-BD%$YJ=pMtbz6}T+=$Q+f`+^op|$o~
zvT@ECe@WByvhFDsNNAcOS)&@v4s67(*qlB=%KuM<l@*|`Kb2tbzI1L$jyJ6sScKyR
zsz#HudRAapt3@0jy~DaVfMJ3&RyczI?PG7jvg&5juLDR!NGLKQQLN*Hk0sx3l}>+U
zwWy7pW!|wE85L#6Vn-$nCfP9#At5_}FkV3Lkwwndy(BoyOMqzz4g+b77QTKtC+V0w
zDG}=s8cKvn98JET?lxsjXAF)ZRo$^97$;R70cWbsSe^dM=?^Ydj|LzdIM7D;m6iT-
zvCfyuk~n#T@DL{k)(EUnShBUJKV2j0cW{iHagj70+J#%$<r+1$Cvo-*;UUg=Z)0TX
zBpddw$xixvaw)N3S<c&dfurF~+47ku?TNS7PKN~~-o|L*>zA9^kEfr!?>z(1D4$IQ
zXkiB{c46%6QG|4jnSce-CIU34PI0!{V%IBDMRp)0wMu$W{r>Mc&rT=llTCiJz$!Gs
zd<5vVBD=W_hbM**0lFKlkdFX;^}kMR<6ifgq*nXQ2Fp;Zu>cLN;KllyT{=jr?LGv9
zYHKP$yUq(?s~hdKCMR5P4p^ioi~@SV;fAcwprks)=pKZmMoCDFuf=B7x3DDwv{9{I
zVu_a#ptlxPVZP&bzb68;o;P5K02Qvb__5&@5ugXr)p7z<01X|!z6KfQy=sFAi2xN4
zrLI_=Ap$fLA*cX7e4#p9-OOSz5umMoz<^YMx<?)g@@U1aAh~w~K_K^J1nAQDhk`W2
zAG(k%=<W*!Aqyk|bnq)KFwCs=9&#G@5LO~>?8mnY?6|-AQli%Cc1kU#mo{6aYR#&G
zlj@L5ogJiIpY03)S+%xj)F0Ky`KK$9ke%EGQ2LOS6QKTGDg>OfcArcrJw-?frLeHD
zz#I1Qk%ZFBO1tg!l#WX&Eedm16+ar^gHXzK2Y@W4?Fq|Ndm7YxODN6u1XRA1ma~!p
zvns4ONGNrz2}lX0i=TT0{(F(RhERH(_|-*E>6Vnz!sLpoTXv&r6G~T-0U%4s_477W
ztzFfN5lS5f11eui)*eLy%nF5j5K12qQbLLUG3UTnr!IA&ntIP?cOxZ}o4OZguTp(}
z`K<_<e-7=1>!Blxy_m>a;im4-3O_tZ?d!h}7J%BvH)8^Q6lBfzJgZ87;N}H_LUVy3
zgskA~X2zUv-9Jypr<(i0IE+umi4$w;lLO1Lw~ZX?6L^v=TqKXwsqpPra~{V%C)3wH
z1SiuM0ygPin5AYvn?wenTm+Pxz7W1?<KLWH@WC5o`r-}%ZOQb7zzRiCHo-0O0`d4L
z1eWnQnF)j+v2yv>k!tGWXaVAJ0u8<WgnQxh&6;RxkK?d$IggVSo?kA+#*7RZO<cAb
ztswC@forv=xhB(htR|ZJGlCaPQw!gI_2NeVujCBt{|7wa4Edm!(bO9UXK+!wEx!{@
zJq#^SKu!JKb3PZ|Wmr3+sgI&{3ZkjQ!gg^pi`y+D;be9K*g!amX&`FUd8zGLT)S6A
z$eh*`fniSLk3xiX{aBUVO=6u%Garv&(99wCMf9Ogp=|W&tz}5jIgQW|f`+^op~8dG
zzS@d?$Q<ze9FT=MKqPAvxPqqM@uz4NQvSP~2du27Za>7DZBKr5f*kL9go5J*sz#G*
zt~;{K=&SEZ?|6^EFq9fAoIz7B&Hl-4He=^Wk@2|zn}s4H62;O~__5?4^Fk*P%Rdkv
zEc1@V$g{^QvhPJRyOZqbb`dN?b^u|#!13i}*s1JwHY7L}qXi&145Tqy(A2hWtA>&i
z@c^NrM2N)EFmLDAoCRl$Z9uBJ^Cj3lsp<%5Ubqk&H*WJwa^MfH0}u`zXd`?j%fjr!
zr$!DWPI}z{Jj98CH3BR6UqMLIo*YJuuSH-mE|SJNM7XKjyj`b~#M|!?o_HH0g`2va
zrd{4ZyzP4vwk_vvynt|1H@Tdee%DzdT0r7$j23R{b{)Soh~)Dgghu&ns;NEV&DnsB
z_q!9)KM@v4n`r6{`_0*e?jD1Q)%v%9G_^{4kaJ`aW*>8MFlmCR2n$UxA5H!9L^`*j
zD;GvIwZj9jo{y%kXWxN6&q=W$wR$|lL9ND`I%IfFmbK?LeNei|Lof%`)>KoETicFx
zfA_dIIpIwRN>3OCbdmB6*!j=ttx1Xhh>+AM4vEudp3JMYe;1;u>pp`0%4q5?-QAe>
z(*Qf7sRttr(bU4#&nKnsrJ5RDEvKmk(6M>;eaSHY4}tW<Jie(bAYQlj96&U6;A0>{
zHMO~-CVM)6X)U6uFTa4xrJB0z;ln|it7bI!+GYa;a!*E6kD8Mibf$U=&4R@U4Ot-3
z)ct0t0^6)`{6tRU8N$j+n-U9cUlX-<bW6RudTEm*Rcp$dl~tLme!nH{TD=VbvTAMn
zx67&@i~D+$kgc*EQ2LOS)6}mje(@jDa6}nGX)!`dC}nv^2QK+nyd<GCVr5sRr!=yN
z<UY06c1P9a-1_$krK%YKkfl`1JzW)l?>qetx)TVMFQv>k_x*2t9RGw+a$XJgB$QMa
z69T8j4R1*VXpveiTk0uIDq?z5_jB7VD!21zK9KgY!yB%L_8~Sdq5;==n6chE&u~&G
z!fL|;P$*0VsBMQw+<3F}J){y}K~Si~(6Nc};0G4$#)TmI#X$}}U>s%*<9LPzsPNkX
z3u}+pkWr<VEL<dyq?~Yf^NBjQ6d6@kAvhUT5YVjK3+~{{1{q{pc!_{8EdX(Z|LpaU
zoBT=Ji;OC@e1W!PR6$^c;vMI@`OHk>@dN~x@i=Lq!fpHmUpsvv9^Z);ARZ^s3;iE(
znf-fCBOd?M05&e?ak7H-k0Q*v@P#JCWdj?+3KEYKxRcW%uJh&@dx-!&jNk<mpnF3y
zxvtiY=%<~Fs$dszhVlZmXU%zB<GE&Ki2!Yd7AT+qT@d^~&QCpIHxZ!I&^iSXpbAAg
zSNF4W3<;-8Xb}h}F%3jPci>tFcKynLGi0bK&B0N?P=f_%Q?5FDI6!@aXp9aB2AvO5
zKs4&>+kqY2pZ1!B;2ea85H#ey2vsObvZ&Qg{$zBV&;<4iO;aRmRD;%w8nZ!fg4Lw_
z|3Fw-0UETXIP36k!B%p-N1B2~I9{M?G|ByNVY%E_Thb28Gy^b9aK;K}5TF-7|K|3G
zU2aT@Oc#WNA|n#TI<D927A)uK%0|TUB7_IaCIWQ*D|^;&(c&CZVeTU&WCsw&rgwfS
z!`?n_|Az!erRG2yg2O->qn`yhu^8@uB}j?rgV0bSMB-@j75>UltcU6gsp|U>oK$rL
ze4CTQece3$COz=huxmJQppEc*?!M*vZ0$h5RR1c%L!1~`Bd|h|!@av7b$}RmY9nV{
zB#npm<aHj*zL|SN;%yDW6K`YWW<UO$h{HEMBHmt$kaFI}3oKkvo^4)smws9>2Q47+
zHbyt^@{U{AyW~oe&t9QG9ObjA04-@=nC(7R@f9H*kFY@6M1UUpFPA%&*1I~fx*Z{@
zRnmi|?J;9n$BXr)O%P$B3Fad}+ea?o3LSm5f(XzZVZb>b0jl%vz=nM(8$fDxhfaWl
zT8#y0>|HO`ul%6$q}r}PFsQbs0(5q}mTb+^6#7*0O9Z7Si~@RiRvqR%sdgkWTB9@U
zl^Vq%(bKFN8!#sM9TA{91eFn>jXhjh$ECv`5COUqVTb?~uAa7Z(lsJLJ-Wc=<OHbR
z#N-_#$S@y`KoS8eAneM1Ngx7r3qnu<+GAohcD3~Vw?u%x?FqY*3eZnM`-2W2>ySe#
zt+p3HAopYhXyNKvLEUW~H<B#ajnI$<5&`P#+c>b|mZx3GY2+fTMB1b-O%Aj?f7+g?
zwf`#g2-QoQ*;2KZ+1^fdtM%Pyq+Q240zg)+HFtWbx*Ez|CPm>pLg_<RPJm9|f6~9@
z-m&y!t^J+Ao`lk#m?436KD$jLlyY4Tb=6Z^E2R|D)k3xKgeScN+MqH3WGPkMwm>yM
z`ciR1=_*3yODQsKoBxRk=N=F*Rdxe=5=!>tM+I(O8N7f{vTpV<LQm<Slv4lJpBo+v
zYq*PeDXlpGWGNN?{ajVqecU%f$uk&G`BLJ4CfNVzjyppLr7;L8p%fN2F0k#WYFmk>
z{%`qbt&x(+O<gzhovIgsLo7%~T)7gihmI)rVj^pWJDdCZ<~Whs=e`OSfZAuOspt2%
zVb6S4{U*(24uV2+fgyw#e`ryRE%@0yl#EZgXayLbj1wo;)YG1pWV&A+z7lxv4A?us
zMe<0U3gU8}+eue4eH}$`GJPT7nH|r$1y3#&CIgV`YOpIeeIdN>v+vxwZN<~cls*RG
zCDRuI3!l{WvVS&&c>Eax%Xpm31ln=M*|PR^PZEzeT?4x(9w*T6zyETUK0#NB$8)y=
zRL<jM1>tu;?z*M?BQBf14ORe`89Ywl!na@j)RlTkG<Biv056!PzA)-I*P_uWH95on
zXc;&|c}@Mc%1-Xg(u825sV}1i3aF`FT~}~N4xd|0G<Dq_z^5RZdc*oNTxk6+<48Eo
zMT<Z<iD@8e)NS?JvfA5zD#)Dn1A$>q!<xFEiziDw7jcy|^KLs~<Iv0@_eG-$MId{&
zI)MJj#y*6G5Hv)A2o-+PX#9(H=gAz9aR3a%93YZ43fy}c9Q*uqZgEonD;@-_tft;R
z#D?jv@1wsMH4&lUc!8?Xq{Z0M>`JYs^cklQ2n<81vBDWN^@jCIHup)@cBII3$b`*8
zkr9bvX*#+|2y<!F?hWZ$yAU2Mn`r7|$qsCleMWJT9r1_2GGqr3#-?jNwPTSkr>B$P
zxQ`Zq;4qNJXhBndIb%cr&>D0Y%tMI~iKAg&oe;?WdwhNlsp_i`oK$rLRH}2i11C-l
zAqVc{{zYuPaNs~2;qBMI;%v5+b|Yz6)D!R!CkECCEIcUP<?_Wb#CT^f0E2OnG}a-Z
zCu*?+vqsUM&iddDc;ao0eDSsz8`UDaJn?peI)Ie(HeMjPiYIG-Xzw=S?Q#CFfW+Gv
z%}xEl#cQHllYDMi56~!|O*M6mLsslys*-*;(=mhv(k7bPVJ!dI5{p$Gh}B{B^Rr5N
zkX;`uHsfLY4AKOTHUKO%!F)9JluPTlxsNvc5l#KL2dt2froMKy9h;`vai7%cl-_`Y
zT8%Yz#tSdD<=RjUY52MbfI+o2)zsSP7R+lG>qLr3SQJ3%38R1>e8!J;I`AN!7!A<^
zk{ZP!(OKuo)Z@<h5KYZR15`#+Cs(b)I{D6dL^O4kIKU81EnHpHGl?Ub+Fl1&%V}x>
z)b9K7@no0}NCuF8n8!DD1;nLiAA1l@?Vbt<s;Qs*R%hRO#8?qc{dFu{F4fem*^!{b
z=Kd{6?)gsy2;`oOrtVWRGwAY+XEjI`1Wg7s<eo%Rcd}6heqN-pA*a!N8enCmP2Eqa
zg@{_qxbZ7lFKsSK)taXKlZIdC=UgZ4T6GHmvTAMO*F&lfZ^||$A?tD*Q2LOS)71P=
z8u`EGT<A^R(FiG_G}A3DaQgFrxrEZpd5uTwDLs=?itKr>Vdo~*?-5G23jiQX>B`0f
zs_wqkei2IR5h`Cw{MRr0pS$bRgLtXRLa-;Hbn)|)z>gUxXAuGFTPSU+p3*zXP2Gyt
z2USs;ur;K8R5gR^p?!#ri)cXM>z9w53N<H%;wV}G3Wccv-LkwGtD+jYkyK(|bFdAS
z7&<mFzHmVic5L(}`ujhrXa$%xjN=&=pu$td@9*pAZ|WYFg^T2oloRe}-@6n2fQ%~U
zMPUDAR6)R`s~<S`_SS95w9p&@VOjv<2tV}x8_sFDZX+30(hy!UsvxjJ@r^58>)%x3
z@v{gl<8jhJ-MSTG-kC1PiN{M=0Bz!N0&Um*Cb#%ey|KjOJxc&o&f{c-M>W544VKzP
z5&`-ItswC@feW|s4{TG{BLdXBBy6By0#x{_s46?QX~`L;pk?3;<pt=gMJu`a6;}Ks
z0yGmXP(T4XPC18j-hZbm5uk-i0iS{hP~oZKB|mNHw|O^4i$FMuX&?$Z;im4^Tj5lI
zjzeG=YOnxZIk_fti`dzPbiNY^2AvO5Kr|}+4CjK!+BPHvElUG!2th;Mi%>y;Zk)Cz
zf;7!G_5g*ZDUvm+!LN}G*&e4C<w^O^LReV=8dX=x&XhM_PO`N`d9Vn_3sj9JOU|)j
z)uW!!XL~y#Fidd93Wq+a`+0vI`jG1)goGj^62&@Bc&hlB*$pq!wH_cmST+%$1I{=w
z_n=itBs*LkfG}hS5XPo&sLQhSb2sT1@@_>7KyVmHV|2xnmDq59rvs!!d_!m`5h8Ik
z`R+W{jEy>1w;QSI4J*LzNmWO{xhaL2jrIZ6fIF%H5Dpw@BYfpw3f8sNin}CE>be6S
z;>5rjffWjKHs5M=CTS@X5g3e%r18)${EYFqlPl;`#n%y@cpD>^hL&LMEjjv-kdp^&
zTh7~ffku{<n5My#YNQnOMGHv0jnP9L3h~ck*N!Clyal0AKAQ^A3sFVcWvd+e#Kc#G
z1=1!0bWGXb+<UjoUBqgGsz91rB|T_$Mj_^s-02Kyf};@@nqWQxwAB9P+<}bM*N6ah
zt_9Zf5ukDW&tyAI+j)Tq&@%`JwHgah`|)1vLh86%q}sOj26IqtO$F%gQY~1_-?RWy
zMCKqUJz*5kQ!WIs{Y9$A5~Ft!k{ZP!(W-iNW^->rRU$y0Yr}qJ1ZZ}kD|_yJ+nxx}
zB!nRXRJi&`w8s-7Ko6p;<pihzifmK=DH-O2ePCM>0V*K67dr5q2+(N=K?UgNr=BdS
zUJ*wkKsN+{0jU6uD}6kucf|#bNbWh;0|?}vi~x;_+81QmxfcEE^H_w2ERYD$gnsn`
z?OeC)Bd4(sVI|Tg_U^R6nw{gP6SWrC@yJZQwD}=bYhV6cYj~(m?sn3yUxficR;?v3
zU#&V?DM?L2c0wmW=|fgdfbw4z<$rCZ!#zUj5kg8R70a3)SY<`(974%_MfD|mN>eN(
zPZfXNcc<a8iN)#HFQ=>ofGnkD>4#OFiZ0tiDD_<hsC+5$A8Yk5@*tuyp;RaXkP=EQ
zX3h?DRqL7&N^bsR*61mvODUC4xY@8r#IYRWrT+B*AWNye$0Jo#@uN)$rMC3}l`kd!
zKDFP&j%$_^N^T7RDWNo8JwI?{zr$ULrvANY>Lw#4lbgD`;t#9pufCg0=AUO<;CkqY
zVlO7L*3PdeyVKz9GE)1Zw!#8X`%E?Uop#pjYqu}!NpsnWpwL`k2qDJPCR(zJ3zm71
z@#zOz0mdie#ECWavj7|R*|Fn#0&lPl_6~57JW;0#R;^r#V`Taof#78NLcqRzK5!v>
z9G8#*=m-MB00hJlUie{If6o=cWcn(+9ri7mz7SZU_|0`q>$He?yfFgHc$~}xKErdl
zl}CrHA|4-y79bucP#e!nT<i4nzlg`}j=;v{JWf`yJ@$s{v@~e|aoKLP0=UfJaRTqw
zZ9i9Z#*H+hsY@J%4HQgM3paJU-%n{k&af9+2F_4kQ%}9UguDH@S}mff*PsOosHs=G
z&gDW{wjD<_^?S5VK{Rz#)N1bH_-Uaeoa!8dLxFG-(?Hay!w+<1XFjENAamLf1co^c
zYwGB#wV37YN*dD4k0KZ}b4UTvs6x?}MLNCpCLw5<1v`ciG(>?2z1*}MYt`ZIKr#)F
zJ`GTq14Ob$fjh*9W5v=-)F<WtH^Rzl>Nh#oOnEigg&gnpGhh*p7pNLdR%ub1y;o%Y
zM|y|%SpdUOYOHWbQy*S9xGX6$BM=gbj7StqQ{h2rk9KWG6U#pk9xR(^YT?P(s->!S
zC)uGo2YZI>0K(XGgKcHly3Ruek>D7C7J%R|kj7~KhYH!zku^O@iMWf<P$ER)Xqc}*
zJ(wL-Ep#JQ-T6H1o>X-NJU6Kji^weDP7eIb6#&A418sya-m(Zwo!6x{iIb$OfQL9S
zuts2^s?W|!pGJ%yLtrp2lEyluY4sXxS)CkuQ&)Knj1zBT<n6TLtVH`K^e-rF5K_+D
zcmcs*n=jfGBHmtr7La%wqpi;V;gZJBZ%Fd_1wx~IHr3Sj<E>bU2#ZV-;`Oe}kv7rP
z9=jDRV{fDO#Og?dq*h4}Vjcf*E9*3EMVjDIgoP%UkEV{!UCmXv-e5M-)Ti&lzVp%4
zLDxI7#1k7jl3LB(102+9tf^zty;wDm;vuBkPDC)Mwx*i8Uis!Mu~c?na>C~k6v9e?
zqJR#V62Lk~Or1iER=E#*rABc`6wC5teJp;AB$_%JL1i?x!%<hZ?$(-NL{o1?7^10#
ztA}qY)`Mv3Pv~knO)Y?SH#<dt(x~nOV5A@B@l9O;QQ5s;3!<q9Aq3UbYxjGys{N{V
zA(}epDHxDyYVTG@gYNEc-;Ly6w`Tx>+>_DNeM%k-`dNgTkt|q&(2xZZO<mivVc^m!
zttJs^^BiGirA^R>;d_W$i(~^2=%vk0d9^k&t<`MOuAPDaAgk8A27XtCRH}WDgzRC2
z(uZt5n%b}Y`zN&srRI&mo`h1C_u@cJxxKlBlHLB(|LG~6l2U3Af1%;An0|){rHlgr
zkfn5g#aY#zTZO+9N+AaUl`ka)|F6RMHmwPz)d(q}#DD58@Itv0^!RFzO22HRgly>#
zF<h4vo2ZS^*q-q(-gNo+JB7$Bdww(^^|Nf=!RH}mLejj`p}>|MNB#2^cFzARw#KEC
zRrnZKq>8e#vLaa|Q4^mK9<R}bCukz#Vk2!&4?4s(i{?s`D!C0Up|6sLCE^m*(cw|i
waS679wUrL15(w;1J}{kH8yn7}5^R4T4pvPp?LC&9aI>+nVc8Ql&r%%ve|1P)QUCw|

literal 0
HcmV?d00001

diff --git a/logs/second_run_logs/run-2024-08-25--18-57-46/events.out.tfevents.1724612332.26610f3d33fa.1894.0 b/logs/second_run_logs/run-2024-08-25--18-57-46/events.out.tfevents.1724612332.26610f3d33fa.1894.0
new file mode 100644
index 0000000000000000000000000000000000000000..cd6b3acc3dd59de5924f5c744c6dfed965de0bc9
GIT binary patch
literal 4110
zcmai1&x;*J5LOAAEQpwk;2~M&D0wgYHoI{XL^Kds6pX037^LYp(=)HLJ>6q>&+fj2
z`~kt6Ac7#ddGR9N#J|IXC;bBikKV+ux@UT3HZO4xyKGf;b^ZA2tDeIb;?KWdAAWz~
z{^K`({p07~-+%MfrR7r3ZyyWoZ|;6F-@4+3cE&CZx5eDGb<OR1eqyB;b}q<@l~M^7
zC4c|;-jg3*i+=z7=E+wVe)}c*ee1i+Ti;&ZdY#+KUG?JBv(DA6e=ePW<>I-EFP(q!
z)?b$%Zf$R8ru9whSs`t9b32<~wZgT^yQ{X<v)!HDz1hyeY<GV)TO7>x-=6JV&#r`1
z$HImn*yMc4l@r0BFw9eKa5@o+W3C|cwoP`86@|?G#FEUIg9k6FH3jCpXk5IzvzJCQ
zWt?M1D>50bX@q5kIF`AHF|Z}~`HDIDK)`NsJp?#13YR_0rfS+vUUaKe@<TRHJ4q&$
z<%Oi)@jP$qR&h_U$Bb@n?*=#F_(pwl6HE%BIAyyNPAS2ymbzk8C`hk~+0K5@oCsO1
z&{mO0^ZCvq$a%qQwiKQ($jSW1RJa?3H%4NVT`EI;T)_E3u&?<kqokNNwkFLyhNIiM
ztCeU>zH(4q>r`~4&-h@ki{3JquSC%*G|aCX${<rxBdOSFH8wapAvnY2;iH%#w{_c4
z=F&jAGmi<vip2e0-A$%q3iCtI%Np*SJQm#*y@pxQD4BDzfDfwbV1afC4hU&eP>>0|
z%2XAVKTQt=|2I1QnP;H8SC`T{%q?J#3kPiX>OQ~Slwb%%bIqhKi6uZ8<4_$x7I3$5
zqB<Q-7@D~gT?U~y9)qZ4EonX@RHHR7MdT_vlL``aXR0*>-)YsF)hHx2W6*(jys?7i
zT&X3`j}R*`?O16|&47~ps4-GcSwb6@w*|Fq@Y6c@XxgV_b>lixn5di(1q6FBwSs%j
zI#HUt`_hMa!-!7~(_OL=rxW3_TwI?B$|eX>jp%=78b2WN&LTo>#Z}2bZ@F~D5L5=C
zDZGrg-b5R6&q}b*)8Pcin^nHNMzfuAQU-O#?C2v%W50`{8QqvHu^kxAb&lpxt0#(D
zcq?-%?6R$(9NN%J?tKx4l?tm2FNm3~X_(?}+R8v~J*fA)h=xNH!HKa)!cq*8g+@M6
zGr$o8TJjty8i$E#_ZW#HbZHhj0=tcAUI)H;{+=tO<4Y`D|7Af$H{@qUZ7@)+1_lZg
zN42${OoHM|p~5Ybs!<WZzlbS}0OOv;7AJVbnzGW^ksa>N$_^!!6_^amq$46fGv=}a
zCkqyVd?csjQM!TzT~FvgR!fuu#cjCdHANarA9T6VrKGmfLjg~P&9+dSs>uvS2@5`s
z33jP$V@+5fnCgTBToRc*90XHoha%L-6e|zEbLE&fc>8clBH<8AQ(%E`^c{VeM(jZ8
zKCb@&-f<vkZ4=%FVe?P0jWFr4H9FXL2O1iBqKJSC$Lj`{Bc73krrE`W9nlV%X21C<
zkX9QdmC8^E5ONdwDP<dB<!WR6iZ)LIe82xdERvv_(WXB^!EC;Y%n>xKTcXJ!%#x2(
zo~*J4OLVygN9oG1##v-h)L}l&QD+#Y9jnw>kGpa1Uc}DmU7JDDu>t^CVmA9R3LwN<
z2}7PD48+ue3O$ws5W9resnTMoa+>pHTb5@}4<yn6>ow-q(gUjRS2m^*oRuIUJk<uW
z8H(aCPn`q&3CuTkM8Hs@;`y35oSwAtiE5@p*tBj%I9|1s$l#qG-nH1U?^3Qf(Xoc4
zr7)3?H#@Lv*Nm2Jw^ne@8>|u>%HBD=dy9Q`=fm3{y*p?dE04h`^ai4y4MEYBT%GW>
zV-)v9m#{o+=QDr@jJlWa#Dq=o9uOmSuG#|IRO4d=OCPg5RCov{K7q0Ijn2WG!{nxJ
zh8?(Zanjkge|o29P%7f6EgOqG)*D-KEguAD<3@~ahJt^5_tQgEoSiLv-9*;y0iVTw
z(5jD9G5I7JiV!w@L8m~-(oOeLnD=6h?M97E*;x)i<feOK)AEgL83PEq5cr6L_2Qs=
z`UNK_a^2u#bJFvPX5({#_@+M^3ONqrIh>oF<?0lBkBv}K;1H|82_S%GV5mxOMawLp
zC_>&5S5Ln}wJ3e71s?Mf6U$YzqV1Kgi!|xMlOP-{X?&ebHhAnxK;knYiUok@s~uL`
TqYut+y}WgfzQrGZ{!{iJaFsSy

literal 0
HcmV?d00001

diff --git a/logs/second_run_logs/run-2024-08-25--18-57-46/events.out.tfevents.1724612413.26610f3d33fa.1894.1 b/logs/second_run_logs/run-2024-08-25--18-57-46/events.out.tfevents.1724612413.26610f3d33fa.1894.1
new file mode 100644
index 0000000000000000000000000000000000000000..1dcb305968855178b24dd6f7a555aee19ce2cfc5
GIT binary patch
literal 88
zcmeZZfPjCKJmzw~+3$1y{-&FbQoKn;iJ5tNu4SotC00g3dR#gssd>fuMM?RIMJam4
hrMbC@MU{HxMVTe3MS7_qRq<(=IjQjw5$P!#odAX1AyfbW

literal 0
HcmV?d00001

diff --git a/logs/second_run_logs/run-2024-08-25--19-00-48/events.out.tfevents.1724612452.26610f3d33fa.1894.2 b/logs/second_run_logs/run-2024-08-25--19-00-48/events.out.tfevents.1724612452.26610f3d33fa.1894.2
new file mode 100644
index 0000000000000000000000000000000000000000..3619b50087b5c9d5154ef9d48164fbb6b557b856
GIT binary patch
literal 88
zcmeZZfPjCKJmzx#+^T%@{-&FbQoKn;iJ5tNu4SotC00g3dR#gssd>fuMM?RIMJam4
hrMbC@MU{HxMVTe3MS7_qRq<(=IjQjw5&ur38vuSTA$R})

literal 0
HcmV?d00001

diff --git a/logs/second_run_logs/run-2024-08-25--19-00-48/events.out.tfevents.1724612515.26610f3d33fa.1894.3 b/logs/second_run_logs/run-2024-08-25--19-00-48/events.out.tfevents.1724612515.26610f3d33fa.1894.3
new file mode 100644
index 0000000000000000000000000000000000000000..bb878f89e4e3b7341f99e0c1014a936814064682
GIT binary patch
literal 88
zcmeZZfPjCKJmzxx@_v1Bf74AzDc+=_#LPTB*Rs^S5-X!1JuaP+)V$*SqNM!9q7=R2
h(%js{qDsB;qRf)iBE3|Qs`#|boYZ)T$Ug?vZU9^kAkhE-

literal 0
HcmV?d00001

diff --git a/logs/second_run_logs/run-2024-08-25--19-02-49/events.out.tfevents.1724612582.26610f3d33fa.5666.0 b/logs/second_run_logs/run-2024-08-25--19-02-49/events.out.tfevents.1724612582.26610f3d33fa.5666.0
new file mode 100644
index 0000000000000000000000000000000000000000..7588ca876fce4b56f15ca5f21534edb4b4f6fcc6
GIT binary patch
literal 4317
zcmai2&#NWH6&^)&-j$#;Iw1^3UoK?f;JtJ2n>Ul0AyFI<9tlKAbR|;O?XK=ydDT^&
zs_OUVI=BdA6S7Jm1Og%8A|NCnLdYWd2P6wOZbB9cfe4~;5s0`@^PQ^h>h3pp%q-sU
z&N+4J{P@mys^7t>`0w*y9{hIaFZ_*9-+lA%PoMwF%5tga&m0NuukZa}K6%0m?TlR+
zZu7Zos*2mq{Mbq_>|Br)E2I)EO78ypKi~TO1JUn$zxv(Jcb@)5^n2kqk4=94*yIei
zrMu|GP0w0aH$Hjv^nGXVIeYi%yKa2==&i}_?$k8CZamASonGIa&M#Ww8s*(Z+vwTe
zVsC%8xH{XrJew`A%@%vJ{cF=F!l@%+LlA6qzTnD<V2~T;DK|JB3B?gtka^oAyV{Ce
zW`1NzX3W8Z7v+Wmb6(Ug-rd?uqnR?!F{2fk^w-qFvRoX=OvD)2iu-KMoP0&VZh0jH
zI4}yA-N~k`n{8fnqg3)kHn%%TCZ*-Mq~7r?YpO<ZPqD|0uJ7&zH{tkJeR2~_a-le7
zy9lS0;8sgrGAb0Lm&9ywIcScBEZ1nO$fEgtu?%va^NOv6=Sy-jzcv=`Md7u97-g5r
zP#>3Yel^%v{3fHMm^QW|%{+#qo2spqs7<zZP+e+ObfnAp>V6x&WiDHbyisVF-_(@B
zR7s7bVyD&6;P9B>43k^0#tgZwnwm0~2GYemCI~AMcXzcnnUX2Y4?%BQbLZrdXs_rr
z%!*pcjFSa?P*r;iv`cV6NSmC3Oz2gnEUElydLa0Jqtl(a1G+nPA+5vQ0`|DD$9AXg
z^4m)ZhCnpeOzMJI0+cZf)$t<%cUvc_)zO5ZnTzN$2)*$bL?tUp^BJKUt$86LSJ9bN
zkf1wJtv>ixt2V4cA*mU?4!q-y6)fXQt$22bSb=HB3Tr9`lw^msk$TJ$+OVw2sb#&N
z#=%G3+$O7P*OJ0W<%Gx~*pI2@+;i57(%k(>`Vemz@zG(rOV;A%NVqDNS4M)O4uVu8
z`kk5D_lSIU8KJi3s$ihEOgdr+Dud7z-jp}qL>qF?3b4=Z;RMIqRlc}Hvz>BM26e{l
z=tD?jzw^8v+?Xt}9T?4ZhUQSKM~X^#D>EwWswtuD+t5i~d?^eo6;>IZ6Ej;=GsWGw
zm4VzkP+x8%>JL!_$HpECOEK)9^og1Qju_B_XF$;~Oia7SNaUeQv&a$HZA|kj@XeR+
zxm-HF!ou}G&WY&y{H&-92CC7(K#t<5rqZKHP<$m+xMfr|DgyWyF@+Ie+_Tu?1dmu#
zRv0_5!`;)OMM-G|Cc`r6h{#Wjxh%oSf<+)7$}xEqE+;|T6Z(x+5~V<KYi@Z(k;c*o
zU1oG4sjc)-z*AwnEfhD^Xa=K%1s_KQyHvKJCM*z4b;1EIiOe1jf~mAa5h`Sgm4~01
za?BgNeK;kNaEPTTus}Hajy{YdcA#`0*M9)-Fc7r14)21n`A67BnDp2h9PGLS4Gld}
zM8JjPRgKFL&%i>{>|(+WX@`un-~JRxtBsOMWyl2xxsLplvW>8El`(!zn<oK&(JopL
zizKK{Y11F0U^ZJv<_H?rF45!=X30k?&$P4#OLVmXN9oe9hgnRcsKtDmqt-A?CRVAj
z9(Uv1ixE46ckL9CmK6ZN0<+nTQ2-&<N*MAKVIZakROqoBfY=qhj+GWemC>B9nxZ&)
zx+jquSg$a*mL5=Dzor9f!kf@ZkPx0~J=yd{ahRvp0safjH*`e6P@?47hB%y_wDE~*
z#zR;)ZcR8|H<U>4ogUtm*s||Zt~k-MhNPu1k`FgKuxmGrmTkLMaLya75*(U-`{0Ee
z?D=QE_ssX7?6tL(N8l8C15wZVpy*1jj`_whihHC>Snjv;3BUtJ?aOy$!bW%xh><!|
zO^$7<@iBs>k6G?3+=mmNz}WgmXJF2La#J_M4&1sp>1@+Iz0)%&6>-p(twkQ{jV-yB
zuLNhqMhtEGf<O1d4-ZgrcCzqQ9a*;n{51B1R(+g`(I-h?gs|arIt4<OZoHSmyyqKi
zH)>?c&T;@EGwl<bmTz347(md4z(*Xcmsi`TUvPpVS2aF1M?D{Dwmuh#Z@QzQki#&(
zgmb4SxjMz(VIx!&IK(P&0tlcP=&RCM(J~7tija52)#I;FEehXgfycbW#Bx=yX?vyX
zB29YmBnSsf8eb=)4IaA^ka$OkVgca!YWvmp>UU31?w#C2-{R{0VZPh`HQ@1&UVm=q
zUmyJG-uLj=fCnGG@X+MUWD>rcFVd&&xeKpfdg}c@pV|3lYyZJtul#4H{3zLfZSvL0
z1JORLQ~Dk|_t`tIJ@xX>etPhDd+1wVuv0t#=np+IIX`)*y=3_Pc<zsX`1s1-K6`EV
m8|}fnZhiRQo#nsNZ4Xb*PVSEf!}r~}@4W4v{QRw-ocbTXzI?y{

literal 0
HcmV?d00001

diff --git a/logs/second_run_logs/run-2024-08-25--19-02-49/events.out.tfevents.1724612683.26610f3d33fa.5666.1 b/logs/second_run_logs/run-2024-08-25--19-02-49/events.out.tfevents.1724612683.26610f3d33fa.5666.1
new file mode 100644
index 0000000000000000000000000000000000000000..ef95393aca2d3ecbd709755be79cb946c7b69f49
GIT binary patch
literal 4110
zcmai1&x;&I6jlkEtcWB>56RL;$xLP@iR^|DFc4T2jHvMvq^Rkxo++ogs;#P?-AM@k
z0pdjvK@bmue}Mmux7@sV_a=U?s=K;-Gl_fHW#4=C>iziM_o`1`i$DMVaPrHg&E>zY
zJpbd*58r!dX}Q$X2V0^2oyBL<gX>;sXYA5&TTES3Row2Tr&fAlr-H0lDV1PR^7i+?
zo&NrA^!xnBU%$Kb>|XTy{?FGAe!6yWncJ1S;l-I}t*iV0T)p_#l?zwixcK`0zpp+$
zI6TZu<Lkz=LfY)kVK%*Cg=>^|H*BLPi`nAVWOg)J+@4J4$CKG&a_cy|9!_nA4MDKc
z`I0Lqf<a-Jr`+IlBote&AoI3KcC{6S%>Brc%$S1*FIGDW%z06}cz0_rjb_R?$Bb5F
z(qB^x%L=iTxri~aCHMK7Ir&7uZvH_Ca9|WJdzsCuZuWW6jZ(=E*}Ui^nXD`?B=wHx
zc~dotdx||~bmwpp+=SzM^~p^zDTLyb?JS&9f?F;1icz5;y(uQM+d*?G<Z6w!iaeT6
zXY(NE1+Umrcs?g5)8nyl5rxMCG0HBLp+3&x{3zI0{ESgjOdDH~W*WoMP1V*))Fxj$
zsBX3@I?`o)bgPZtGMBGK(I_;`?`p~*Q&J<T*l9I1*qjoaVe<4@%#hovsVQ@5Ae~KP
zg0LcScUOCpSuutAA?Rf_cTR3adqt;VR@6%7oGjpjs@hwiU4jEb+7uLILa#Eb6_r0t
z4+Q@=I^CI<pu1C-(mKp7V2=xXY<KD|zeP$g1fscSQkTRMpp0Rtj&B9r?VYGrM-zr-
z&Z5g8^u}Wlm8>MqXM}3B=B0>SMQ2h$g6>?k`ruow+OZ0Sq-OLw@QycDu$(Kk<oO1%
z0@IF_)>I5A$v3r;ddw2qu)Ha#Wxb!q!AIS^AggNElEO&kgeV}m6;ms?=d2Z_x%*oB
z5N{aq(P6qv*5Yg=T$b|>MuM^qf>a~=pPAbCh<rGYP+N0VGSFKt9WexzL1+pu<Bd1b
zhTO9f?DJwc!SQ~TFK^Opr<{~QoiRIl18MAcQPhJQlO?tTqq)w}9BTDQQ3-ElPK8}I
zD=7Ombdtwkg<++_D#Ht6W@~DuxEr@JkXr}p+igVsA&TJC*p09hLu8?WPt*)>#DJDO
z2a1MaV%j}Mq6l4@M2^62W13fiZ(hCU3hDR~3)g>B5YhGdSy34bRHK1`0>x2HrAL#X
z_)@5F%cyEp1n@6n3M0U{XR*Zz9<ipZG<INzyR))I$;t{$hGo(bk)IoLxdJB(7J<Bx
zWAZ3nL4vj?^dGAvN`d0m-13SdjinE|+~`tLTj`;Ir^0?)D9+Sq2BU-pA6tT5D%(&K
z76_&~;Q*IJW)BC!RNA2k6*9%j!|z-<<_+FHoRUa5#L^U4ARK*1AI1?oP`Z!nKY(`_
z2wGc*cR|?vBWxo~dTb32cHM!7hMp)Q;KK2$#^s1-V4-PtF<~3pA>-`#KLyfiqoh(9
z3IRf{BR{2VBdlCyj9=5{Nr0cVix$Kp391=w`co9l=Ih8DLBrZ5njFF``AFr-R@PvN
zE_dK4z4Gf}7FiUvm``)m8ir}dDmB*QZk&4@u`_trW{|Y3005Sl&2EeW2(ebekf#U(
zF*Tq<kL3WwF5z{ov>2+K=6u<d<@wV+iPXS)g}Jr#fa>~{4QT{tB}fQQwVrJHqBzV`
z>i~ZO^9>ylFqByFd`BEkPuloIHRB<y8@DDLuNz9F_f8M*O6=KpDOa3mSwqrN7|Dm5
z9oV%yM$5KcD>&y3RtXMecTXPOXJ0)0_`xUldTnjx7MwzFAnI8k6kW;HDc?CpagTHf
z%l&pf2YA4!eff?|*a+_dF;eHMDX>j7K1Q(gG0T00`*7kD7+c@y9L(8IZt7;(fqNGx
zoo%|OcX|e;A`aTJx5z`iu`90Sli+OFh@nki@J}Coeu9d#^M$YK$hsZiv)B(>^>Hdj
zpCo+|!iF#C6bM<m@m>n^UhJ^lsF5i<%K?bov`=hWzHu#M06`Z5A91jrAGJ@v-~>gk
zYJ6;tdOp(ZeJ&8+bVoxWhhe;ebF=eYonr5>5h@BCVih<61keohRq3o~nFSO@$UEZd
z@mHu8rEj#rV_srnxvJN+z0!4&COvo(go7oGuanURk6j5!d?`e+0PuXZ{c3ym(Zz!|
O4=&KR__x>Ye)%8r;xqgJ

literal 0
HcmV?d00001

diff --git a/logs/second_run_logs/run-2024-08-25--19-05-27/events.out.tfevents.1724612737.26610f3d33fa.5666.2 b/logs/second_run_logs/run-2024-08-25--19-05-27/events.out.tfevents.1724612737.26610f3d33fa.5666.2
new file mode 100644
index 0000000000000000000000000000000000000000..6ea8859409230e96f44bb013b856c7203d32cb1b
GIT binary patch
literal 30486
zcma)FcU%<b^FF(G5)>7siHInQf`X#h_64HEu0gTudB6du$K82F#a@YBEMGO2Xv7k0
z>>7JCMosLo#u{4)_8LXe-@AKzJMXYde&mnjliT-s=Y3}8otcNFv{gC(`82ofGIdRJ
zhk!fVuEqF#k!aN!Okr(?>P`0M5%FP~`gXm^X0|4pb=KrCn<G74XUzy3W;NLL)-XN4
zN}FOZ>b1^K65IcIe{Ff^zHNgp?Np!o&be>MlIog8)ioYEYpSh*T|eBe6}O6g=U=u=
z#gY|^mn|0i#DAnFARy4}uv;8<ZL+}{*gPOGtbtW;a~SQm23ChDG@@Zd<Isjtp%Iax
zq2bL!8%BmkG!3jTpoZ$L!XdN>pQ1C`^umH<v)0aYqnn}+^+R<=`kCG8kk(nO`eZ|r
z9j)Y-X>D`|yFN98KbXy~x7eJv#q|cWNozFQY+AF)$S(mi7QI!QtRHGf(mT(fP1M<w
z(zG_i2t8dF-b6Scvq&&j*qT&}LunV2!)TQD;n&DFk(Q)db;$-kcDke_N4mqPv-4+n
zGHM<WA#5YS6`xDn2uqUnMjg+#p@1^#bykzXl&a+&ML&(y(;x{?hUpEdX*8_*B<J$5
zhT+2Jx@29tHc@Zah4Y()HNzi9I6rL0d`#VLFq-)|hSSZXg!SpV;aZ-Q)?~J(^H0K@
z$1yq5MPKPH=A<+mebh*N#6w(+8%Kzzw`y%kY5HV`kp|43Vc{7B8Vx2&${A@RyI}Az
zKAq{3k)xdqb=Gu;g=cOs(@z_QISJBL{Kqa^q9t>x)=2LUpL&57oy}$#suxYTo@lLl
zi_wsz<5$o<c(1|=8ZNz#rjW^;%pX$9UZXiRm3O~%d*}!M-_>zp3dwHQ;}nC{M(-_6
zdukAJ+x6JR&*e(-ASRtjYcQqoRf3ib_NX@9P(9sF@kWWy_!~xp+0f}T;X}LGPA{U-
zkZ$1br!!TZt92=QXRYEfl0$@N1-u1^7oQq4wCU85d@$e{tvhzJRj*Cb8I6g$q`~y8
zG^cIa6stL1OS2?tu*GaJ;VNM=Ym*$we8^xQhmG!GamYSPx7fs@fF9ZO`egb+V<)v_
zon5CDKa%d<AcI{vuW-c?n73uH=!c^Z6H~&Qpbt_k!UvL%`2W|MEq2JrF5%8pOVb%s
zw6we>8Ekxo;N74%@y83)IqYWV8h(3i3avi!;|LooKeDGZ;_tS^$!Mm2rgz6Qn11Sv
zcXG0Yu_&#i-9gLW>m(W+KGf(zy54RzB=L?-bfnUU5C+#LeftZSm3OSstV`xAv(;?T
z8g(`tN};&9k~&g65rFs;3^Q8?>#ahHLbSw9sncja(i@thOQLDSo|9g8JG~RhLYP9G
zMS?artx1<I6rBHVbjb#rE|GRz`;cV5(g8uMKHW@jl*2@mAenl{=twuAt6<b6>WzXG
z<Tn1mv;fmH33ouNvum9(mh!{tRBejc%B-Wd15?B_Nww<f)v#&}HolS<xaWpcT9ox#
zXC@Ce;OdcLOXfcigTnuxHr+s7LA`6yS#|0BNu9nIo+X)0DF!~S{G>o<1?9E$!EhtG
zgw9X2!X88Uyp!C<J`p+yz1C79P$SOLE&yq%GTEpT(kWBg^XR{mj5e*^OotyGPeMvW
z+BNmGgXn17X&ms0ZM3G_o&8_PUiKg~v=(775L!P<ZhB09+G2pN=+Gzpt><fmkQX*x
zx`i62=V4Zu_{&SL*kHbiVD`$hfYjS~Qj!~z^|T^eoCQi^>r5+Ky4h?`<6DoE@D0UI
zODl_kPt`!a)eoZ%vnHiEONa18D|S(7L%K}bLvl}Gs?|)pXkrE}Ql?aU8p|Tk`H`6O
z(mfIv3fG<XQnS@gZBtxdXYMeAZ6N(bEDJONQ|R4vT_Yh0X<rd;xpYFIGC621wA1cD
zGd7X#iXU3(rAp%Od7>jFr63xrh=t~QI=#16eu8qvE0C#8bSShLXceNf8dMu_ijCeU
zaRd4vq4cpE@rhy7r|Ob2_#)2Fw9XmTj4z?ZVN2ulIL*Nmf!+DZJ6*5P@FcDjAyy3o
z-xbj3Y-^{vYt7K|-B#=?6lXWnUP3{EE!!r<YP)yo(55p7Pc2r%P+C*?A&8F|9Evxo
zGY->b*tGn)(K9KRp*<H!c$!Ay=!=$UQN9ZqV=yHd9m%wvny4C~U7y}%@Gu<5IRn$S
zZ#E^-$_Y1Dil)#66+0)7b+~44X%3Q{$b?1F$?S8pHC1Oaj1V?s1HrBVhj&fr)|Pr)
zThQ_87H8FUC48VW9(=EN-U_5izzIT&Pv%jyARBDBFA4WOIfHgLK9CYStBxi_l3ARw
z`R?oN%k%=$a-pXxj;;=m5@$bQ6YAu23ssva=IDvyyujBR7gTU0dyN06xq$_1wRCn@
zZU`Qwc}QD<O;0m`zXRYE*DAi530dS!c^hA=u?#ioQ|u0to=)=ydSi7)OB&z3By-L(
zX{U9P79857sXRd)+!+-i6AMjI+5qVMYK7J|x_wzqX-!F9jB^oVCI*OK4ygNNT~~Eq
z@5Z70mjfzP4ymN^&}anV+<@1%o<4_t$;w+}>(>(3=k9OvO5NL6TJNQ)qABlOFSJu$
zka-UFnny03i9c9Z1U3DvRjE${$WP;=sU#X<1*B)z-s4Rh&06kPLj)EZ`S`86mY-y;
zvZkV@oD(PrJkM5VZCsfriw6aW0D;c^nmd~bbn3|H9Ixv`{`qoY8APW}Eor?Xool;h
zkoHObeG#2G0QwI)Uqv+<+pQL&^EUt$=)B^l#_WGoU8M7Mht^)|EpEkI3v?nvDrzcd
z)O_Hb!3p>!fanHh1vC_w|L@H?tvarPw7jOKhQ>!&&I@}h-xm^%g0xAp(wy{co>i8-
zThnK6h=|`jbIu#J-9y4x)A(vC3;2|9j6B}mgZwz>?f!5PxoqOZf7CYt8Keo+_zFly
zO|Yi_byt(H7XR)F6E`^ad(S*|ql%L08k*`FFJS|!MyMjg<~OYOAp-}E-ht2)mCzFL
z6gpkfsg%zCo-=Q|lYMKOcCRDa{%onzUVQ=3z8Y^$MHg)b7ki^TiMl^Iwt<LC?_aH}
z+P{)?W;#}AybF+K;8#l)C-?Uj52-K0UvECuMm-4N)iu>LRanXlR;8*zX3rYxK#|@F
zScORQv!#J*tVE;3>KQTS12?kF@2gRyRh6ZqqevrU=ffIuW!mHxDAGgyB&1TL(FQB-
zyyDXPwYrUBbq_X>MH-=BJP#m!dXyd0M08r0o->}QBdST}>uQ2E0fN({NYnaYvZWj7
z*I4+k16ZwQt7nxa$GKE>`?|<|Ghi2RA5C-x{F;TExH{MRj%zBO;_sZ_daG{%K15ST
zQ=>rS6}E}Jx11ZaW6|Tr;x+>sRqdw^@t4R1Yieoy*$hzF<JQdsoNao1Ta-l(*aNai
za){1o(Anr<<oS1WELlABkXLQdzOBnWmHIGXy){)e6$Ja#4H9ZXNIv&%-kZ2k5!Etg
z+6T2;b?MXr8h=gI0^}udz>LPkG;8p7lz2S>42dV+a|yXO=Lk3Js}X1GiDol>k9JT`
z43f<HYy32xf?0`ZqRy1e^PGG-hCEm>-y2o=8vqrmyo;v`G@(Qp;&yW2YvkC4wIqAs
zSf-?j1`q!0Pd@GVP!)BEcYqZ-gkaEB%|zga^|{>cdSk|;k`h;2G72R{BAKkS(yvV=
z=}mf@r%E-DTn&ER2-(d7IM}5DWr3&j!^xb8%*p6NTn8Xr2q4Si1M<^IR?iLtP)^he
zmd*`1ArfZ6Dpdti*5<zjRRjxwp(046>1;tKO-h;P98I!j&cA@F_kBR4>doNVJvhpJ
zJ#|$x<mylLr6Yr@fieTXdE*9mtloywC_VawN^nRIkuif+skU%=5gR`t(`x|>rX|AZ
zW<k3N#k)<t`lL$f9~Ph}zXmvpG7C&d*vT1p=T%2hCSlT<m7>hz=LQ)_rG<|Up(sxS
zye!Hrc$ANhbIaHG)D-XUmgU2~R9^yc6-{N$7wG<aE}s=dLf%;NtBH_-4Q|&|*JvP}
zw}Qr9Q%1bE!s3o+Y|Vni#j*9qbyAyPaaE0{#$8w}#Z$l}O{@C>4P-w7Mi|JLdny2K
z-g+`1P&6{!qgB3oN@M9no|;M;Hz&Yh;&qJYk%i-9emtGo4f*U2Ai!tNuiUV5!gFej
zTWo%Udofh=LL((}Q1gVZR5D7$lSH~l|8>SK8&SIgP}!`Rsq-S8D|~Z0K%^#&&g+-8
zZZFK5EoGzg-ygh5$fi?gQBl4AowQzYbPn`cKpvjEGz6t?>~ab8pQCe)i(}>?Itu_O
zqhq?>gxvk^PAH-iJpDuuflgN$o&KLHk&1in{f6kQn<1@Nq!Th{DVaXyOmRfVW2OWu
zluq9FEu(%l_BxK}7yu}vLxl|)nvr8cbdLVGGD)D5ETeO!VsR3);lXl5=kKl3dPO=z
zZcHb`NBpx0(Mi}QfeNLQJT*M3LxWBKB03uZD5I09TSP{lI~;_B&8=J0Qbjrigw3MF
zATsLRl6t6c>F-F(p>RnB6A7D3^E`-l%Kph{>i7fjFm)6ZHs!wwBojRI>!EQs;V;Ru
z(zuIE=MHcu7dv{oq2{(4Hh|{l8U<Kcl#tMhe7sZ}x5gJrXo)zZ5?)>}Mdm#{)(N%7
zmUpGYqt?jaeoH7xn)mhVgZfh@;GjQArIdb^P|{P6(q3^Y)tI`dHJ$^wtTi%NmCA#R
zx;%9uinPx?>BtI^M*5&lJvWllQ2z!+ItlP7(g-=o_6t{I%$r~o=~Is+%Sw?(8~ont
z3vzkQ+6^eu;g6*aWRXVbg$ovNPZNCJB4INY&_xqA>w>en&l}yRBlq2fUBG?H!e;!|
zGu*h1&oYs)340=OD59_#o1e|S@9^hkBy2KZo1zGt{rk$1z9++vqAWTBdq5URsV`}T
ztA1_Cr5nrYAz`C_D$!6DHUpn|k+jr32T`k@4Jc^UkoJ<vCB_I+KRoFgO1x_ThQt%^
zxrDTzFGW_nb-#d`e3{qMVWG)OL|qz%7jP?*uC72;zCS<}g-y1fnw<J!T_SSqIsk!V
zfu_sgXs$Y0cJV~7y5f)^d?oJ<U}5xhRWni8xE+1YwcK}X1}Z7hZzPgXQY4bBM3{NI
zGnu{ZMFO%r9pGTMfUw!tp(fezKF5eI#6<wYg#faw;dfK~$kgbo87L>Jzm?7nIUy2e
z!5h|xkjW<ko}-G824JWN5@|*yWTmws*OvF#fvWd5K%?r-;1-X%#!dQqZxV9#-4D`{
z!PP*Sflv80kMp~bX+-G}^-+RDdWeh}tV(s5`@PwLE6DU@z=COsFe?$p>}yO?G^MMf
zD4zp3iZTnl*i1vF4&8PQMcF4`I<r!gS^U!pgmkExQXNG(1@N*cv*6`%SGg}vUYmr3
z%~k*_2%9tGgGlCxj50{rd;ka%HvD4Wd%n+*u=(nfL{3TA@R(g4r`<pU*>1oH0~sr9
zc)*J_k?u&?{8MteYao*gn{K5>#2mde_cQXDrIdsKpD75NfqIYF_|vsEqndXDz)<sK
z!e+m{QglhfArC~&x3pwean=kgGbt2}&bwZOID}bqxNLNu<#@q`rkwl&71i4>rS*!V
z^M&CfiK<1(0Vs9bzLG$Nj?SkowWC_OS09b&tN@^lj_LX?qP_3700DGMHjfwR%qS+~
zGkMf=ZmYEnZid^J0zi?_xBfGUXXk4(P(Sls2B<<AHSOgaRsY-FnJAU43{plZA>k4^
zwSQVQ6rsyquS^#xEhtv-J;#k_HOb13u{+SHHaZ^G!+n?P3>wM<&TGiFK`l$7^n1W;
z0NuKvAQ)u!AoE7v+8QBtz##7)kJT-@fo-KZ5gE@nlp`N}hAluvaw@X}v;|iaWhH@1
z<wg9OZLfjg7Zu?Wd1k;qGrlCLdN^(nO4RDz;rytrFhGC*Pu$AbD=JiDzh{6@+@Y^9
zc$Mlow|MPDPgF0SGk95JLEtjvuQIQ^P{dm%z^N4?j>hY9OG=QH<8Gfo5zk~cKoLjK
zMYk_;8?XH1jv}7fA8ac{9PJSMG>_|M$nK3o_K?{@7I6gc(`PUD<or)-(NG>a01i+z
zZLsg+AKbd<S*MW?$1~f250%Bi^v`FwOTYAd8YQNFcE;$Q>NCs+MN|h5`tRg&zF&4d
zQrzeAvHlO$-df;O6nWs`@q#<=@0*R%NzZHo=_FkPY2@a|$>{ezrUr{A*!S1gIQ13=
ztSk_=t_~wNyjJ~*=DGI_40;}10m<l)K8?thUJ>|<d2I#)ZAd}!-b>IKVIgF0i|xK>
zCcB~sDD+E-tZQU%p4FCUqJQp$v{1toz$%J_N#!b$DW9s>L+%~TpuoLA)n(GUxB=<*
zvcYE5JAPxpFvz(ooG25{zWT^bTD_t^Dl(p_a9AiZ5>a;Iz7v)}?)UBY1X&));K6bM
zq44#>T4a9deihK|SkEBgb^u}4w0md-*;(#Wca$7&m<=E~MA9spe}jEIq2(l0B1jq#
zhY}$XX9n-KJ7P)stxqbWsy>y0qpHpTQ`0YVNAk0Ck;B)fgI#bq&}Q)WHa+I*^jFnD
zdGd+DL!OAN8L&$A3%B!bjq%8Mdy|rJi8MQTPwf#!lG@h5U+noGgGbS3kt+t2BkA=Y
ze1oEWn?Wi?o892kjV@%f=k_mAwCkGT=(1?D=%3b}=W;`u&p{)-jX^7DhSf`h$+B6S
z)BMGnb?rvQ2=xI5gG2+rI%C;)*U=DL#scS4QVl#Pr)sA?XmabyfMl|PNBrDngB#L5
zeH>s$t{YUU5i#~3mcKxr+sZ(|a|*)2mgW(w>)vTJs)U~yG*m)G<?z#)poNHD+o5n+
z*<jjk?tOA#RJCeo*6xvAZLTnwu8_~#c~2g3HOIvzpkiBMH2@T6?PbF=Ny1laZlbES
zmO&LdYkyBFM5Pt4I}cH+um<eOD78BKiuB1Gj^8|}M}GWHptM6yspsRfT)^WCml36U
zqXD2uY0|=}q)fR!eG#Q4460B{<$_B^jqjb;8&P_}AZ3*12C1WU7w?`!Q6eoSZxAS*
zkW)Ih{U@$$glQ9^G_fTB6e%qmIEf6nQe^?6^q4^vO6ecJ*G-Fgt?Pv-#k2x@GD`Gi
zT=bjMvvLB&%Pzc(+wh^!k1k3DUXVQXuSYIT8mUM1Y*0R|hk7PeQY1>c{92wwA2?A9
z&By1N4PZDasEAsmRU-d;_V^1lAJ_f_ww2~%WW36@3M4KhMvI!?P-X{cey(|eRYdor
zs*ttSxtj=nR1q$bXS79wHzmomxCL8L?{xbN=SRJh0e-dn75DANerHho>cIe&dMATF
zGw&JqzjevssCO=B@Uq^?fK{q@T<;Gx!coL?8L&dck*e6!A)oWVezz2gcq3I`DaBC4
z5j4tjhx=h%N>3DVQWl^}5l1`ReEN**`KDDz6td;a4zh?N_#YQ8a9@x7@*Yw|j~IB-
z6w$&F+qn73=RA-P8>@i__)u999VdIZ+m}|)LyBlLvq2FR(Y2||xb{aqdm%-1oY|)+
zis;tOo7}jnTTY{Ns-OWjkWSJykQBz#_%7s<sdWfaM6(&NvLf14wIP}BS-&@G=6^9T
zXy!0JN=6$sqKc^Xk-aDd>z9KQLkf!bUV_F|4JE5*eI1PEfO!=F3Uh!&)+JAtlxRnM
zyLF$A%Ku{qtEh-3YdlGOtvxf5dz*WJMQ|@rb(y?9vH>|XI{5?Y9hnRmCRJC36BUtH
zW-+p^`RhMWkvYd8p~y%?Sz&Rge|r*DZOlVt*|#Db8!Q)4L>7lHiQn|L7`h!P3=(b!
z5N1s`4ya5z)c0|q<k-q=0LdYeX3@=N)g-fz{(Bach>r{!N`yq5(H851za*bGhm=88
zU0VrwqpHpT<8NQ%vU0pVki*A&0T3Jxv>E)j(X_PXR`o!6@+X6bJP}zlV3q0wxBcLh
zPsn(%w~}#*G%GCn=o%4wy?LKew1+Tw6m1syIl4SKbZcG*6zzQsQYqT(23@B#B8ht@
zr=n<=@`0nvqRpbe>h+YnpMGEhQbe5@w1OhK+c=0EO=$l&Qbdaw3{ph=>UKfBKOjZq
z?F;8qQbatc^X+1IzD{L8!sN!jNZ}DRkJM;_6w!<TFe6t)IfEQA?d%68BG27tAmBL#
zMRc%w<=C0ccCJE|ut5zl2$fK#h)86W=s7p)`J%ye1cOx^OqY}}yhgKj=fy+z3xnwm
z`K*2L=iS`s13vpvv7N940E)AAkGvUV#mG&)P#!;GP=(Ig2OiyQT6Ncyx`-0D6zs_;
zjrcA&I*pVYjwp2<`0j*2>A9Rz+^+4MH9M;pqBLCx07Xil`Ya)P=Kl5xQF_Us3Z=yV
z6ihr9Q4LXwO9Xo|O7xrGqQ@2s`wGd2<6Zpz5GduzUZ{Al4<k#}U*Z=k6TgP_aQUUO
zgC_VLM@o~r>mM6XC(mUzfKFaeK0G{BfgGwo!GVU!5%FMKX_!RDU-T+N0tRi`g*wIW
z%nr~gT+Nl056x!zlKT&apF(i&Zg2pAOXQhBd|hw}^49!11~r-#296pH18m{Q<!Ve=
zPzu%h9~q!hqhauma&x&~--aJUjpjXrmo*v&tWxE2aSK|cqjKN6JCIh0I2yr&`V=F1
z5es5a#Ah=bpok;r*sx>V#PM8p6!EqFz_wDv(GEwazv8s}wwO`KO818yAY@|15q!<+
z)m&Ui?h7Oz5*T>V<in;dKXGM>#V<xa{GQnce5foRxOXeKGgaPfM)Kh?vq2H%L$g`)
zxZhW|{1wTEr~%-PqR0mikFDIQRe8B6oyIepKsrg+KpMwmRwj^(2~$oY`EZH>E6a!b
z+r!Ak%}pMl&R0bX2ZzoFvzBCZ*sz{t|J$EhpcEX)pdkgtERdjkb1IV55?QBF(|ni=
zhM{RnWL>j7KT)?n`KBBy|E=|aRg@1`eLYFpho@H~_s(HZ;9j8WGTCQIB<UGlB@*?H
zTMQV+I#-2bUZ|W|l;@9%Oq~=sEEE}uC_9B$+|-qf9T;s!mTe3kEEkXunI&tGBbkG@
zq1$nYLBj0-!mR0s|I{SSbu+$4$x${Hjt<Eol4jBOqx{M5*B9Z}<nas|N`yq58N}Zt
zw<NpAwY!X}`Z5NNsyYLVI`9v-wQH|5<nW)3aB6Tk&}Q%<FRyX!YF0`^dE%1}c*qlx
zH3L?uK62eV&whf8r!ZhJE|F%Za9cn)S(mXFzqs4V;8C<$WV1_d#C!Fl%P86(7^G6P
z*$vhY4<qW230N$)F#&H`v{`hQnvc1YCF<`*@?kcER*(-nVuHw57naUN^5IVggX9Ch
zy4;<dGe|zvGQ;_l<O2_CmwnYAO>PzjB$E$3qW9v1`A9xYwu2eDe25)5JZ5xGubSwd
z++rZ$IR*Jpw_nBBR;7K{qDolL0S2KG%H+e?L6xKDYJbDu>EmFqvcWVbgNqI;<?e}Q
z?FBdYToVS<NyTN~ywt4CT5k5_ypyQdzPb$n#aa8#hQ8#7e-|D_RcqiKKovS`Cr{nl
zbjs!Yu_$7@7^I9+qE}qBs^6JVMCsY(=!XKOMRH1~#!lhZaKB$glp?MIK#|f(pRwfM
zaaYPCN{bm(p_H2TTGBM&&$v~H(t8Feqm+=45Pj>%PU8@z$19G#6ew+!Q)<{TlN)mN
zrxS?Mk(B^Yq_ktm67qJ#;ZcZEpKL%CN~v7%<fc*8dQ3o+b}&d8rFL_ZqkDDjR|P4e
zl__)Hx+oQRp%RzZjD!ZQeU0i_oitbv^-QXyNR)KzQkwiR<%k6hC$pFhU^pqLh^}^Z
zC%Wkk_o4at9s^aHkCE|4jmnTVowJvr`M9wGP7lq`H7~G=h<~Bd>6zDU1fQS?m&h~P
zVr0Z8PJg{+In+DPGH}#88Q`Q&H@KlKLyw{M<u?e94ebkhCxbt8{0_Hd{^4b)ccwCU
zS?^@PD%Dl4;~oQ^=C?6mg@_|n!7Cy!-N2?O;`z)5DB=h@=i(jC^I6#)DB>rrKwK%}
zXoud<-*A@>-n5~R)wjV8vWO#i`SNGENbQS?ND+-<;6+nJM;{*MR`)2Mjc&~eW*hLK
zvLZ5E-osTJ{o^2{h$`EGMiCX!CiiTvnd)R7Qbby2pQ0!t`faJ)<`y5aP&%z=Hi2}K
zu7RX57LJG~@6T-<f)vqP2CS@zF4hVrF9v6JM9n<f0S<s>4p%@jY97&zy!l~m1C)X@
z7&N4yc<&|Xj4%zk&^@yvnggzl0K+f`NMv2|q+6FbLVtk#AS(aiBLS<Zh#o{%CTHJw
z?Tp+zjzNKYfvU^on$@*PNbY|8t4e-nz%Z%0Dx9c@61_^3Ul#iYqassv6dV?cj6{?b
z7W~f^HQ0CL8R}Yz3?3{OP(+Kr^(G6_66c`Xv57&#?Eu28>DPnH(|19pY?K`DnGGO0
zMA9reA)yxGyr*_RCF0A`KpaYhM4ZtUL49J#4>vcTL{)tz14mVz0rre~!fkF_bsTc|
zwh3St91gS@eBQWR&ewbCM3g6*iGYVZ5m_@}{=1H|%Jw*pjQ3)|U|b^23X7-l4al~T
z##2zVS2K7NZ5H{ay*u#<%=AXle$F73qRnm)vN4!cAJcmpigxo!z*`n=7G1Oczg*`#
zwe}%Jl*ynK6p_!*fuuuz#YadHontUa5%H@Be{$0zMO1AvoKHy+@t|^}whcs+TN(or
zCO7tl3XfRzD7zR^MD3@68Mz{Q`p6pN(Rd*)qAM8)cuqkP-CX4vTWQU$XQ&cBWzbLw
zWr}Fn!K%?043=m_FKRj*R&g-BJ>ZvkG;2Tjd49>i_+P+vSUziO7tP?bGe-P@imm%m
z04UDdlWIREe)$PTl*hvuRH3u>fk*wD{;~Y^C`9QrgOpLCKdT-c(Wmu!M5%1MfgS>-
zi*icaM_RdYTef3Gr0)O#MM@;pMtYBLWk8h9GpIr-(H~cD8u866KSZf^N3bWO<dr!-
zI`glCV~~8P9-ZMUP`XvT;7_Rh-I0)O!~83w%YPyU*2CqO$_|>~@9i!}x>cQc9d+`C
zv9JMj@`CaqXYUte-SPYF(J=W9163L(k#YVfnFsuOZxiYi7nvQPQ@ENdmJjaaMD>*)
z5j^lqI6J^4^2{J^bT3Kb^aluPG-d{l8Vv)i+wdRm?ZJ+JqFTS70Ya^ZM#JF09QK&Y
z9`bY=YBZ%=!?|URh5@To54l;#F8HB{$1`Ash@%ml|9-4iwkIo5#FsD|pok;r+s?PS
zBOUYJqlmk8f`cnX9PJQ%>KVs{-ra#hmd@+|Arm8x;4hw^<Hj3nc0lss1OqRce5lrT
z2Y2GiikrxXo}GaQ_)u9s%rVX3T1W1ii{yir*`SE>VO81`&Zmv;A(9V2F#8lmKJ4GO
zgbNydYd%V+7tAJ*PSQ1y#&KJ~*W}v_pQT7XH17hgP?irJE{2g0{VW-1W|+jlp!2~M
zkc|HLeM{0UYe_Cj!P5*HQc%nS3A#5Yko>i@d?ad`>I671G);-DYnFd`y$uO|K64f7
zXo(C~Q9e{ORVM9Ae0v?acRPau_X1UyN&el!#&vDoQ12+!1C9%0ovXr$@?pexWyojG
z`9Y}2bY+lGWF(^O6drKCJ*k_t{T;Hrgu#R50`ej1y)TLUwFmyqdk+{S+zuejnr>Xd
zlSE`a`x+%jgPuSdl0ziTqWSN~syn$-4k{7D88nm#i8wQeM~rDfvPX{TjH>z(29Bya
z1N`1Smoq&mI}bU0V?Q`GI2>p*_$KqOaDDwEcA`8f-XHLgCn9SGOuzk_{JQ@${=H~j
z888@^NV8M;`zI0PkVpIt6z!!99z~l)_RiLj?MpM%s21ihNTq1A8#Eu-l#D4}DgZ@0
zVgT@#MVm!WdT@)osyh50$%l~)x)AvgaHQQ`Bp;437@2$+cJ`_X$%pb<IA3A%q1Uu+
zL(t^bn*k}xhrdU^T7=}ov}7<Nmk*O4Ib!CfzF&_#_ke+b=M?0_<MWkbUthf47*)au
zJs5;aD3cG}?|DW?yt?)X(HqNPWrJx#!sO@=pI$CUv-Yc!`)de;X^wo>uG+RMS7&9f
zZm8ILmj;01ti2}ANRAzeT7&X<JcAPQnErZ7gQ)@f8!(>q$2pot?izXtQM$|^Wt8Ry
zO^rSiduc7AloJ&bCQ$k)r*!9FXKvBV&$SVyjOGAPq%?7yja;esH~z+0zZQThloEYM
z)THO2HYX6JkqlBs$?a%X^t}?*1|v!hTm9Nxpp;cY_S)-z`nH^|X2y6#DVqa;BBi)1
z14!+P&+&JEx)VVC2c_^P`|@fhB1(x2QbvjX2CwLN%RmiML>0Xkb1q5+UZ~9SiY3;5
ztyQR=W%|H+sAp0oMWTdPMD}^f>(FrWme~M?6D38|^P3p_EoPg2!M4(TjEwV&=+*u6
z?@;qA?gu+S^K;D$1r^bhYn_iGco#*uM4r(W{Lk#~e3$e$>Ya-jIO?4Y@X6!H+`f>J
z38;5IWPs4Vpm#F(-PfORrbk!sFMMoR4QR`HCj(Zg9&@^uAGf23k6^$G5l5<`mG@_E
z>ekM$QN)ij8=#0I=)|*^xh11j(MS<pt_25Iia6R~|LN!4sGg<VQOKfd!w#~DBlz9F
ze&!}_&K!jlQ5FL)nj(6Wyq~+)qhv+o!<)=D;6r6abYb{XuH)+Ufk+Y62?iQPR75jM
z&E>wBzx)nTM0RGMq9~%NQ-0to@A;xU8pRGVn?O2A*FaJjeRSQ(-r3y)kRsC50Vc|d
zsLY2@5}Z4z0&3>n85lHkxB`;VS3d2D*NsYjPztVK(2#=Sy_cX3_Ii;fk5=_XbHJxi
zI4{fr5?Pl#;a{lyTkraLRQ`K}0aj5FMdwx~FQ@!l7rFN!g97&gRhP;4-nGaJE)usx
zO#?6qld7x2F^cHV?Kcdl$aG_nP-G;ctgzsJ8AFEJQVz`)OBp;^E})2l{Q^j6zhm{$
z?YPe%;dTIF)^tvWKN<at&tE7xLK^~UNDh%Si}pKSgQVQOyc?B>;S3r|ghZUt7W^-7
zyWb)oza~Gzz)@9afH8-2x$QIB&qNN-kAzc$!+|z~Pn+|E8|7K`ca$dyO#lyhBC=+{
zDpd|=f3@KfGQN@lgK>#8D=hdIDoZAvpMawMl)<BDv&ho(+=w+`4}PK2yeS-8DcbA?
zBdgUVB!1ZsDB2U54P?<~(cH}E+^<<tZ;&Fo!k`rt(MNj_8Qf*;0HlZlqu|I$5%H`0
zeQ~!rQba~(wUQ#@L9=VwAEU`_Cj$~DH}-`Jk9bkce;-mr2U@|Q<cet2OMA@tm)`hy
z_Iq&v0naHYB3oMJ*y;5na#1BrV$e_tWs0cYH&vpGeILIG(c8pe6$jHlR({(H&DtHd
zcIYS!rmN($cJsL%xYAE=;P2<%zYPG2vv%}n8(DDen|3IVt=j=r=&Vh@rl4w-*XARl
zbeciRD5+E{qJLW#SsdBwcwE!TMTxN`XfQ4P8*;*LpWdfk(AqCJDjK!zZ6^RJv~8zu
zb0QVuH}P;pOo^bYbsF&}r~kOE4d|ghbrLqIrKzd$L!0n_9F_fT)HSwj<DTy>orRir
z=igupp?Qm22!G-+h5or2Pu|+zhCjz5*!P9Nc=0Lx0aj1Xy+3n{1BdQM9)7{V6g_-?
I*Ke`^2S<Phk^lez

literal 0
HcmV?d00001

diff --git a/logs/second_run_logs/run-2024-08-25--19-05-27/run_info.txt b/logs/second_run_logs/run-2024-08-25--19-05-27/run_info.txt
new file mode 100644
index 0000000..a22fb47
--- /dev/null
+++ b/logs/second_run_logs/run-2024-08-25--19-05-27/run_info.txt
@@ -0,0 +1,138 @@
+Run Name: run-2024-08-25--19-05-27
+Model: MultiTaskTimeSformer
+Training Arguments:
+  output_dir: ./results/run-2024-08-25--19-05-27
+  overwrite_output_dir: False
+  do_train: False
+  do_eval: True
+  do_predict: False
+  eval_strategy: steps
+  prediction_loss_only: False
+  per_device_train_batch_size: 16
+  per_device_eval_batch_size: 16
+  per_gpu_train_batch_size: None
+  per_gpu_eval_batch_size: None
+  gradient_accumulation_steps: 2
+  eval_accumulation_steps: None
+  eval_delay: 0
+  learning_rate: 5e-05
+  weight_decay: 0.01
+  adam_beta1: 0.9
+  adam_beta2: 0.999
+  adam_epsilon: 1e-08
+  max_grad_norm: 1.0
+  num_train_epochs: 3.0
+  max_steps: 420
+  lr_scheduler_type: linear
+  lr_scheduler_kwargs: {}
+  warmup_ratio: 0.1
+  warmup_steps: 0
+  log_level: passive
+  log_level_replica: warning
+  log_on_each_node: True
+  logging_dir: ./logs/run-2024-08-25--19-05-27
+  logging_strategy: steps
+  logging_first_step: False
+  logging_steps: 20
+  logging_nan_inf_filter: True
+  save_strategy: steps
+  save_steps: 100
+  save_total_limit: 2
+  save_safetensors: True
+  save_on_each_node: False
+  save_only_model: False
+  restore_callback_states_from_checkpoint: False
+  no_cuda: False
+  use_cpu: False
+  use_mps_device: False
+  seed: 42
+  data_seed: None
+  jit_mode_eval: False
+  use_ipex: False
+  bf16: False
+  fp16: True
+  fp16_opt_level: O1
+  half_precision_backend: auto
+  bf16_full_eval: False
+  fp16_full_eval: False
+  tf32: None
+  local_rank: 0
+  ddp_backend: None
+  tpu_num_cores: None
+  tpu_metrics_debug: False
+  debug: []
+  dataloader_drop_last: False
+  eval_steps: 50
+  dataloader_num_workers: 12
+  dataloader_prefetch_factor: None
+  past_index: -1
+  run_name: run-2024-08-25--19-05-27
+  disable_tqdm: False
+  remove_unused_columns: True
+  label_names: None
+  load_best_model_at_end: True
+  metric_for_best_model: f1
+  greater_is_better: True
+  ignore_data_skip: False
+  fsdp: []
+  fsdp_min_num_params: 0
+  fsdp_config: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
+  fsdp_transformer_layer_cls_to_wrap: None
+  accelerator_config: AcceleratorConfig(split_batches=False, dispatch_batches=None, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False)
+  deepspeed: None
+  label_smoothing_factor: 0.0
+  optim: adamw_torch
+  optim_args: None
+  adafactor: False
+  group_by_length: False
+  length_column_name: length
+  report_to: ['tensorboard']
+  ddp_find_unused_parameters: None
+  ddp_bucket_cap_mb: None
+  ddp_broadcast_buffers: None
+  dataloader_pin_memory: True
+  dataloader_persistent_workers: False
+  skip_memory_metrics: True
+  use_legacy_prediction_loop: False
+  push_to_hub: False
+  resume_from_checkpoint: None
+  hub_model_id: None
+  hub_strategy: every_save
+  hub_token: None
+  hub_private_repo: False
+  hub_always_push: False
+  gradient_checkpointing: False
+  gradient_checkpointing_kwargs: None
+  include_inputs_for_metrics: False
+  eval_do_concat_batches: True
+  fp16_backend: auto
+  evaluation_strategy: None
+  push_to_hub_model_id: None
+  push_to_hub_organization: None
+  push_to_hub_token: None
+  mp_parameters: 
+  auto_find_batch_size: False
+  full_determinism: False
+  torchdynamo: None
+  ray_scope: last
+  ddp_timeout: 1800
+  torch_compile: False
+  torch_compile_backend: None
+  torch_compile_mode: None
+  dispatch_batches: None
+  split_batches: None
+  include_tokens_per_second: False
+  include_num_input_tokens_seen: False
+  neftune_noise_alpha: None
+  optim_target_modules: None
+  batch_eval_metrics: False
+  eval_on_start: False
+  distributed_state: Distributed environment: NO
+Num processes: 1
+Process index: 0
+Local process index: 0
+Device: cuda
+
+  _n_gpu: 1
+  __cached__setup_devices: cuda:0
+  deepspeed_plugin: None
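The dump above mirrors the Hugging Face TrainingArguments used for this run. As a reading aid only, the key values could be reproduced roughly as follows (a minimal sketch, not part of the patch; it assumes a transformers version that accepts the `eval_strategy` name shown in the dump):

    # Sketch: key TrainingArguments reconstructed from the run_info.txt dump above.
    from transformers import TrainingArguments

    training_args = TrainingArguments(
        output_dir="./results/run-2024-08-25--19-05-27",
        logging_dir="./logs/run-2024-08-25--19-05-27",
        run_name="run-2024-08-25--19-05-27",
        eval_strategy="steps",
        eval_steps=50,
        logging_steps=20,
        save_strategy="steps",
        save_steps=100,
        save_total_limit=2,
        per_device_train_batch_size=16,
        per_device_eval_batch_size=16,
        gradient_accumulation_steps=2,   # effective train batch of 32 per device
        learning_rate=5e-5,
        weight_decay=0.01,
        warmup_ratio=0.1,
        max_steps=420,                   # takes precedence over num_train_epochs
        fp16=True,
        dataloader_num_workers=12,
        load_best_model_at_end=True,
        metric_for_best_model="f1",
        greater_is_better=True,
        report_to=["tensorboard"],
        seed=42,
    )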
diff --git a/notebooks/data_prep.ipynb b/notebooks/data_prep.ipynb
new file mode 100644
index 0000000..110b2e5
--- /dev/null
+++ b/notebooks/data_prep.ipynb
@@ -0,0 +1,473 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e006d00d0980cdb6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import json\n",
+    "from pathlib import Path\n",
+    "import torch\n",
+    "from torch.utils.data import Dataset\n",
+    "import torchvision.transforms as transforms\n",
+    "from torchvision.io import read_video\n",
+    "\n",
+    "\n",
+    "class VideoNormalize(torch.nn.Module):\n",
+    "    def __init__(self, mean, std):\n",
+    "        super().__init__()\n",
+    "        self.mean = torch.tensor(mean).view(3, 1, 1, 1)\n",
+    "        self.std = torch.tensor(std).view(3, 1, 1, 1)\n",
+    "\n",
+    "    def forward(self, video):\n",
+    "        return (video - self.mean) / self.std\n",
+    "\n",
+    "\n",
+    "class VideoDataset(Dataset):\n",
+    "    def __init__(self, root_dir, split, transform=None, clip_duration=5.0, target_fps=30):\n",
+    "        self.root_dir = Path(root_dir) / split\n",
+    "        self.transform = transform\n",
+    "        self.clip_duration = clip_duration\n",
+    "        self.target_fps = target_fps\n",
+    "        self.target_frames = int(clip_duration * target_fps)\n",
+    "        self.video_files = []\n",
+    "        self.labels = {}\n",
+    "\n",
+    "        # Load labels from labels.json\n",
+    "        labels_path = self.root_dir / 'labels.json'\n",
+    "        with open(labels_path, 'r') as f:\n",
+    "            self.labels = json.load(f)\n",
+    "\n",
+    "        # Collect video file paths\n",
+    "        self.video_files = list(self.root_dir.glob('*.avi'))\n",
+    "\n",
+    "    def __len__(self):\n",
+    "        return len(self.video_files)\n",
+    "\n",
+    "    def __getitem__(self, idx):\n",
+    "        video_path = str(self.video_files[idx])\n",
+    "        video_name = self.video_files[idx].name\n",
+    "        label = self.labels[video_name]['graininess']\n",
+    "\n",
+    "        # Read video using torchvision\n",
+    "        video, audio, meta = read_video(video_path, pts_unit='sec')\n",
+    "\n",
+    "        # Extract frame rate from metadata\n",
+    "        fps = meta['video_fps']\n",
+    "\n",
+    "        # Calculate the number of frames to sample based on the clip duration and video's fps\n",
+    "        num_frames_to_sample = min(int(self.clip_duration * fps), video.shape[0])\n",
+    "\n",
+    "        # Sample frames\n",
+    "        if num_frames_to_sample < video.shape[0]:\n",
+    "            start_idx = torch.randint(0, video.shape[0] - num_frames_to_sample + 1, (1,)).item()\n",
+    "            video = video[start_idx:start_idx + num_frames_to_sample]\n",
+    "\n",
+    "        # Resample to target FPS\n",
+    "        if fps != self.target_fps:\n",
+    "            indices = torch.linspace(0, video.shape[0] - 1, self.target_frames).long()\n",
+    "            video = video[indices]\n",
+    "\n",
+    "        # Ensure we have exactly target_frames\n",
+    "        if video.shape[0] < self.target_frames:\n",
+    "            video = torch.cat([video, video[-1].unsqueeze(0).repeat(self.target_frames - video.shape[0], 1, 1, 1)])\n",
+    "        elif video.shape[0] > self.target_frames:\n",
+    "            video = video[:self.target_frames]\n",
+    "\n",
+    "        # Change from (T, H, W, C) to (C, T, H, W)\n",
+    "        video = video.permute(3, 0, 1, 2)\n",
+    "\n",
+    "        if self.transform:\n",
+    "            video = self.transform(video)\n",
+    "\n",
+    "        return video, torch.tensor(label, dtype=torch.long)\n",
+    "\n",
+    "\n",
+    "# Example usage\n",
+    "transform = transforms.Compose([\n",
+    "    transforms.Lambda(lambda x: x.float() / 255.0),  # Normalize to [0, 1]\n",
+    "    VideoNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n",
+    "])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "a21a7b0a8e86913c",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-19T16:31:42.133858Z",
+     "start_time": "2024-08-19T16:31:42.128809Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "# Path to the dataset\n",
+    "data_root = Path('/Users/sv7/Projects/mtl-video-classification/data/graininess_100_balanced_subset_split')\n",
+    "\n",
+    "train_dataset = VideoDataset(data_root,\n",
+    "                             split='train',\n",
+    "                             transform=transform)\n",
+    "\n",
+    "test_dataset = VideoDataset(data_root,\n",
+    "                            split='test',\n",
+    "                            transform=transform)\n",
+    "\n",
+    "val_dataset = VideoDataset(data_root,\n",
+    "                           split='val',\n",
+    "                           transform=transform)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "id": "a9092ed9c5027597",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-19T16:31:42.761488Z",
+     "start_time": "2024-08-19T16:31:42.759166Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "# DataLoader example\n",
+    "from torch.utils.data import DataLoader\n",
+    "import os\n",
+    "\n",
+    "batch_size = 4\n",
+    "num_workers = os.cpu_count()\n",
+    "\n",
+    "train_loader = DataLoader(train_dataset,\n",
+    "                          batch_size=batch_size,\n",
+    "                          shuffle=True,\n",
+    "                          num_workers=num_workers)\n",
+    "\n",
+    "test_loader = DataLoader(test_dataset,\n",
+    "                         batch_size=batch_size,\n",
+    "                         shuffle=False,\n",
+    "                         num_workers=num_workers)\n",
+    "\n",
+    "val_loader = DataLoader(val_dataset,\n",
+    "                        batch_size=batch_size,\n",
+    "                        shuffle=False,\n",
+    "                        num_workers=num_workers)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 26,
+   "id": "77d2d43a9fe4c2c2",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-19T16:55:37.595972Z",
+     "start_time": "2024-08-19T16:55:36.873079Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "import json\n",
+    "from pathlib import Path\n",
+    "\n",
+    "train_data_path = Path('/Users/sv7/Projects/mtl-video-classification/data/graininess_100_balanced_subset_split/train')\n",
+    "labels_path = train_data_path / 'labels.json'\n",
+    "\n",
+    "# /Users/sv7/Projects/mtl-video-classification/data/graininess_100_balanced_subset_split/train/labels.json\n",
+    "video_files = list(train_data_path.glob('*.avi'))\n",
+    "with open(labels_path) as f:\n",
+    "    labels = json.load(f)\n",
+    "\n",
+    "video_path = str(video_files[5])\n",
+    "video, audio, meta = read_video(video_path, pts_unit='sec')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 32,
+   "id": "f7d927a0c9c73948",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-19T16:57:53.317039Z",
+     "start_time": "2024-08-19T16:57:53.314764Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "clip_duration = 5.0\n",
+    "\n",
+    "# Extract frame rate from metadata\n",
+    "fps = meta['video_fps']\n",
+    "\n",
+    "# Calculate the number of frames to sample based on the clip duration and video's fps\n",
+    "num_frames_to_sample = min(int(clip_duration * fps), video.shape[0])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 37,
+   "id": "b2c6a74027e9f3",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-19T17:13:49.049139Z",
+     "start_time": "2024-08-19T17:13:49.046501Z"
+    }
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "300"
+      ]
+     },
+     "execution_count": 37,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "num_frames_to_sample"
+   ]
+  },
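The value 300 shown above is consistent with a 5-second clip at roughly 60 fps: min(int(5.0 * 60), total_frames) = 300, provided the decoded clip holds at least 300 frames. The fps itself comes from meta['video_fps']; 60 fps is an inference here, not a value stored in this notebook. A minimal standalone check with assumed numbers:

    clip_duration = 5.0
    fps = 60.0            # assumed; the notebook reads this from meta['video_fps']
    total_frames = 450    # assumed length of the decoded clip
    num_frames_to_sample = min(int(clip_duration * fps), total_frames)
    assert num_frames_to_sample == 300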
+  {
+   "cell_type": "code",
+   "execution_count": 41,
+   "id": "4d960113bee6e247",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-19T18:51:49.590079Z",
+     "start_time": "2024-08-19T18:51:19.547632Z"
+    }
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Some weights of VivitForVideoClassification were not initialized from the model checkpoint at google/vivit-b-16x2-kinetics400 and are newly initialized because the shapes did not match:\n",
+      "- classifier.weight: found shape torch.Size([400, 768]) in the checkpoint and torch.Size([2, 768]) in the model instantiated\n",
+      "- classifier.bias: found shape torch.Size([400]) in the checkpoint and torch.Size([2]) in the model instantiated\n",
+      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n",
+      "/Users/sv7/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/transformers/feature_extraction_utils.py:142: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/utils/tensor_new.cpp:281.)\n",
+      "  return torch.tensor(value)\n"
+     ]
+    },
+    {
+     "ename": "RuntimeError",
+     "evalue": "MPS backend out of memory (MPS allocated: 17.77 GB, other allocations: 40.66 MB, max allowed: 18.13 GB). Tried to allocate 1.76 GB on private pool. Use PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0 to disable upper limit for memory allocations (may cause system failure).",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mRuntimeError\u001b[0m                              Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[41], line 124\u001b[0m\n\u001b[1;32m    116\u001b[0m trainer \u001b[38;5;241m=\u001b[39m Trainer(\n\u001b[1;32m    117\u001b[0m     model\u001b[38;5;241m=\u001b[39mmodel,\n\u001b[1;32m    118\u001b[0m     args\u001b[38;5;241m=\u001b[39mtraining_args,\n\u001b[1;32m    119\u001b[0m     train_dataset\u001b[38;5;241m=\u001b[39mtrain_dataset,\n\u001b[1;32m    120\u001b[0m     eval_dataset\u001b[38;5;241m=\u001b[39mval_dataset,\n\u001b[1;32m    121\u001b[0m )\n\u001b[1;32m    123\u001b[0m \u001b[38;5;66;03m# Cell 8: Train the model\u001b[39;00m\n\u001b[0;32m--> 124\u001b[0m \u001b[43mtrainer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    126\u001b[0m \u001b[38;5;66;03m# Cell 9: Evaluate on test set\u001b[39;00m\n\u001b[1;32m    127\u001b[0m test_results \u001b[38;5;241m=\u001b[39m trainer\u001b[38;5;241m.\u001b[39mevaluate(test_dataset)\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/transformers/trainer.py:1948\u001b[0m, in \u001b[0;36mTrainer.train\u001b[0;34m(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)\u001b[0m\n\u001b[1;32m   1946\u001b[0m         hf_hub_utils\u001b[38;5;241m.\u001b[39menable_progress_bars()\n\u001b[1;32m   1947\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1948\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43minner_training_loop\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m   1949\u001b[0m \u001b[43m        \u001b[49m\u001b[43margs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1950\u001b[0m \u001b[43m        \u001b[49m\u001b[43mresume_from_checkpoint\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mresume_from_checkpoint\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1951\u001b[0m \u001b[43m        \u001b[49m\u001b[43mtrial\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtrial\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1952\u001b[0m \u001b[43m        \u001b[49m\u001b[43mignore_keys_for_eval\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mignore_keys_for_eval\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1953\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/transformers/trainer.py:2289\u001b[0m, in \u001b[0;36mTrainer._inner_training_loop\u001b[0;34m(self, batch_size, args, resume_from_checkpoint, trial, ignore_keys_for_eval)\u001b[0m\n\u001b[1;32m   2286\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcontrol \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcallback_handler\u001b[38;5;241m.\u001b[39mon_step_begin(args, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstate, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcontrol)\n\u001b[1;32m   2288\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39maccelerator\u001b[38;5;241m.\u001b[39maccumulate(model):\n\u001b[0;32m-> 2289\u001b[0m     tr_loss_step \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtraining_step\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   2291\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\n\u001b[1;32m   2292\u001b[0m     args\u001b[38;5;241m.\u001b[39mlogging_nan_inf_filter\n\u001b[1;32m   2293\u001b[0m     \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_torch_xla_available()\n\u001b[1;32m   2294\u001b[0m     \u001b[38;5;129;01mand\u001b[39;00m (torch\u001b[38;5;241m.\u001b[39misnan(tr_loss_step) \u001b[38;5;129;01mor\u001b[39;00m torch\u001b[38;5;241m.\u001b[39misinf(tr_loss_step))\n\u001b[1;32m   2295\u001b[0m ):\n\u001b[1;32m   2296\u001b[0m     \u001b[38;5;66;03m# if loss is nan or inf simply add the average of previous logged losses\u001b[39;00m\n\u001b[1;32m   2297\u001b[0m     tr_loss \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m tr_loss \u001b[38;5;241m/\u001b[39m (\u001b[38;5;241m1\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstate\u001b[38;5;241m.\u001b[39mglobal_step \u001b[38;5;241m-\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_globalstep_last_logged)\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/transformers/trainer.py:3328\u001b[0m, in \u001b[0;36mTrainer.training_step\u001b[0;34m(self, model, inputs)\u001b[0m\n\u001b[1;32m   3325\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m loss_mb\u001b[38;5;241m.\u001b[39mreduce_mean()\u001b[38;5;241m.\u001b[39mdetach()\u001b[38;5;241m.\u001b[39mto(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs\u001b[38;5;241m.\u001b[39mdevice)\n\u001b[1;32m   3327\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcompute_loss_context_manager():\n\u001b[0;32m-> 3328\u001b[0m     loss \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcompute_loss\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   3330\u001b[0m \u001b[38;5;28;01mdel\u001b[39;00m inputs\n\u001b[1;32m   3331\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\n\u001b[1;32m   3332\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs\u001b[38;5;241m.\u001b[39mtorch_empty_cache_steps \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   3333\u001b[0m     \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstate\u001b[38;5;241m.\u001b[39mglobal_step \u001b[38;5;241m%\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs\u001b[38;5;241m.\u001b[39mtorch_empty_cache_steps \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[1;32m   3334\u001b[0m ):\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/transformers/trainer.py:3373\u001b[0m, in \u001b[0;36mTrainer.compute_loss\u001b[0;34m(self, model, inputs, return_outputs)\u001b[0m\n\u001b[1;32m   3371\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m   3372\u001b[0m     labels \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m-> 3373\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43minputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   3374\u001b[0m \u001b[38;5;66;03m# Save past state if it exists\u001b[39;00m\n\u001b[1;32m   3375\u001b[0m \u001b[38;5;66;03m# TODO: this needs to be fixed and made cleaner later.\u001b[39;00m\n\u001b[1;32m   3376\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs\u001b[38;5;241m.\u001b[39mpast_index \u001b[38;5;241m>\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m:\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/torch/nn/modules/module.py:1553\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1551\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1552\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1553\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/torch/nn/modules/module.py:1562\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1557\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1558\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1559\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1560\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1561\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1562\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1564\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m   1565\u001b[0m     result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/transformers/models/vivit/modeling_vivit.py:759\u001b[0m, in \u001b[0;36mVivitForVideoClassification.forward\u001b[0;34m(self, pixel_values, head_mask, labels, output_attentions, output_hidden_states, interpolate_pos_encoding, return_dict)\u001b[0m\n\u001b[1;32m    673\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124mr\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m    674\u001b[0m \u001b[38;5;124;03mlabels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\u001b[39;00m\n\u001b[1;32m    675\u001b[0m \u001b[38;5;124;03m    Labels for computing the image classification/regression loss. Indices should be in `[0, ...,\u001b[39;00m\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    755\u001b[0m \u001b[38;5;124;03mLABEL_116\u001b[39;00m\n\u001b[1;32m    756\u001b[0m \u001b[38;5;124;03m```\"\"\"\u001b[39;00m\n\u001b[1;32m    757\u001b[0m return_dict \u001b[38;5;241m=\u001b[39m return_dict \u001b[38;5;28;01mif\u001b[39;00m return_dict \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39muse_return_dict\n\u001b[0;32m--> 759\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mvivit\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    760\u001b[0m \u001b[43m    \u001b[49m\u001b[43mpixel_values\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    761\u001b[0m \u001b[43m    \u001b[49m\u001b[43mhead_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mhead_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    762\u001b[0m \u001b[43m    \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    763\u001b[0m \u001b[43m    \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    764\u001b[0m \u001b[43m    \u001b[49m\u001b[43minterpolate_pos_encoding\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minterpolate_pos_encoding\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    765\u001b[0m \u001b[43m    \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_dict\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    766\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    768\u001b[0m sequence_output \u001b[38;5;241m=\u001b[39m outputs[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m    770\u001b[0m logits \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclassifier(sequence_output[:, \u001b[38;5;241m0\u001b[39m, :])\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/torch/nn/modules/module.py:1553\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1551\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1552\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1553\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/torch/nn/modules/module.py:1562\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1557\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1558\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1559\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1560\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1561\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1562\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1564\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m   1565\u001b[0m     result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/transformers/models/vivit/modeling_vivit.py:611\u001b[0m, in \u001b[0;36mVivitModel.forward\u001b[0;34m(self, pixel_values, head_mask, output_attentions, output_hidden_states, interpolate_pos_encoding, return_dict)\u001b[0m\n\u001b[1;32m    607\u001b[0m head_mask \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mget_head_mask(head_mask, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39mnum_hidden_layers)\n\u001b[1;32m    609\u001b[0m embedding_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39membeddings(pixel_values, interpolate_pos_encoding\u001b[38;5;241m=\u001b[39minterpolate_pos_encoding)\n\u001b[0;32m--> 611\u001b[0m encoder_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mencoder\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    612\u001b[0m \u001b[43m    \u001b[49m\u001b[43membedding_output\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    613\u001b[0m \u001b[43m    \u001b[49m\u001b[43mhead_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mhead_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    614\u001b[0m \u001b[43m    \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    615\u001b[0m \u001b[43m    \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    616\u001b[0m \u001b[43m    \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_dict\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    617\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    618\u001b[0m sequence_output \u001b[38;5;241m=\u001b[39m encoder_outputs[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m    619\u001b[0m sequence_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlayernorm(sequence_output)\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/torch/nn/modules/module.py:1553\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1551\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1552\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1553\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/torch/nn/modules/module.py:1562\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1557\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1558\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1559\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1560\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1561\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1562\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1564\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m   1565\u001b[0m     result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/transformers/models/vivit/modeling_vivit.py:378\u001b[0m, in \u001b[0;36mVivitEncoder.forward\u001b[0;34m(self, hidden_states, head_mask, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m    371\u001b[0m     layer_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_gradient_checkpointing_func(\n\u001b[1;32m    372\u001b[0m         layer_module\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__call__\u001b[39m,\n\u001b[1;32m    373\u001b[0m         hidden_states,\n\u001b[1;32m    374\u001b[0m         layer_head_mask,\n\u001b[1;32m    375\u001b[0m         output_attentions,\n\u001b[1;32m    376\u001b[0m     )\n\u001b[1;32m    377\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 378\u001b[0m     layer_outputs \u001b[38;5;241m=\u001b[39m \u001b[43mlayer_module\u001b[49m\u001b[43m(\u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlayer_head_mask\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    380\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m layer_outputs[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m    382\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m output_attentions:\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/torch/nn/modules/module.py:1553\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1551\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1552\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1553\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/torch/nn/modules/module.py:1562\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1557\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1558\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1559\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1560\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1561\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1562\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1564\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m   1565\u001b[0m     result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/transformers/models/vivit/modeling_vivit.py:321\u001b[0m, in \u001b[0;36mVivitLayer.forward\u001b[0;34m(self, hidden_states, head_mask, output_attentions)\u001b[0m\n\u001b[1;32m    320\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, hidden_states, head_mask\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, output_attentions\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m):\n\u001b[0;32m--> 321\u001b[0m     self_attention_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mattention\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    322\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;66;43;03m# in Vivit, layernorm is applied before self-attention\u001b[39;49;00m\n\u001b[1;32m    323\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlayernorm_before\u001b[49m\u001b[43m(\u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    324\u001b[0m \u001b[43m        \u001b[49m\u001b[43mhead_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    325\u001b[0m \u001b[43m        \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    326\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    327\u001b[0m     attention_output \u001b[38;5;241m=\u001b[39m self_attention_outputs[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m    328\u001b[0m     \u001b[38;5;66;03m# add self attentions if we output attention weights\u001b[39;00m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/torch/nn/modules/module.py:1553\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1551\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1552\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1553\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/torch/nn/modules/module.py:1562\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1557\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1558\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1559\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1560\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1561\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1562\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1564\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m   1565\u001b[0m     result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/transformers/models/vivit/modeling_vivit.py:265\u001b[0m, in \u001b[0;36mVivitAttention.forward\u001b[0;34m(self, hidden_states, head_mask, output_attentions)\u001b[0m\n\u001b[1;32m    259\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\n\u001b[1;32m    260\u001b[0m     \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m    261\u001b[0m     hidden_states: torch\u001b[38;5;241m.\u001b[39mTensor,\n\u001b[1;32m    262\u001b[0m     head_mask: Optional[torch\u001b[38;5;241m.\u001b[39mTensor] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m    263\u001b[0m     output_attentions: \u001b[38;5;28mbool\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[1;32m    264\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Union[Tuple[torch\u001b[38;5;241m.\u001b[39mTensor, torch\u001b[38;5;241m.\u001b[39mTensor], Tuple[torch\u001b[38;5;241m.\u001b[39mTensor]]:\n\u001b[0;32m--> 265\u001b[0m     self_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mattention\u001b[49m\u001b[43m(\u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mhead_mask\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    267\u001b[0m     attention_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput(self_outputs[\u001b[38;5;241m0\u001b[39m], hidden_states)\n\u001b[1;32m    269\u001b[0m     outputs \u001b[38;5;241m=\u001b[39m (attention_output,) \u001b[38;5;241m+\u001b[39m self_outputs[\u001b[38;5;241m1\u001b[39m:]  \u001b[38;5;66;03m# add attentions if we output them\u001b[39;00m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/torch/nn/modules/module.py:1553\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1551\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1552\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1553\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/torch/nn/modules/module.py:1562\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1557\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1558\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1559\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1560\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1561\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1562\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1564\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m   1565\u001b[0m     result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
+      "File \u001b[0;32m~/opt/anaconda3/envs/video_classification/lib/python3.12/site-packages/transformers/models/vivit/modeling_vivit.py:188\u001b[0m, in \u001b[0;36mVivitSelfAttention.forward\u001b[0;34m(self, hidden_states, head_mask, output_attentions)\u001b[0m\n\u001b[1;32m    185\u001b[0m query_layer \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtranspose_for_scores(mixed_query_layer)\n\u001b[1;32m    187\u001b[0m \u001b[38;5;66;03m# Take the dot product between \"query\" and \"key\" to get the raw attention scores.\u001b[39;00m\n\u001b[0;32m--> 188\u001b[0m attention_scores \u001b[38;5;241m=\u001b[39m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmatmul\u001b[49m\u001b[43m(\u001b[49m\u001b[43mquery_layer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkey_layer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtranspose\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[38;5;241;43m2\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    190\u001b[0m attention_scores \u001b[38;5;241m=\u001b[39m attention_scores \u001b[38;5;241m/\u001b[39m math\u001b[38;5;241m.\u001b[39msqrt(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mattention_head_size)\n\u001b[1;32m    192\u001b[0m \u001b[38;5;66;03m# Normalize the attention scores to probabilities.\u001b[39;00m\n",
+      "\u001b[0;31mRuntimeError\u001b[0m: MPS backend out of memory (MPS allocated: 17.77 GB, other allocations: 40.66 MB, max allowed: 18.13 GB). Tried to allocate 1.76 GB on private pool. Use PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0 to disable upper limit for memory allocations (may cause system failure)."
+     ]
+    }
+   ],
+   "source": [
+    "# Cell 1: Import necessary libraries\n",
+    "import os\n",
+    "import json\n",
+    "import random\n",
+    "import numpy as np\n",
+    "import torch\n",
+    "from torch.utils.data import Dataset, DataLoader\n",
+    "from torchvision.io import read_video\n",
+    "from transformers import VivitImageProcessor, VivitForVideoClassification, TrainingArguments, Trainer\n",
+    "\n",
+    "\n",
+    "# Cell 2: Set random seed for reproducibility\n",
+    "def set_seed(seed):\n",
+    "    random.seed(seed)\n",
+    "    np.random.seed(seed)\n",
+    "    torch.manual_seed(seed)\n",
+    "    torch.cuda.manual_seed_all(seed)\n",
+    "\n",
+    "\n",
+    "set_seed(42)\n",
+    "\n",
+    "\n",
+    "# Cell 3: Define custom dataset class\n",
+    "class VideoDataset(Dataset):\n",
+    "    def __init__(self, data_dir, split, processor, max_frames=32):\n",
+    "        self.data_dir = os.path.join(data_dir, split)\n",
+    "        self.processor = processor\n",
+    "        self.max_frames = max_frames\n",
+    "        \n",
+    "        with open(os.path.join(self.data_dir, 'labels.json'), 'r') as f:\n",
+    "            self.labels = json.load(f)\n",
+    "        \n",
+    "        self.video_files = list(self.labels.keys())\n",
+    "    \n",
+    "    def __len__(self):\n",
+    "        return len(self.video_files)\n",
+    "    \n",
+    "    def __getitem__(self, idx):\n",
+    "        video_file = self.video_files[idx]\n",
+    "        video_path = os.path.join(self.data_dir, video_file)\n",
+    "        \n",
+    "        # Read video\n",
+    "        video, _, _ = read_video(video_path, pts_unit='sec')\n",
+    "        \n",
+    "        # Sample frames\n",
+    "        num_frames = video.shape[0]\n",
+    "        if num_frames > self.max_frames:\n",
+    "            start = random.randint(0, num_frames - self.max_frames)\n",
+    "            video = video[start:start+self.max_frames]\n",
+    "        else:\n",
+    "            video = video[:self.max_frames]\n",
+    "        \n",
+    "        # Ensure we have 3 channels (RGB)\n",
+    "        if video.shape[-1] != 3:\n",
+    "            video = video.expand(-1, -1, -1, 3)\n",
+    "        \n",
+    "        # Convert to numpy array and ensure correct shape\n",
+    "        video = video.numpy()\n",
+    "        \n",
+    "        # Ensure the video has the correct shape (num_frames, height, width, channels)\n",
+    "        if video.shape[1] == 3:  # If channels are in the second dimension\n",
+    "            video = np.transpose(video, (0, 2, 3, 1))\n",
+    "        \n",
+    "        # Process frames\n",
+    "        pixel_values = self.processor(\n",
+    "            list(video),\n",
+    "            return_tensors=\"pt\",\n",
+    "            do_resize=True,\n",
+    "            size={\"shortest_edge\": 224},  # Adjust this size as needed\n",
+    "            do_center_crop=True,\n",
+    "            crop_size={\"height\": 224, \"width\": 224},  # Adjust this size as needed\n",
+    "        ).pixel_values\n",
+    "        \n",
+    "        # Get label\n",
+    "        label = self.labels[video_file]['graininess']\n",
+    "        \n",
+    "        return {'pixel_values': pixel_values.squeeze(), 'label': torch.tensor(label)}\n",
+    "\n",
+    "\n",
+    "# Cell 4: Initialize ViViT model and processor\n",
+    "model_name = \"google/vivit-b-16x2-kinetics400\"\n",
+    "processor = VivitImageProcessor.from_pretrained(model_name,\n",
+    "                                                ignore_mismatched_sizes=True)\n",
+    "model = VivitForVideoClassification.from_pretrained(model_name, num_labels=2,\n",
+    "                                                    ignore_mismatched_sizes=True)\n",
+    "\n",
+    "# Cell 5: Prepare datasets and dataloaders\n",
+    "data_dir = \"/Users/sv7/Projects/mtl-video-classification/data/graininess_100_balanced_subset_split\"\n",
+    "batch_size = 4\n",
+    "\n",
+    "train_dataset = VideoDataset(data_dir, 'train', processor)\n",
+    "val_dataset = VideoDataset(data_dir, 'val', processor)\n",
+    "test_dataset = VideoDataset(data_dir, 'test', processor)\n",
+    "\n",
+    "train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n",
+    "val_dataloader = DataLoader(val_dataset, batch_size=batch_size)\n",
+    "test_dataloader = DataLoader(test_dataset, batch_size=batch_size)\n",
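+    "# (The Hugging Face Trainer below builds its own DataLoaders from the Dataset\n",
+    "# objects, so these three loaders are only useful for manual batch inspection.)\n",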
+    "\n",
+    "# Cell 6: Define training arguments\n",
+    "training_args = TrainingArguments(\n",
+    "    output_dir=\"./results\",\n",
+    "    num_train_epochs=3,\n",
+    "    per_device_train_batch_size=batch_size,\n",
+    "    per_device_eval_batch_size=batch_size,\n",
+    "    warmup_steps=500,\n",
+    "    weight_decay=0.01,\n",
+    "    logging_dir='./logs',\n",
+    "    logging_steps=10,\n",
+    "    eval_strategy=\"epoch\",\n",
+    "    save_strategy=\"epoch\",\n",
+    "    load_best_model_at_end=True,\n",
+    ")\n",
+    "\n",
+    "# Cell 7: Define Trainer\n",
+    "trainer = Trainer(\n",
+    "    model=model,\n",
+    "    args=training_args,\n",
+    "    train_dataset=train_dataset,\n",
+    "    eval_dataset=val_dataset,\n",
+    ")\n",
+    "\n",
+    "# Cell 8: Train the model\n",
+    "trainer.train()\n",
+    "\n",
+    "# Cell 9: Evaluate on test set\n",
+    "test_results = trainer.evaluate(test_dataset)\n",
+    "print(test_results)\n",
+    "\n",
+    "# Cell 10: Save the model\n",
+    "model.save_pretrained(\"./vivit_graininess_classifier\")\n",
+    "processor.save_pretrained(\"./vivit_graininess_classifier\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c239dc3cc6e29490",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "# Cell 11: Inference example\n",
+    "def predict_video(video_path):\n",
+    "    video, _, _ = read_video(video_path, pts_unit='sec')\n",
+    "    # read_video returns frames as (T, H, W, C), so no permute is needed here\n",
+    "    inputs = processor(list(video.numpy()), return_tensors=\"pt\")\n",
+    "\n",
+    "    with torch.no_grad():\n",
+    "        outputs = model(**inputs)\n",
+    "        logits = outputs.logits\n",
+    "        predicted_class = logits.argmax(-1).item()\n",
+    "\n",
+    "    return \"Grainy\" if predicted_class == 1 else \"Not Grainy\"\n",
+    "\n",
+    "\n",
+    "# Example usage\n",
+    "example_video_path = \"path/to/example/video.avi\"\n",
+    "prediction = predict_video(example_video_path)\n",
+    "print(f\"The video is predicted to be: {prediction}\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/subset_for_patching.ipynb b/notebooks/subset_for_patching.ipynb
new file mode 100644
index 0000000..b5852cf
--- /dev/null
+++ b/notebooks/subset_for_patching.ipynb
@@ -0,0 +1,293 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "id": "initial_id",
+   "metadata": {
+    "collapsed": true,
+    "ExecuteTime": {
+     "end_time": "2024-08-23T23:46:55.159025Z",
+     "start_time": "2024-08-23T23:46:55.155910Z"
+    }
+   },
+   "source": [
+    "files_to_use = ['Tennis_1920x1080_24fps_8bit_420_Motion_QP47_SFB_1.avi',\n",
+    "                'Tennis_1920x1080_24fps_8bit_420_Motion_QP32_BT_1.avi',\n",
+    "                'DanceKiss_1920x1080_25fps_8bit_420_Dark_QP47_FB_4.avi',\n",
+    "                'DanceKiss_1920x1080_25fps_8bit_420_Dark_QP32_SB_4.avi',\n",
+    "                'Kimono1_1920x1080_24fps_8bit_420_graininess_QP47_B_4.avi',\n",
+    "                'Kimono1_1920x1080_24fps_8bit_420_graininess_QP32_FB_1.avi',\n",
+    "                'OldTownCross_1920x1080_25fps_8bit_420_graininess_QP47_SB_4.avi',\n",
+    "                'OldTownCross_1920x1080_25fps_8bit_420_graininess_QP32_SBT_2.avi',\n",
+    "                'BirdsInCage_1920x1080_30fps_8bit_420_Pristine_QP47_SFB_3.avi',\n",
+    "                'BirdsInCage_1920x1080_30fps_8bit_420_Pristine_QP32_FBT_1.avi',\n",
+    "                'ElFuente1_1920x1080_30fps_8bit_420_aliasing_QP47_SFB_1.avi',\n",
+    "                'ElFuente1_1920x1080_30fps_8bit_420_aliasing_QP32_FB_4.avi',\n",
+    "                'ElFuente2_1920x1080_30fps_8bit_420_graininess_QP47_SFB_3.avi',\n",
+    "                'ElFuente2_1920x1080_30fps_8bit_420_graininess_QP32_S_2.avi',\n",
+    "                'BQTerrace_1920x1080_30fps_8bit_420_aliasing_QP47_FB_3.avi',\n",
+    "                'BQTerrace_1920x1080_30fps_8bit_420_aliasing_QP32_SF_4.avi',\n",
+    "                'CrowdRun_1920x1080_25fps_8bit_420_aliasing_QP47_SFT_4.avi',\n",
+    "                'CrowdRun_1920x1080_25fps_8bit_420_aliasing_QP32_SF_1.avi',\n",
+    "                'Seeking_1920x1080_25fps_8bit_420_graininess_QP47_SF_2.avi',\n",
+    "                'Seeking_1920x1080_25fps_8bit_420_graininess_QP32_SFT_1.avi',\n",
+    "                'riverbed_1920x1080_25fps_8bit_420_banding_QP47_SFBT_2.avi',\n",
+    "                'riverbed_1920x1080_25fps_8bit_420_banding_QP32_S_3.avi',\n",
+    "                'station_1920x1080_30fps_8bit_420_graininess_QP47_SBT_2.avi',\n",
+    "                'station_1920x1080_30fps_8bit_420_graininess_QP32_SB_1.avi',\n",
+    "                'shields_1280x720_50fps_8bit_420_graininess_QP47_SBT_3.avi',\n",
+    "                'shields_1280x720_50fps_8bit_420_graininess_QP32_SFBT_2.avi']"
+   ],
+   "outputs": [],
+   "execution_count": 1
+  },
+  {
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-23T23:47:04.814760Z",
+     "start_time": "2024-08-23T23:47:04.812533Z"
+    }
+   },
+   "cell_type": "code",
+   "source": "from pathlib import Path",
+   "id": "f68ef83150ac3734",
+   "outputs": [],
+   "execution_count": 2
+  },
+  {
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-23T23:50:25.116050Z",
+     "start_time": "2024-08-23T23:50:25.090048Z"
+    }
+   },
+   "cell_type": "code",
+   "source": [
+    "dataset_path = Path('/Volumes/SSD/BVIArtefact')\n",
+    "\n",
+    "parts = ['part1', 'part2']\n",
+    "\n",
+    "# file paths of all files in files_to_use in part1 and part2\n",
+    "file_paths = []\n",
+    "for part in parts:\n",
+    "    file_path = dataset_path / part\n",
+    "    all_files = list(file_path.glob('*.avi'))\n",
+    "    for file in all_files:\n",
+    "        if file.name in files_to_use:\n",
+    "            file_paths.append(file)    "
+   ],
+   "id": "fdfacf937f9f286e",
+   "outputs": [],
+   "execution_count": 3
+  },
+  {
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-23T23:50:36.713565Z",
+     "start_time": "2024-08-23T23:50:36.711235Z"
+    }
+   },
+   "cell_type": "code",
+   "source": "len(file_paths)",
+   "id": "b4c910a7e71b9503",
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "26"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "execution_count": 5
+  },
+  {
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-23T23:51:31.282402Z",
+     "start_time": "2024-08-23T23:51:05.913927Z"
+    }
+   },
+   "cell_type": "code",
+   "source": [
+    "# copy files to a new folder\n",
+    "import shutil\n",
+    "\n",
+    "new_folder = Path('/Volumes/SSD/BVIArtefact/subset_for_patching')\n",
+    "new_folder.mkdir(exist_ok=True)\n",
+    "for file in file_paths:\n",
+    "    shutil.copy(file, new_folder)"
+   ],
+   "id": "fa2b07cf8f56b3c6",
+   "outputs": [],
+   "execution_count": 6
+  },
+  {
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-23T23:53:20.804168Z",
+     "start_time": "2024-08-23T23:53:20.793023Z"
+    }
+   },
+   "cell_type": "code",
+   "source": [
+    "# copy labels of files in file from /Volumes/SSD/BVIArtefact/processed_labels.json to /Volumes/SSD/BVIArtefact/subset_for_patching\n",
+    "import json\n",
+    "\n",
+    "with open(dataset_path / 'processed_labels.json', 'r') as f:\n",
+    "    labels = json.load(f)\n",
+    "    \n",
+    "new_labels = {}\n",
+    "for file in file_paths:\n",
+    "    new_labels[file.name] = labels[file.name]\n",
+    "    \n",
+    "with open(new_folder / 'labels.json', 'w') as f:\n",
+    "    json.dump(new_labels, f)"
+   ],
+   "id": "3ab6eaf72d2ebf1c",
+   "outputs": [],
+   "execution_count": 7
+  },
+  {
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-08-24T00:02:44.629506Z",
+     "start_time": "2024-08-24T00:02:44.547315Z"
+    }
+   },
+   "cell_type": "code",
+   "source": [
+    "import os\n",
+    "import random\n",
+    "\n",
+    "# Paths to the patching subset and its labels file\n",
+    "dataset_dir = '/Volumes/SSD/subsets/subset_for_patching'\n",
+    "labels_file = os.path.join(dataset_dir, 'labels.json')\n",
+    "\n",
+    "# Load the labels\n",
+    "with open(labels_file, 'r') as f:\n",
+    "    labels = json.load(f)\n",
+    "\n",
+    "# Split ratios\n",
+    "train_ratio = 0.7\n",
+    "val_ratio = 0.15\n",
+    "test_ratio = 0.15\n",
+    "\n",
+    "# Ensure the output directories exist\n",
+    "train_dir = os.path.join(dataset_dir, 'train')\n",
+    "val_dir = os.path.join(dataset_dir, 'val')\n",
+    "test_dir = os.path.join(dataset_dir, 'test')\n",
+    "\n",
+    "os.makedirs(train_dir, exist_ok=True)\n",
+    "os.makedirs(val_dir, exist_ok=True)\n",
+    "os.makedirs(test_dir, exist_ok=True)\n",
+    "\n",
+    "# Get list of all video files\n",
+    "video_files = [f for f in os.listdir(dataset_dir) if f.endswith('.avi')]\n",
+    "\n",
+    "# Shuffle the dataset\n",
+    "random.shuffle(video_files)\n",
+    "\n",
+    "# Calculate the split indices\n",
+    "train_idx = int(len(video_files) * train_ratio)\n",
+    "val_idx = train_idx + int(len(video_files) * val_ratio)\n",
+    "\n",
+    "# Split the files\n",
+    "train_files = video_files[:train_idx]\n",
+    "val_files = video_files[train_idx:val_idx]\n",
+    "test_files = video_files[val_idx:]\n",
+    "\n",
+    "# Helper function to move files and save labels\n",
+    "def move_files_and_save_labels(files, destination_dir, label_dict):\n",
+    "    dest_labels = {}\n",
+    "    for file in files:\n",
+    "        # Skip hidden files or files not present in the label_dict\n",
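+    "        # (macOS writes '._*' AppleDouble sidecar files on external drives; they appear\n",
+    "        # in os.listdir() but have no entry in labels.json, so they are skipped here.)\n",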
+    "        if file not in label_dict:\n",
+    "            print(f\"Skipping {file} as it is not found in labels.json\")\n",
+    "            continue\n",
+    "        src_path = os.path.join(dataset_dir, file)\n",
+    "        dest_path = os.path.join(destination_dir, file)\n",
+    "        shutil.move(src_path, dest_path)\n",
+    "        dest_labels[file] = label_dict[file]\n",
+    "    \n",
+    "    # Save the labels file\n",
+    "    labels_file_path = os.path.join(destination_dir, 'labels.json')\n",
+    "    with open(labels_file_path, 'w') as f:\n",
+    "        json.dump(dest_labels, f, indent=4)\n",
+    "\n",
+    "# Move the files and save the corresponding labels\n",
+    "move_files_and_save_labels(train_files, train_dir, labels)\n",
+    "move_files_and_save_labels(val_files, val_dir, labels)\n",
+    "move_files_and_save_labels(test_files, test_dir, labels)\n",
+    "\n",
+    "print(\"Dataset has been reorganized successfully!\")"
+   ],
+   "id": "9b909bde7c2e0915",
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Skipping ._Kimono1_1920x1080_24fps_8bit_420_graininess_QP32_FB_1.avi as it is not found in labels.json\n",
+      "Skipping ._ElFuente1_1920x1080_30fps_8bit_420_aliasing_QP32_FB_4.avi as it is not found in labels.json\n",
+      "Skipping ._BQTerrace_1920x1080_30fps_8bit_420_aliasing_QP32_SF_4.avi as it is not found in labels.json\n",
+      "Skipping ._Seeking_1920x1080_25fps_8bit_420_graininess_QP47_SF_2.avi as it is not found in labels.json\n",
+      "Skipping ._BirdsInCage_1920x1080_30fps_8bit_420_Pristine_QP32_FBT_1.avi as it is not found in labels.json\n",
+      "Skipping ._riverbed_1920x1080_25fps_8bit_420_banding_QP32_S_3.avi as it is not found in labels.json\n",
+      "Skipping ._station_1920x1080_30fps_8bit_420_graininess_QP32_SB_1.avi as it is not found in labels.json\n",
+      "Skipping ._shields_1280x720_50fps_8bit_420_graininess_QP32_SFBT_2.avi as it is not found in labels.json\n",
+      "Skipping ._DanceKiss_1920x1080_25fps_8bit_420_Dark_QP32_SB_4.avi as it is not found in labels.json\n",
+      "Skipping ._DanceKiss_1920x1080_25fps_8bit_420_Dark_QP47_FB_4.avi as it is not found in labels.json\n",
+      "Skipping ._riverbed_1920x1080_25fps_8bit_420_banding_QP47_SFBT_2.avi as it is not found in labels.json\n",
+      "Skipping ._Seeking_1920x1080_25fps_8bit_420_graininess_QP32_SFT_1.avi as it is not found in labels.json\n",
+      "Skipping ._BQTerrace_1920x1080_30fps_8bit_420_aliasing_QP47_FB_3.avi as it is not found in labels.json\n",
+      "Skipping ._shields_1280x720_50fps_8bit_420_graininess_QP47_SBT_3.avi as it is not found in labels.json\n",
+      "Skipping ._BirdsInCage_1920x1080_30fps_8bit_420_Pristine_QP47_SFB_3.avi as it is not found in labels.json\n",
+      "Skipping ._Tennis_1920x1080_24fps_8bit_420_Motion_QP32_BT_1.avi as it is not found in labels.json\n",
+      "Skipping ._ElFuente1_1920x1080_30fps_8bit_420_aliasing_QP47_SFB_1.avi as it is not found in labels.json\n",
+      "Skipping ._OldTownCross_1920x1080_25fps_8bit_420_graininess_QP47_SB_4.avi as it is not found in labels.json\n",
+      "Skipping ._ElFuente2_1920x1080_30fps_8bit_420_graininess_QP32_S_2.avi as it is not found in labels.json\n",
+      "Skipping ._CrowdRun_1920x1080_25fps_8bit_420_aliasing_QP32_SF_1.avi as it is not found in labels.json\n",
+      "Skipping ._ElFuente2_1920x1080_30fps_8bit_420_graininess_QP47_SFB_3.avi as it is not found in labels.json\n",
+      "Skipping ._Kimono1_1920x1080_24fps_8bit_420_graininess_QP47_B_4.avi as it is not found in labels.json\n",
+      "Skipping ._Tennis_1920x1080_24fps_8bit_420_Motion_QP47_SFB_1.avi as it is not found in labels.json\n",
+      "Dataset has been reorganized successfully!\n"
+     ]
+    }
+   ],
+   "execution_count": 10
+  },
+  {
+   "metadata": {},
+   "cell_type": "code",
+   "outputs": [],
+   "execution_count": null,
+   "source": "",
+   "id": "e52181730c5b3138"
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..74a4f29
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,12 @@
+albumentations==1.4.14
+av==12.3.0
+datasets==2.20.0
+numpy==2.1.0
+opencv_python==4.10.0.84
+paramiko==3.4.1
+scikit_learn==1.5.1
+torch==2.4.0
+torchmetrics==1.4.1
+torchvision==0.19.0
+tqdm==4.66.5
+transformers==4.44.0
diff --git a/src/__init__.py b/src/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/data_prep_utils/__init__.py b/src/data_prep_utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/data_prep_utils/data_setup.py b/src/data_prep_utils/data_setup.py
new file mode 100644
index 0000000..2122d13
--- /dev/null
+++ b/src/data_prep_utils/data_setup.py
@@ -0,0 +1,31 @@
+"""
+Contains functionality for creating PyTorch DataLoaders
+"""
+
+import os
+
+from torchvision import datasets, transforms
+from torch.utils.data import DataLoader
+
+NUM_WORKERS = os.cpu_count()
+
+
+def create_dataloaders(
+        train_dir: str,
+        test_dir: str,
+        transform: transforms.Compose,
+        batch_size: int,
+        num_workers: int = NUM_WORKERS
+):
+    """Creates training and testing DataLoaders (minimal ImageFolder-based sketch;
+    a video Dataset would be swapped in for BVIArtefact)."""
+    train_data = datasets.ImageFolder(train_dir, transform=transform)
+    test_data = datasets.ImageFolder(test_dir, transform=transform)
+    class_names = train_data.classes
+
+    train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True,
+                                  num_workers=num_workers, pin_memory=True)
+    test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False,
+                                 num_workers=num_workers, pin_memory=True)
+
+    return train_dataloader, test_dataloader, class_names
+
+
+"""
+Usage:
+from src.data_prep_utils import data_setup
+
+# Create train/test dataloader and get class names as a list
+train_dataloader, test_dataloader, class_names = data_setup.create_dataloaders(...
+"""
diff --git a/src/data_prep_utils/preprocess.py b/src/data_prep_utils/preprocess.py
new file mode 100644
index 0000000..bdee0ea
--- /dev/null
+++ b/src/data_prep_utils/preprocess.py
@@ -0,0 +1,92 @@
+import os
+import torch
+import random
+import json
+from torchvision.io import read_video
+from transformers import VideoMAEImageProcessor
+from pathlib import Path
+
+# Load the VideoMAE image processor
+model_ckpt = "MCG-NJU/videomae-base"
+image_processor = VideoMAEImageProcessor.from_pretrained(model_ckpt,
+                                                         do_rescale=False)
+
+
+def random_spatio_temporal_crop(video, num_frames=16, height=224, width=224):
+    T, H, W, C = video.shape
+
+    # Random temporal crop
+    start_frame = random.randint(0, T - num_frames)
+    video = video[start_frame:start_frame + num_frames]
+
+    # Random spatial crop
+    if H > height and W > width:
+        top = random.randint(0, H - height)
+        left = random.randint(0, W - width)
+        video = video[:, top:top + height, left:left + width, :]
+    else:
+        video = torch.nn.functional.interpolate(video.permute(0, 3, 1, 2), size=(height, width)).permute(0, 2, 3, 1)
+
+    return video
+
+
+def preprocess_video(video_path, num_crops=6, num_frames=16, height=224, width=224):
+    video, _, _ = read_video(video_path, pts_unit="sec")
+    video = video.float() / 255.0  # Normalize to [0, 1]
+
+    crops = []
+    for _ in range(num_crops):
+        crop = random_spatio_temporal_crop(video, num_frames, height, width)
+        # Apply VideoMAE preprocessing
+        crop = image_processor(list(crop.permute(0, 3, 1, 2)), return_tensors="pt")["pixel_values"]
+        crops.append(crop.squeeze(0))  # Remove batch dimension
+
+    return torch.stack(crops)  # Stack all crops
+
+
+def main():
+    dataset_root_path = "/Volumes/SSD/BVIArtefact"
+    output_root_path = "/Volumes/SSD/BVIArtefact_preprocessed"
+    os.makedirs(output_root_path, exist_ok=True)
+
+    # Load original labels
+    with open(os.path.join(dataset_root_path, "processed_labels.json"), "r") as f:
+        original_labels = json.load(f)
+
+    # New labels dictionary
+    new_labels = {}
+
+    # Process videos
+    for part in ["part1", "part2"]:
+        part_dir = os.path.join(dataset_root_path, part)
+        for video_name in os.listdir(part_dir):
+            if video_name.endswith('.avi'):
+                video_path = os.path.join(part_dir, video_name)
+
+                if video_name in original_labels:
+                    try:
+                        preprocessed_crops = preprocess_video(video_path)
+
+                        # Save preprocessed video crops
+                        output_name = f"{Path(video_name).stem}_crops.pt"
+                        output_path = os.path.join(output_root_path, output_name)
+                        torch.save(preprocessed_crops, output_path)
+
+                        # Add to new labels dictionary
+                        new_labels[output_name] = original_labels[video_name]
+
+                        print(f"Processed {video_name}")
+                    except Exception as e:
+                        print(f"Error processing {video_name}: {str(e)}")
+                else:
+                    print(f"Skipping {video_name} - not found in labels")
+
+    # Save the new labels
+    with open(os.path.join(output_root_path, "preprocessed_labels.json"), "w") as f:
+        json.dump(new_labels, f)
+
+    print("Preprocessing complete.")
+
+
+if __name__ == "__main__":
+    main()
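+
+# For reference (derived from preprocess_video above): each saved <video>_crops.pt file
+# holds a tensor of shape (num_crops, num_frames, 3, 224, 224), and
+# preprocessed_labels.json maps that filename to the source video's artefact labels.
+
+# sample usage of this script:
+# python src/data_prep_utils/preprocess.py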
diff --git a/src/data_prep_utils/resize_bvi_artefact.py b/src/data_prep_utils/resize_bvi_artefact.py
new file mode 100644
index 0000000..ca8d064
--- /dev/null
+++ b/src/data_prep_utils/resize_bvi_artefact.py
@@ -0,0 +1,108 @@
+# resize_bvi_artefact.py
+
+import multiprocessing
+import os
+import re
+import shutil
+
+import ffmpeg
+from tqdm import tqdm
+
+
+def resize_video(input_path, output_path, width=224, height=224):
+    try:
+        (
+            ffmpeg
+            .input(input_path)
+            .filter('scale', width, height)
+            .output(output_path)
+            .overwrite_output()
+            .run(capture_stdout=True, capture_stderr=True)
+        )
+        return None  # Success
+    except ffmpeg.Error as e:
+        return f"Error processing {input_path}: {e.stderr.decode()}"
+
+
+def get_new_filename(old_filename, width, height):
+    pattern = r'(.+)_(\d+x\d+)_(\d+fps)_(.+)\.avi'
+    match = re.match(pattern, old_filename)
+
+    if match:
+        video_name, old_resolution, fps, rest = match.groups()
+        return f"{video_name}_{old_resolution}_to_{width}x{height}_{fps}_{rest}.avi"
+    else:
+        name, ext = os.path.splitext(old_filename)
+        return f"{name}_to_{width}x{height}{ext}"
+
+
+def process_video(args):
+    input_path, output_dir, relative_path, width, height = args
+    file = os.path.basename(input_path)
+    new_filename = get_new_filename(file, width, height)
+    output_path = os.path.join(output_dir, relative_path, new_filename)
+
+    os.makedirs(os.path.dirname(output_path), exist_ok=True)
+    return resize_video(input_path, output_path, width, height)
+
+
+def preprocess_dataset(input_dir, output_dir, width=560, height=560, num_processes=None):
+    if num_processes is None:
+        num_processes = multiprocessing.cpu_count()
+
+    video_files = []
+    for part in ['part1', 'part2']:
+        part_dir = os.path.join(input_dir, part)
+        print(f"Searching for videos in: {part_dir}")
+        if not os.path.exists(part_dir):
+            print(f"Directory not found: {part_dir}")
+            continue
+        for root, _, files in os.walk(part_dir):
+            for file in files:
+                if file.endswith('.avi'):
+                    relative_path = os.path.relpath(root, input_dir)
+                    input_path = os.path.join(root, file)
+                    video_files.append((input_path, output_dir, relative_path, width, height))
+
+    print(f"Found {len(video_files)} video files to process.")
+
+    if not video_files:
+        print("No video files found. Please check the input directory.")
+        return
+
+    with multiprocessing.Pool(processes=num_processes) as pool:
+        results = list(tqdm(pool.imap(process_video, video_files), total=len(video_files), desc="Processing videos"))
+
+    # Print any errors that occurred
+    errors = [error for error in results if error is not None]
+    for error in errors:
+        print(error)
+
+    # Copy json files to the output directory
+    json_files = ['labels.json', 'processed_labels.json', 'subsets.json']
+    for json_file in json_files:
+        src = os.path.join(input_dir, json_file)
+        dst = os.path.join(output_dir, json_file)
+        if os.path.exists(src):
+            shutil.copy2(src, dst)
+        else:
+            print(f"Warning: {json_file} not found in {input_dir}")
+
+    print(f"Preprocessing completed! Processed {len(video_files)} videos with {len(errors)} errors.")
+
+
+if __name__ == "__main__":
+    input_dir = "/Volumes/SSD/BVIArtefact"
+    output_dir = "/Volumes/SSD/preprocessed_BVIArtefact"
+
+    # input_dir and output_dir are already absolute paths, so no further path
+    # construction is needed here (os.path.join with an absolute path is a no-op).
+
+    print(f"Input directory: {input_dir}")
+    print(f"Output directory: {output_dir}")
+
+    preprocess_dataset(input_dir, output_dir)
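+
+# Note: this script relies on the ffmpeg-python package (imported above as ffmpeg)
+# plus an ffmpeg binary on the PATH; neither is pinned in requirements.txt, so they
+# need to be installed separately before running.
+
+# sample usage of this script:
+# python src/data_prep_utils/resize_bvi_artefact.py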
diff --git a/src/data_prep_utils/split_dataset.py b/src/data_prep_utils/split_dataset.py
new file mode 100644
index 0000000..9bb7311
--- /dev/null
+++ b/src/data_prep_utils/split_dataset.py
@@ -0,0 +1,92 @@
+import random
+import os
+import json
+import shutil
+from collections import defaultdict
+from pathlib import Path
+
+
+def split_dataset(preprocessed_dir, train_ratio=0.7, val_ratio=0.15, test_ratio=0.15):
+    # Load labels
+    with open(os.path.join(preprocessed_dir, 'preprocessed_labels.json'), 'r') as f:
+        labels = json.load(f)
+
+    # Group crops by artifacts
+    artifact_crops = defaultdict(lambda: {'positive': set(), 'negative': set()})
+    for crop, artifacts in labels.items():
+        for artifact, value in artifacts.items():
+            if value == 1:
+                artifact_crops[artifact]['positive'].add(crop)
+            else:
+                artifact_crops[artifact]['negative'].add(crop)
+
+    # Find the minimum number of crops for any artifact
+    min_pos = min(len(crops['positive']) for crops in artifact_crops.values())
+    min_neg = min(len(crops['negative']) for crops in artifact_crops.values())
+    min_crops = min(min_pos, min_neg) * 2  # Ensure balance between positive and negative
+
+    # Calculate the number of crops for each split
+    train_size = int(min_crops * train_ratio)
+    val_size = int(min_crops * val_ratio)
+    test_size = min_crops - train_size - val_size
+
+    splits = {'train': set(), 'val': set(), 'test': set()}
+    split_artifacts = {split: defaultdict(lambda: {'positive': set(), 'negative': set()}) for split in splits}
+
+    # Distribute crops ensuring balance for each artifact in each split
+    for split, size in [('train', train_size), ('val', val_size), ('test', test_size)]:
+        pos_count = size // 2
+        neg_count = size - pos_count
+
+        for artifact, crops in artifact_crops.items():
+            pos_crops = list(crops['positive'])
+            neg_crops = list(crops['negative'])
+            random.shuffle(pos_crops)
+            random.shuffle(neg_crops)
+
+            for _ in range(pos_count):
+                if pos_crops:
+                    crop = pos_crops.pop()
+                    if crop not in splits['train'] and crop not in splits['val'] and crop not in splits['test']:
+                        splits[split].add(crop)
+                        split_artifacts[split][artifact]['positive'].add(crop)
+
+            for _ in range(neg_count):
+                if neg_crops:
+                    crop = neg_crops.pop()
+                    if crop not in splits['train'] and crop not in splits['val'] and crop not in splits['test']:
+                        splits[split].add(crop)
+                        split_artifacts[split][artifact]['negative'].add(crop)
+
+    # Create directories and move crops
+    preprocessed_dir_path = Path(preprocessed_dir)
+    data_split_path = preprocessed_dir_path.parent / str(preprocessed_dir_path.name + "_split")
+
+    for split, crops in splits.items():
+        os.makedirs(data_split_path / split, exist_ok=True)
+        split_labels = {}
+        for crop in crops:
+            src = os.path.join(preprocessed_dir, crop)
+            dst = os.path.join(data_split_path, split, crop)
+            shutil.copy(src, dst)  # Use copy instead of move to preserve original data
+            split_labels[crop] = labels[crop]
+        with open(os.path.join(data_split_path, split, 'labels.json'), 'w') as f:
+            json.dump(split_labels, f, indent=2)
+
+    print("Dataset split complete")
+    print(f"Train set: {len(splits['train'])} crops")
+    print(f"Validation set: {len(splits['val'])} crops")
+    print(f"Test set: {len(splits['test'])} crops")
+
+    # Print balance information for each artifact in each split
+    for split in splits:
+        print(f"\n{split.capitalize()} set balance:")
+        for artifact in artifact_crops:
+            pos = len(split_artifacts[split][artifact]['positive'])
+            neg = len(split_artifacts[split][artifact]['negative'])
+            print(f"  {artifact}: Positive: {pos}, Negative: {neg}")
+
+
+if __name__ == "__main__":
+    preprocessed_dir = "/Volumes/SSD/BVIArtefact_crops"  # Update this to your preprocessed dataset path
+    split_dataset(preprocessed_dir)
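+
+# Note: no random seed is fixed above, so repeated runs produce different splits;
+# call random.seed(<some int>) before split_dataset() if reproducible splits are needed.
+
+# sample usage of this script:
+# python src/data_prep_utils/split_dataset.py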
diff --git a/src/data_prep_utils/subset_and_process.py b/src/data_prep_utils/subset_and_process.py
new file mode 100644
index 0000000..d733275
--- /dev/null
+++ b/src/data_prep_utils/subset_and_process.py
@@ -0,0 +1,274 @@
+import os
+import json
+import random
+from collections import Counter
+
+import torch
+import cv2
+import numpy as np
+from pathlib import Path
+from tqdm import tqdm
+import argparse
+from sklearn.model_selection import train_test_split
+
+# Argument parser
+parser = argparse.ArgumentParser(description='Preprocess BVIArtefact dataset')
+parser.add_argument('--input_dir', type=str, default="/Volumes/SSD/BVIArtefact",
+                    help='Input directory containing BVIArtefact dataset')
+parser.add_argument('--output_dir', type=str, default="/Volumes/SSD/BVIArtefact_8_crops_all_videos",
+                    help='Output directory for preprocessed data')
+parser.add_argument('--num_samples', type=int, default=None, help='Number of videos to sample (None for all)')
+parser.add_argument('--crop_size', type=int, default=224, help='Size of spatial crop')
+parser.add_argument('--num_frames', type=int, default=8, help='Number of frames to extract')
+parser.add_argument('--crops_per_video', type=int, default=4, help='Number of crops to extract per video')
+parser.add_argument('--train_ratio', type=float, default=0.7, help='Ratio of videos for training set')
+parser.add_argument('--val_ratio', type=float, default=0.15, help='Ratio of videos for validation set')
+args = parser.parse_args()
+
+# Configuration
+INPUT_DIR = args.input_dir
+OUTPUT_DIR = args.output_dir
+LABELS_FILE = os.path.join(INPUT_DIR, "labels.json")
+CROP_SIZE = (args.crop_size, args.crop_size)
+NUM_FRAMES = args.num_frames
+NUM_CROPS_PER_VIDEO = args.crops_per_video
+
+random.seed(42)
+
+# Create output directories
+for split in ['train', 'val', 'test']:
+    os.makedirs(os.path.join(OUTPUT_DIR, split), exist_ok=True)
+
+# Load labels
+with open(LABELS_FILE, 'r') as f:
+    labels = json.load(f)
+
+
+def parse_size(size_str):
+    """Convert size string to bytes"""
+    size = float(size_str[:-1])
+    unit = size_str[-1]
+    if unit == 'G':
+        return int(size * 1e9)
+    elif unit == 'M':
+        return int(size * 1e6)
+    else:
+        return int(size)
+
+
+def read_file_sizes(filename):
+    """Read file sizes from text file"""
+    sizes = {}
+    with open(filename, 'r') as f:
+        for line in f:
+            parts = line.strip().split()
+            if len(parts) == 2:
+                sizes[parts[0]] = parse_size(parts[1])
+    return sizes
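+
+# Expected line format in part*_files_sizes.txt (an assumption inferred from the parser
+# above and from subset_data.py): "<video filename> <size with G/M suffix>", e.g.
+#   Tennis_1920x1080_24fps_8bit_420_Motion_QP47_SFB_1.avi 1.2G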
+
+
+def extract_random_crop(frames, num_frames, crop_size):
+    """Extract a random spatio-temporal crop from the frames."""
+    t, h, w, _ = frames.shape
+
+    if t < num_frames:
+        raise ValueError(f"Video has fewer frames ({t}) than required ({num_frames})")
+
+    start_frame = random.randint(0, t - num_frames)
+    top = random.randint(0, h - crop_size[0])
+    left = random.randint(0, w - crop_size[1])
+
+    crop = frames[start_frame:start_frame + num_frames,
+           top:top + crop_size[0],
+           left:left + crop_size[1]]
+
+    return crop
+
+
+def normalize(video, mean, std):
+    """Normalize the video tensor"""
+    mean = torch.tensor(mean).view(1, 3, 1, 1)
+    std = torch.tensor(std).view(1, 3, 1, 1)
+    return (video - mean) / std
+
+
+def process_videos(video_list, split):
+    """Process videos and save crops for a specific split"""
+    preprocessed_labels = {}
+    label_counts = Counter()
+    total_crops = 0
+
+    for video_file, video_name in tqdm(video_list, desc=f"Processing {split} set"):
+        video_path = os.path.join(INPUT_DIR, video_file)
+
+        # Skip if video is not in labels
+        if video_name not in labels:
+            print(f"Skipping {video_file}: No labels found")
+            continue
+
+        video_labels = labels[video_name]
+
+        try:
+            # Read video
+            cap = cv2.VideoCapture(video_path)
+            frames = []
+            while len(frames) < NUM_FRAMES * 2:  # Read more frames than needed
+                ret, frame = cap.read()
+                if not ret:
+                    break
+                frames.append(frame)
+            cap.release()
+
+            if len(frames) < NUM_FRAMES:
+                print(f"Warning: {video_file} has fewer than {NUM_FRAMES} frames. Skipping.")
+                continue
+
+            frames = np.array(frames)
+
+            for i in range(NUM_CROPS_PER_VIDEO):
+                # Extract random crop
+                crop = extract_random_crop(frames, NUM_FRAMES, CROP_SIZE)
+
+                # Convert to torch tensor and normalize
+                crop = torch.from_numpy(crop).permute(0, 3, 1, 2).float() / 255.0
+
+                # Normalize using ImageNet stats
+                crop = normalize(crop, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+
+                # Generate unique filename for the crop
+                crop_filename = f"{Path(video_name).stem}_crop_{i}.pt"
+                crop_path = os.path.join(OUTPUT_DIR, split, crop_filename)
+
+                # Save crop as .pt file
+                torch.save(crop, crop_path)
+
+                # Store labels for the crop
+                preprocessed_labels[crop_filename] = video_labels
+
+                total_crops += 1
+
+            # Update label counts
+            for artifact, present in video_labels.items():
+                if present == 1:
+                    label_counts[f"{artifact}_Positive"] += NUM_CROPS_PER_VIDEO
+                else:
+                    label_counts[f"{artifact}_Negative"] += NUM_CROPS_PER_VIDEO
+
+        except Exception as e:
+            print(f"Error processing {video_file}: {str(e)}")
+
+    # Save preprocessed labels
+    labels_path = os.path.join(OUTPUT_DIR, split, "labels.json")
+    with open(labels_path, 'w') as f:
+        json.dump(preprocessed_labels, f, indent=4)
+
+    print(f"\n{split} set statistics:")
+    print(f"Total crops generated: {total_crops}")
+    print(f"Number of entries in labels JSON: {len(preprocessed_labels)}")
+
+    # Check if numbers match
+    if total_crops == len(preprocessed_labels):
+        print("✅ Numbers match!")
+    else:
+        print("❌ Numbers don't match. There might be an issue.")
+
+    return label_counts, total_crops
+
+
+def check_split_overlap(output_dir):
+    splits = ['train', 'val', 'test']
+    parent_videos = {split: set() for split in splits}
+
+    for split in splits:
+        labels_path = Path(output_dir) / split / "labels.json"
+        with open(labels_path, 'r') as f:
+            labels = json.load(f)
+
+        for crop_filename in labels.keys():
+            # Extract parent video name by removing the "_crop_{i}.pt" suffix
+            parent_video = crop_filename.rsplit('_crop_', 1)[0]
+            parent_videos[split].add(parent_video)
+
+    # Check for overlap between splits
+    for i, split1 in enumerate(splits):
+        for split2 in splits[i + 1:]:
+            overlap = parent_videos[split1].intersection(parent_videos[split2])
+            if overlap:
+                print(f"❌ Overlap found between {split1} and {split2} splits:")
+                print(f"   Common parent videos: {overlap}")
+            else:
+                print(f"✅ No overlap found between {split1} and {split2} splits")
+
+    # Print summary
+    print("\nSummary:")
+    for split in splits:
+        print(f"{split} split: {len(parent_videos[split])} unique parent videos")
+
+
+def print_label_balance(label_counts, split_name):
+    print(f"\n{split_name} set balance:")
+    artifacts = ['black_screen', 'frame_drop', 'spatial_blur', 'transmission_error', 'aliasing', 'banding',
+                 'dark_scenes', 'graininess', 'motion_blur']
+    for artifact in artifacts:
+        positive = label_counts[f"{artifact}_Positive"]
+        negative = label_counts[f"{artifact}_Negative"]
+        print(f"    {artifact}: Positive: {positive}, Negative: {negative}")
+
+
+# Read file sizes
+part1_sizes = read_file_sizes(os.path.join(INPUT_DIR, "part1_files_sizes.txt"))
+part2_sizes = read_file_sizes(os.path.join(INPUT_DIR, "part2_files_sizes.txt"))
+
+all_sizes = {**part1_sizes, **part2_sizes}
+
+# Sort videos by size
+sorted_videos = sorted(all_sizes.items(), key=lambda x: x[1])
+
+# Sample videos if num_samples is specified
+if args.num_samples is not None:
+    sampled_videos = sorted_videos[:args.num_samples]
+else:
+    sampled_videos = sorted_videos
+
+# Extract video files and their corresponding folders
+video_files = [(os.path.join('part1' if f in part1_sizes else 'part2', f), f) for f, _ in sampled_videos]
+
+# Split videos into train, validation, and test sets
+train_videos, temp_videos = train_test_split(video_files, train_size=args.train_ratio, random_state=42)
+val_ratio = args.val_ratio / (1 - args.train_ratio)
+val_videos, test_videos = train_test_split(temp_videos, train_size=val_ratio, random_state=42)
+
+# Process each split and collect per-label statistics
+train_label_counts, train_crops = process_videos(train_videos, 'train')
+val_label_counts, val_crops = process_videos(val_videos, 'val')
+test_label_counts, test_crops = process_videos(test_videos, 'test')
+
+# Add a final summary
+print("\nFinal Summary:")
+print(f"Total crops - Train: {train_crops}, Val: {val_crops}, Test: {test_crops}")
+total_crops = train_crops + val_crops + test_crops
+print(f"Total crops across all splits: {total_crops}")
+
+# Check total number of label entries
+train_labels = json.load(open(os.path.join(OUTPUT_DIR, 'train', 'labels.json')))
+val_labels = json.load(open(os.path.join(OUTPUT_DIR, 'val', 'labels.json')))
+test_labels = json.load(open(os.path.join(OUTPUT_DIR, 'test', 'labels.json')))
+
+total_label_entries = len(train_labels) + len(val_labels) + len(test_labels)
+print(f"Total label entries across all splits: {total_label_entries}")
+
+if total_crops == total_label_entries:
+    print("✅ Total crops match total label entries!")
+else:
+    print("❌ Total crops and total label entries don't match. There might be an issue.")
+
+print_label_balance(train_label_counts, "Train")
+print_label_balance(val_label_counts, "Val")
+print_label_balance(test_label_counts, "Test")
+
+check_split_overlap(OUTPUT_DIR)
+
+print("Preprocessing completed.")
+
+# sample usage of this script:
+# python src/subset_and_process.py --input_dir /Volumes/SSD/BVIArtefact --output_dir /Volumes/SSD/BVIArtefact_crops --num_samples 100 --crop_size 224 --num_frames 8 --crops_per_video 2 --train_ratio 0.7 --val_ratio 0.15
diff --git a/src/data_prep_utils/subset_data.py b/src/data_prep_utils/subset_data.py
new file mode 100644
index 0000000..f30aa02
--- /dev/null
+++ b/src/data_prep_utils/subset_data.py
@@ -0,0 +1,158 @@
+import argparse
+import json
+import os
+import shutil
+from collections import defaultdict
+from pathlib import Path
+from tqdm import tqdm
+from src.data_prep_utils.split_dataset import split_dataset
+
+# Configuration
+local_labels_path = 'data/bviArtefactMetaInfo/processed_labels.json'
+artefacts_to_choose = ['graininess', 'aliasing', 'banding', 'motion_blur']  # Add more labels as needed
+size_limit_gb = 4  # Size limit in GB
+
+part1_sizes_path = 'data/bviArtefactMetaInfo/part1_files_sizes.txt'
+part2_sizes_path = 'data/bviArtefactMetaInfo/part2_files_sizes.txt'
+
+
+def convert_to_bytes(size_str):
+    size_unit = size_str[-1]
+    size_value = float(size_str[:-1])
+    if size_unit == 'G':
+        return int(size_value * 1e9)
+    elif size_unit == 'M':
+        return int(size_value * 1e6)
+    elif size_unit == 'K':
+        return int(size_value * 1e3)
+    else:
+        return int(size_value)
+
+
+def load_file_sizes(file_path):
+    file_sizes = {}
+    with open(file_path, 'r') as f:
+        for line in f:
+            parts = line.strip().split()
+            file_name = parts[0]
+            file_size = convert_to_bytes(parts[1])
+            file_sizes[file_name] = file_size
+    return file_sizes
+
+
+def get_balanced_videos(labels, artefacts, size_limit):
+    video_labels = defaultdict(dict)
+    for video, details in labels.items():
+        for artefact in artefacts:
+            video_labels[video][artefact] = details.get(artefact, 0)
+
+    # Separate positive and negative videos
+    positive_videos = [v for v, l in video_labels.items() if all(l[a] == 1 for a in artefacts)]
+    negative_videos = [v for v, l in video_labels.items() if all(l[a] == 0 for a in artefacts)]
+
+    # Sort videos by size (smallest to largest)
+    positive_videos.sort(key=lambda x: file_sizes.get(x, 0))
+    negative_videos.sort(key=lambda x: file_sizes.get(x, 0))
+
+    balanced_videos = []
+    total_size = 0
+
+    print(f"Size limit: {size_limit / 1e9:.2f} GB")
+    print(f"Total positive videos available: {len(positive_videos)}")
+    print(f"Total negative videos available: {len(negative_videos)}")
+
+    # Select videos while maintaining balance and respecting size limit
+    for pos, neg in zip(positive_videos, negative_videos):
+        pos_size = file_sizes.get(pos, 0)
+        neg_size = file_sizes.get(neg, 0)
+
+        if total_size + pos_size + neg_size <= size_limit:
+            balanced_videos.extend([pos, neg])
+            total_size += pos_size + neg_size
+        else:
+            break
+
+    final_subset = {video: video_labels[video] for video in balanced_videos}
+
+    final_size = sum(file_sizes.get(video, 0) for video in final_subset)
+    print(f"\nFinal balanced dataset:")
+    print(f"Size: {final_size / 1e9:.2f} GB")
+    print(f"Total videos: {len(final_subset)}")
+    print(f"Positive videos: {len(final_subset) // 2}")
+    print(f"Negative videos: {len(final_subset) // 2}")
+
+    return final_subset
+
+
+def copy_videos_local(subset_videos, source_base_path, destination_base_path):
+    progress_bar = tqdm(total=len(subset_videos), desc="Copying videos", unit="file", dynamic_ncols=True)
+
+    for video in subset_videos:
+        found = False
+        for part in ['part1', 'part2']:
+            source_path = os.path.join(source_base_path, part, video)
+            destination_path = os.path.join(destination_base_path, video)
+            if os.path.exists(source_path):
+                progress_bar.set_postfix(file=video)
+                shutil.copy2(source_path, destination_path)
+                found = True
+                break
+        if not found:
+            print(f"Video {video} not found in either part1 or part2.")
+        progress_bar.update(1)
+
+    progress_bar.close()
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Create a balanced subset of videos for multi-label classification.")
+    parser.add_argument("--local", help="Path to local bviDataset folder", type=str, required=True)
+    parser.add_argument("--size_limit", help="Size limit in GB", type=float, default=2.0)
+    args = parser.parse_args()
+
+    global size_limit_gb
+    size_limit_gb = args.size_limit
+
+    # Load file sizes
+    part1_file_sizes = load_file_sizes(part1_sizes_path)
+    part2_file_sizes = load_file_sizes(part2_sizes_path)
+    global file_sizes
+    file_sizes = {**part1_file_sizes, **part2_file_sizes}
+
+    # Load labels
+    with open(local_labels_path, 'r') as f:
+        labels = json.load(f)
+
+    size_limit_bytes = size_limit_gb * 1e9
+    balanced_subset = get_balanced_videos(labels, artefacts_to_choose, size_limit_bytes)
+
+    # Create the local download directory
+    local_download_dir = f'/Volumes/SSD/subsets/{"_".join(artefacts_to_choose)}_subset_{int(size_limit_gb)}_GB'
+    os.makedirs(local_download_dir, exist_ok=True)
+
+    # Save the subset list locally
+    subset_file_path = f'{local_download_dir}/labels.json'
+    with open(subset_file_path, 'w') as f:
+        json.dump(balanced_subset, f, indent=4)
+
+    print(f"Balanced subset saved to {subset_file_path}")
+
+    # Verify the balance of the subset labels
+    for artefact in artefacts_to_choose:
+        presence_count = sum(1 for labels in balanced_subset.values() if labels[artefact] == 1)
+        absence_count = sum(1 for labels in balanced_subset.values() if labels[artefact] == 0)
+        print(f"{artefact}:")
+        print(f"  Presence count: {presence_count}")
+        print(f"  Absence count: {absence_count}")
+
+    # Use local dataset
+    print(f"Using local dataset at: {args.local}")
+    copy_videos_local(balanced_subset.keys(), args.local, local_download_dir)
+
+    print(f"All raw videos copied to {local_download_dir}")
+
+    split_dataset(local_download_dir)
+
+
+if __name__ == "__main__":
+    main()
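+
+# sample usage of this script (paths illustrative):
+# python src/data_prep_utils/subset_data.py --local /Volumes/SSD/BVIArtefact --size_limit 4
+# (expects part1/ and part2/ under --local, plus the metadata files referenced at the top)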
diff --git a/src/data_prep_utils/subset_processed_dataset.py b/src/data_prep_utils/subset_processed_dataset.py
new file mode 100644
index 0000000..5a28d06
--- /dev/null
+++ b/src/data_prep_utils/subset_processed_dataset.py
@@ -0,0 +1,113 @@
+import argparse
+import json
+import os
+import random
+import shutil
+from collections import defaultdict
+
+from tqdm import tqdm
+
+
+def load_labels(labels_path):
+    with open(labels_path, 'r') as f:
+        return json.load(f)
+
+
+def get_balanced_subset(labels, artefacts, count_per_label):
+    video_labels = defaultdict(dict)
+    for video, details in labels.items():
+        for artefact in artefacts:
+            video_labels[video][artefact] = details.get(artefact, 0)
+
+    final_subset = {}
+    artefact_counts = {artefact: {'positive': 0, 'negative': 0} for artefact in artefacts}
+
+    # Shuffle videos to ensure random selection
+    shuffled_videos = list(video_labels.keys())
+    random.shuffle(shuffled_videos)
+
+    for video in shuffled_videos:
+        include_video = True
+        for artefact in artefacts:
+            label = video_labels[video][artefact]
+            if label == 1 and artefact_counts[artefact]['positive'] >= count_per_label:
+                include_video = False
+                break
+            elif label == 0 and artefact_counts[artefact]['negative'] >= count_per_label:
+                include_video = False
+                break
+
+        if include_video:
+            final_subset[video] = video_labels[video]
+            for artefact in artefacts:
+                if video_labels[video][artefact] == 1:
+                    artefact_counts[artefact]['positive'] += 1
+                else:
+                    artefact_counts[artefact]['negative'] += 1
+
+        # Check if we have reached the target count for all artefacts
+        if all(counts['positive'] >= count_per_label and counts['negative'] >= count_per_label
+               for counts in artefact_counts.values()):
+            break
+
+    return final_subset
+
+
+def copy_videos(videos, src_dir, dst_dir):
+    os.makedirs(dst_dir, exist_ok=True)
+    for video in tqdm(videos, desc=f"Copying to {os.path.basename(dst_dir)}"):
+        src_path_part1 = os.path.join(src_dir, 'part1', video)
+        src_path_part2 = os.path.join(src_dir, 'part2', video)
+        dst_path = os.path.join(dst_dir, video)
+
+        if os.path.exists(src_path_part1):
+            shutil.copy2(src_path_part1, dst_path)
+        elif os.path.exists(src_path_part2):
+            shutil.copy2(src_path_part2, dst_path)
+        else:
+            print(f"Warning: Video {video} not found in either part1 or part2.")
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Create a balanced subset of videos and relocate them.")
+    parser.add_argument("--input_dir", type=str, required=True, help="Path to processed_BVIArtefact folder")
+    parser.add_argument("--output_dir", type=str, required=True, help="Path to output directory")
+    parser.add_argument("--count_per_label", type=int, default=500,
+                        help="Number of videos per label (positive/negative)")
+    args = parser.parse_args()
+
+    # Load labels
+    labels_path = os.path.join(args.input_dir, 'processed_labels.json')
+    labels = load_labels(labels_path)
+
+    # Define artefacts
+    artefacts = ['graininess', 'aliasing', 'banding', 'motion_blur']  # artefact labels to balance; adjust as needed
+
+    # Get balanced subset
+    balanced_subset = get_balanced_subset(labels, artefacts, args.count_per_label)
+
+    # Copy videos to output directory
+    copy_videos(balanced_subset.keys(), args.input_dir, args.output_dir)
+
+    # Save the subset labels
+    subset_labels_path = os.path.join(args.output_dir, 'labels.json')
+    with open(subset_labels_path, 'w') as f:
+        json.dump(balanced_subset, f, indent=4)
+
+    print(f"Balanced subset created in {args.output_dir}")
+    print(f"Total videos in subset: {len(balanced_subset)}")
+
+    # Verify the balance of the subset labels
+    for artefact in artefacts:
+        presence_count = sum(1 for labels in balanced_subset.values() if labels[artefact] == 1)
+        absence_count = sum(1 for labels in balanced_subset.values() if labels[artefact] == 0)
+        print(f"{artefact}:")
+        print(f"  Presence count: {presence_count}")
+        print(f"  Absence count: {absence_count}")
+
+
+if __name__ == "__main__":
+    main()
+
+    # sample usage of the script
+    # python subset_processed_dataset.py --input_dir /Volumes/SSD/preprocessed_BVIArtefact --output_dir /Volumes/SSD/balanced_subset --count_per_label 500
diff --git a/src/data_prep_utils/subset_random.py b/src/data_prep_utils/subset_random.py
new file mode 100644
index 0000000..c2b9452
--- /dev/null
+++ b/src/data_prep_utils/subset_random.py
@@ -0,0 +1,82 @@
+import os
+import json
+import random
+
+
+def get_file_sizes(file_path):
+    sizes = {}
+    with open(file_path, 'r') as f:
+        for line in f:
+            parts = line.strip().split()
+            if len(parts) == 2:
+                filename, size = parts
+                # Parse G/M/K-suffixed sizes into megabytes (the original int(size[:-1])
+                # assumed every entry was a whole number of megabytes).
+                unit = size[-1]
+                value = float(size[:-1])
+                if unit == 'G':
+                    value *= 1024
+                elif unit == 'K':
+                    value /= 1024
+                sizes[filename] = value
+    return sizes
+
+
+def create_dataset(labels_file, part1_sizes, part2_sizes, target_size_gb):
+    # Load labels
+    with open(labels_file, 'r') as f:
+        labels = json.load(f)
+
+    # Combine file sizes
+    all_sizes = {**part1_sizes, **part2_sizes}
+
+    # Create a list of (filename, size) tuples, sorted by size
+    sorted_files = sorted(all_sizes.items(), key=lambda x: x[1])
+
+    target_size_mb = target_size_gb * 1024
+    selected_files = []
+    current_size = 0
+
+    # Randomly select files, prioritizing smaller ones
+    while current_size < target_size_mb and sorted_files:
+        # Randomly choose from the smallest 10% of remaining files
+        chunk_size = max(1, len(sorted_files) // 10)
+        chosen_file, file_size = random.choice(sorted_files[:chunk_size])
+
+        if chosen_file in labels and (current_size + file_size) <= target_size_mb:
+            selected_files.append(chosen_file)
+            current_size += file_size
+
+        sorted_files.remove((chosen_file, file_size))
+
+    # Create a new labels dictionary with only the selected files
+    selected_labels = {file: labels[file] for file in selected_files if file in labels}
+
+    return selected_files, selected_labels, current_size / 1024  # Convert back to GB
+
+
+# File paths
+labels_file = '/Volumes/SSD/BVIArtefact/processed_labels.json'
+part1_sizes_file = '/Volumes/SSD/BVIArtefact/part1_files_sizes.txt'
+part2_sizes_file = '/Volumes/SSD/BVIArtefact/part2_files_sizes.txt'
+
+# Target dataset size in GB
+target_size_gb = 2  # Change this to your desired size
+
+# Get file sizes
+part1_sizes = get_file_sizes(part1_sizes_file)
+part2_sizes = get_file_sizes(part2_sizes_file)
+
+# Create the dataset
+selected_files, selected_labels, actual_size_gb = create_dataset(
+    labels_file, part1_sizes, part2_sizes, target_size_gb
+)
+
+# Print results
+print(f"Selected {len(selected_files)} files")
+print(f"Total size: {actual_size_gb:.2f} GB")
+
+# Save the new labels to a file
+output_dir = '/Volumes/SSD/BVIArtefact'
+with open(os.path.join(output_dir, 'selected_labels.json'), 'w') as f:
+    json.dump(selected_labels, f, indent=2)
+
+# Save the list of selected files
+with open(os.path.join(output_dir, 'selected_files.txt'), 'w') as f:
+    for file in selected_files:
+        f.write(f"{file}\n")
+
+print("Selected labels saved to 'selected_labels.json'")
+print("Selected files list saved to 'selected_files.txt'")
diff --git a/src/plots.py b/src/plots.py
new file mode 100644
index 0000000..baec057
--- /dev/null
+++ b/src/plots.py
@@ -0,0 +1,123 @@
+import json
+import matplotlib.pyplot as plt
+import seaborn as sns
+import pandas as pd
+import numpy as np
+import os
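+
+# Note: matplotlib, seaborn and pandas are imported here but are not pinned in
+# requirements.txt; the plotting libraries need to be installed separately.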
+
+
+def load_json_labels(file_path):
+    with open(file_path, 'r') as f:
+        return json.load(f)
+
+
+def create_label_df(json_data):
+    return pd.DataFrame.from_dict(json_data, orient='index')
+
+
+def plot_label_balance_stacked(df, title, save_path):
+    """
+    Plot the positive/negative balance for each label using stacked bars and save as PNG.
+    """
+    label_balance = df.mean()
+    label_balance_negative = 1 - label_balance
+
+    plt.figure(figsize=(14, 6))
+    bar_width = 0.8
+
+    labels = label_balance.index
+    pos_bars = plt.bar(labels, label_balance, bar_width, label='Positive', color='#2ecc71')
+    neg_bars = plt.bar(labels, label_balance_negative, bar_width, bottom=label_balance, label='Negative',
+                       color='#e74c3c')
+
+    plt.title(f'Label Balance - {title}')
+    plt.xlabel('Labels')
+    plt.ylabel('Proportion')
+    plt.legend(title='Class')
+    plt.xticks(rotation=45, ha='right')
+
+    # Add percentage labels on the bars
+    for i, (pos, neg) in enumerate(zip(label_balance, label_balance_negative)):
+        plt.text(i, pos / 2, f'{pos:.1%}', ha='center', va='center', color='white', fontweight='bold')
+        plt.text(i, pos + neg / 2, f'{neg:.1%}', ha='center', va='center', color='white', fontweight='bold')
+
+    plt.tight_layout()
+    plt.savefig(save_path, dpi=300, bbox_inches='tight')
+    plt.close()
+
+
+def plot_label_distribution_across_splits_stacked(train_df, val_df, test_df, save_path):
+    """
+    Plot the distribution of positive and negative labels across train, validation, and test splits and save as PNG.
+    """
+    train_dist = train_df.mean()
+    val_dist = val_df.mean()
+    test_dist = test_df.mean()
+
+    df = pd.DataFrame({
+        'Train Positive': train_dist,
+        'Train Negative': 1 - train_dist,
+        'Validation Positive': val_dist,
+        'Validation Negative': 1 - val_dist,
+        'Test Positive': test_dist,
+        'Test Negative': 1 - test_dist
+    })
+
+    # figsize is passed to DataFrame.plot directly: it creates its own figure, so the
+    # size of a separately created plt.figure() would be ignored
+    df.plot(kind='bar', stacked=True, width=0.8, figsize=(16, 6))
+    plt.title('Label Distribution Across Splits')
+    plt.xlabel('Labels')
+    plt.ylabel('Proportion')
+    plt.xticks(rotation=45, ha='right')
+    plt.legend(title='Split and Class', bbox_to_anchor=(1.05, 1), loc='upper left')
+    plt.tight_layout()
+    plt.savefig(save_path, dpi=300, bbox_inches='tight')
+    plt.close()
+
+
+def plot_sample_counts(train_df, val_df, test_df, save_path):
+    """
+    Plot the number of samples in each split and save as PNG.
+    """
+    counts = [len(train_df), len(val_df), len(test_df)]
+    splits = ['Train', 'Validation', 'Test']
+
+    plt.figure(figsize=(4, 6))
+    bars = plt.bar(splits, counts)
+    plt.title('Number of Samples in Each Split')
+    plt.ylabel('Number of Samples')
+
+    # Add value labels on the bars
+    for bar in bars:
+        height = bar.get_height()
+        plt.text(bar.get_x() + bar.get_width() / 2., height,
+                 f'{height:,}',
+                 ha='center', va='bottom')
+
+    plt.tight_layout()
+    plt.savefig(save_path, dpi=300, bbox_inches='tight')
+    plt.close()
+
+
+# Directory containing the split label JSON files
+json_dir = '/Volumes/SSD/BVIArtefact_8_crops_all_videos'
+
+# Load the data
+train_data = load_json_labels(os.path.join(json_dir, 'train_labels.json'))
+val_data = load_json_labels(os.path.join(json_dir, 'val_labels.json'))
+test_data = load_json_labels(os.path.join(json_dir, 'test_labels.json'))
+
+# Create DataFrames
+train_df = create_label_df(train_data)
+val_df = create_label_df(val_data)
+test_df = create_label_df(test_data)
+
+# Generate and save plots
+plot_label_balance_stacked(train_df, 'Train Set', os.path.join(json_dir, 'label_balance_train.png'))
+plot_label_balance_stacked(val_df, 'Validation Set', os.path.join(json_dir, 'label_balance_val.png'))
+plot_label_balance_stacked(test_df, 'Test Set', os.path.join(json_dir, 'label_balance_test.png'))
+plot_label_distribution_across_splits_stacked(train_df, val_df, test_df,
+                                              os.path.join(json_dir, 'label_distribution_across_splits.png'))
+plot_sample_counts(train_df, val_df, test_df, os.path.join(json_dir, 'sample_counts.png'))
+
+print(f"Plots have been saved in the directory: {json_dir}")
-- 
GitLab