diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6b3df28b9504a9941041ea9aa7b38100c07068dc
--- /dev/null
+++ b/README.md
@@ -0,0 +1,57 @@
+# Deep Reinforcement Learning
+## Project: Train AI to play Snake
+
+## Introduction
+The goal of this project is to develop an AI Bot that learns to play the popular game Snake from scratch. The implementation includes a human player mode, a rule-based algorithm, Q-learning, and finally a Deep Reinforcement Learning (DQN) algorithm. For Q-learning and Deep Reinforcement Learning, no rules about the game are given; initially the Bot has to explore all options to learn which actions yield a good reward.
+
+## Install
+This project requires Python 3.8 with the pygame library installed, as well as Keras with a TensorFlow backend.
+```bash
+# clone via SSH
+git clone gitlab@gitlab.eps.surrey.ac.uk:cf0014/snake.git
+# or clone via HTTPS
+git clone https://gitlab.eps.surrey.ac.uk/cf0014/snake.git
+```
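+
+The exact install commands are not provided in this repository, so the following is only a sketch; a typical pip-based setup might look like this:
+```bash
+# assumed install commands; adjust to your Python 3.8 environment
+pip install pygame
+pip install tensorflow keras
+```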
+
+## Run
+To run the game, execute the following in the project folder:
+```bash
+python main.py
+```
+
+The program runs in human player mode by default. Type the following to see the available options:
+```bash
+python main.py --help
+```
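+
+For example, main.py defines --nodisplay, --nopause and --speed options; a fast, headless run (mainly useful when training a Bot) could be started like this:
+```bash
+python main.py --nodisplay --nopause --speed 300
+```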
+
+To switch to a Bot, edit main.py and uncomment the algorithm you want to run:
+```python
+    ## AI selector, pick one:
+    algo = AI_Player0() # do nothing, let human player control
+    #algo = AI_RuleBased() # rule-based algorithm
+    #algo = AI_RLQ()       # Q-learning - training mode
+    #algo = AI_RLQ(False)  # Q-learning - testing mode, no exploration
+    #algo = AI_DQN()       # DQN - training mode
+    #algo = AI_DQN(False)  # DQN - testing mode, no exploration
+```
+
+## Trained Data (for Q-Learning)
+The trained data (i.e. the Q-table) will be stored in the following file. If the file already exists, it will be overwritten.
+```
+q-table.json
+```
+
+When running, the program reads the Q-table from the file below. To use previously trained data, rename q-table.json to this filename.
+```
+q-table-learned.json
+```
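+
+For example, one way to reuse a freshly trained table on the next run (copying rather than renaming, so the original is kept; this is a suggestion, not a project script):
+```bash
+cp q-table.json q-table-learned.json
+```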
+
+## Trained Data (for DQN)
+The trained data (i.e. the network weights) will be stored in the following file. If the file already exists, it will be overwritten.
+```
+weights.hdf5
+```
+
+When running, the program reads the weights from the file below. To use previously trained data, rename weights.hdf5 to this filename.
+```
+weights-learned.hdf5
+```
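+
+As with the Q-table, a copy command (again a suggestion, not part of the project) can be used to reuse the trained weights:
+```bash
+cp weights.hdf5 weights-learned.hdf5
+```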
diff --git a/main.py b/main.py
index 756873488a56c3632c17fc2e3f7b030886c427f4..00bad90ec08699725d5954653ed308f039367823 100644
--- a/main.py
+++ b/main.py
@@ -285,7 +285,7 @@ if __name__ == "__main__" :
     parser: ArgumentParser = argparse.ArgumentParser()
     parser.add_argument("--nodisplay", help="Run in no GUI mode", action="store_true")
     parser.add_argument("--nopause", help="Run without pausing", action="store_true")
-    parser.add_argument("--speed", type=int, default=5)
+    parser.add_argument("--speed", type=int, default=300)
     args: Namespace = parser.parse_args()
 
     ## welcome info
@@ -293,16 +293,16 @@ if __name__ == "__main__" :
 
     ## do some hardcoding for debugging 
     #args.nodisplay = True  # <-- hardcoding no GUI mode
-    args.nopause = True  # <-- hardcoding no pausing mode
-    args.speed = 10     # <-- hardcoding the speed
+    #args.nopause = True  # <-- hardcoding no pausing mode
+    #args.speed = 10     # <-- hardcoding the speed
 
     ## AI selector, pick one:
-    #algo = AI_Player0() # do nothing, let human player control
+    algo = AI_Player0() # do nothing, let human player control
     #algo = AI_RuleBased() # rule-based algorithm
-    #algo = AI_RLQ()      # Q-learning - training mode
-    #algo = AI_RLQ(False) # Q-learning - testing mode, no exploration
-    #algo = AI_DQN()        # DQN - training mode
-    algo = AI_DQN(False)   # DQN - testing mode, no exploration
+    #algo = AI_RLQ()       # Q-learning - training mode
+    #algo = AI_RLQ(False)  # Q-learning - testing mode, no exploration
+    #algo = AI_DQN()       # DQN - training mode
+    #algo = AI_DQN(False)  # DQN - testing mode, no exploration
 
     ## for human/algo setting adjustment
     if "Human" in algo.get_name():