
fixes for code (#206)

* updated .gitignore

* removed unused GELU import

* fixed model_configs, fixed all tensors on same device

* removed unused tiktoken

* update

* update hparam search

* remove redundant tokenizer argument

---------

Co-authored-by: rasbt <mail@sebastianraschka.com>
Daniel Kleine, 1 year ago
commit dcbdc1d2e5

+ 2 - 0
.gitignore

@@ -20,6 +20,7 @@ ch07/01_main-chapter-code/loss-plot.pdf
 
 # Checkpoint files
 appendix-A/01_main-chapter-code/model.pth
+
 appendix-E/01_main-chapter-code/gpt2
 
 ch05/01_main-chapter-code/gpt2/
@@ -33,6 +34,7 @@ ch06/02_bonus_additional-experiments/gpt2
 ch06/03_bonus_imdb-classification/gpt2
 
 ch07/01_main-chapter-code/gpt2-medium355M-sft.pth
+ch07/01_main-chapter-code/gpt2/
 
 # Datasets
 appendix-E/01_main-chapter-code/sms_spam_collection.zip

+ 1 - 2
appendix-E/01_main-chapter-code/appendix-E.ipynb

@@ -1370,7 +1370,6 @@
     "train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(\n",
     "    model, train_loader, val_loader, optimizer, device,\n",
     "    num_epochs=num_epochs, eval_freq=50, eval_iter=5,\n",
-    "    tokenizer=tokenizer\n",
     ")\n",
     "\n",
     "end_time = time.time()\n",
@@ -1495,7 +1494,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.6"
+   "version": "3.11.4"
   }
  },
  "nbformat": 4,

+ 1 - 1
appendix-E/01_main-chapter-code/previous_chapters.py

@@ -484,7 +484,7 @@ def calc_loss_batch(input_batch, target_batch, model, device):
 
 # Overall the same as `train_model_simple` in chapter 5
 def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
-                            eval_freq, eval_iter, tokenizer):
+                            eval_freq, eval_iter):
     # Initialize lists to track losses and tokens seen
     train_losses, val_losses, train_accs, val_accs = [], [], [], []
     examples_seen, global_step = 0, -1
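
Note on the dropped argument: train_classifier_simple accepted tokenizer but never used it in its body; the train and validation loaders already yield token-ID tensors, so tokenization happens when the datasets are built, not during training. After the change a call looks roughly like this (sketch using the notebook's existing names):

    train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
        model, train_loader, val_loader, optimizer, device,
        num_epochs=num_epochs, eval_freq=50, eval_iter=5,  # no tokenizer= keyword anymore
    )

The same signature and call-site cleanup is applied to the chapter 6 notebook and scripts further down in this commit.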

+ 4 - 4
ch04/01_main-chapter-code/exercise-solutions.ipynb

@@ -262,7 +262,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 6,
    "id": "5fee2cf5-61c3-4167-81b5-44ea155bbaf2",
    "metadata": {},
    "outputs": [],
@@ -282,13 +282,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 7,
    "id": "5aa1b0c1-d78a-48fc-ad08-4802458b43f7",
    "metadata": {},
    "outputs": [],
    "source": [
     "import torch.nn as nn\n",
-    "from gpt import MultiHeadAttention, LayerNorm, GELU, FeedForward\n",
+    "from gpt import MultiHeadAttention, LayerNorm, FeedForward\n",
     "\n",
     "\n",
     "class TransformerBlock(nn.Module):\n",
@@ -351,7 +351,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 8,
    "id": "1d013d32-c275-4f42-be21-9010f1537227",
    "metadata": {},
    "outputs": [],

+ 1 - 3
ch04/02_performance-analysis/flops-analysis.ipynb

@@ -62,12 +62,10 @@
     "from importlib.metadata import version\n",
     "\n",
     "import matplotlib\n",
-    "import tiktoken\n",
     "import torch\n",
     "\n",
     "print(\"thop version:\", version(\"thop\"))\n",
-    "print(\"torch version:\", version(\"torch\"))\n",
-    "print(\"tiktoken version:\", version(\"tiktoken\"))"
+    "print(\"torch version:\", version(\"torch\"))"
    ]
   },
   {

+ 12 - 23
ch05/02_alternative_weight_loading/weight-loading-hf-transformers.ipynb

@@ -65,9 +65,9 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "numpy version: 1.25.2\n",
-      "torch version: 2.2.1\n",
-      "transformers version: 4.33.2\n"
+      "numpy version: 1.24.3\n",
+      "torch version: 2.3.0\n",
+      "transformers version: 4.41.2\n"
      ]
     }
    ],
@@ -85,16 +85,6 @@
    "id": "ffc17d7d-bcd8-42ee-82a9-04fd55acf15d",
    "metadata": {},
    "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/Users/sebastian/miniforge3/envs/book/lib/python3.11/site-packages/transformers/utils/generic.py:311: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
-      "  torch.utils._pytree._register_pytree_node(\n",
-      "/Users/sebastian/miniforge3/envs/book/lib/python3.11/site-packages/transformers/utils/generic.py:311: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
-      "  torch.utils._pytree._register_pytree_node(\n"
-     ]
-    },
     {
      "data": {
       "text/plain": [
@@ -162,10 +152,10 @@
     "}\n",
     "\n",
     "model_configs = {\n",
-    "    \"gpt2-small\": {\"emb_dim\": 768, \"n_layers\": 12, \"n_heads\": 12},\n",
-    "    \"gpt2-medium\": {\"emb_dim\": 1024, \"n_layers\": 24, \"n_heads\": 16},\n",
-    "    \"gpt2-large\": {\"emb_dim\": 1280, \"n_layers\": 36, \"n_heads\": 20},\n",
-    "    \"gpt2-xl\": {\"emb_dim\": 1600, \"n_layers\": 48, \"n_heads\": 25},\n",
+    "    \"gpt2-small (124M)\": {\"emb_dim\": 768, \"n_layers\": 12, \"n_heads\": 12},\n",
+    "    \"gpt2-medium (355M)\": {\"emb_dim\": 1024, \"n_layers\": 24, \"n_heads\": 16},\n",
+    "    \"gpt2-large (774M)\": {\"emb_dim\": 1280, \"n_layers\": 36, \"n_heads\": 20},\n",
+    "    \"gpt2-xl (1558M)\": {\"emb_dim\": 1600, \"n_layers\": 48, \"n_heads\": 25},\n",
     "}\n",
     "\n",
     "\n",
@@ -242,7 +232,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "/var/folders/jg/tpqyh1fd5js5wsr1d138k3n40000gn/T/ipykernel_32618/3877979348.py:4: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
+      "/tmp/ipykernel_9385/3877979348.py:4: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
       "  return torch.nn.Parameter(torch.tensor(right))\n"
      ]
     }
@@ -255,13 +245,12 @@
     "gpt = GPTModel(BASE_CONFIG)\n",
     "\n",
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
-    "load_weights(gpt, gpt_hf)\n",
-    "gpt.to(device);"
+    "load_weights(gpt, gpt_hf)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 9,
    "id": "4ddd0d51-3ade-4890-9bab-d63f141d095f",
    "metadata": {},
    "outputs": [
@@ -285,8 +274,8 @@
     "tokenizer = tiktoken.get_encoding(\"gpt2\")\n",
     "\n",
     "token_ids = generate(\n",
-    "    model=gpt,\n",
-    "    idx=text_to_token_ids(\"Every effort moves\", tokenizer),\n",
+    "    model=gpt.to(device),\n",
+    "    idx=text_to_token_ids(\"Every effort moves\", tokenizer).to(device),\n",
     "    max_new_tokens=30,\n",
     "    context_size=BASE_CONFIG[\"context_length\"],\n",
     "    top_k=1,\n",

+ 2 - 2
ch05/05_bonus_hparam_tuning/hparam_search.py

@@ -53,8 +53,8 @@ def calc_loss_batch(input_batch, target_batch, model, device):
 def evaluate_model(model, train_loader, val_loader, device, eval_iter):
     model.eval()
     with torch.no_grad():
-        train_loss = calc_loss_loader(train_loader, model, device, num_iters=eval_iter)
-        val_loss = calc_loss_loader(val_loader, model, device, num_iters=eval_iter)
+        train_loss = calc_loss_loader(train_loader, model, device, num_batches=eval_iter)
+        val_loss = calc_loss_loader(val_loader, model, device, num_batches=eval_iter)
     model.train()
     return train_loss, val_loss
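
The keyword rename matches what calc_loss_loader (imported from the chapter 5 code) actually defines; calling it with num_iters= raises a TypeError for an unexpected keyword argument before any loss is computed. As a reminder, the expected signature is roughly (sketch, not the verbatim chapter code):

    def calc_loss_loader(data_loader, model, device, num_batches=None):
        # averages calc_loss_batch over the first num_batches batches,
        # or over the whole loader when num_batches is None
        ...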
 

+ 2 - 2
ch05/05_bonus_hparam_tuning/previous_chapters.py

@@ -40,12 +40,12 @@ class GPTDatasetV1(Dataset):
 
 
 def create_dataloader_v1(txt, batch_size=4, max_length=256,
-                         stride=128, shuffle=True, drop_last=True):
+                         stride=128, shuffle=True, drop_last=True, num_workers=0):
     # Initialize the tokenizer
     tokenizer = tiktoken.get_encoding("gpt2")
 
     # Create dataset
-    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride, num_workers=0)
+    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride)
 
     # Create dataloader
     dataloader = DataLoader(
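
The bug here was passing num_workers to the dataset rather than the loader: GPTDatasetV1.__init__ takes no such argument, while torch.utils.data.DataLoader does. A minimal sketch of the corrected wiring inside create_dataloader_v1, assuming the surrounding definitions:

    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride)  # tokenization and windowing only
    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        drop_last=drop_last,
        num_workers=num_workers,  # worker processes are a DataLoader concern
    )
    return dataloader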

+ 2 - 3
ch06/01_main-chapter-code/ch06.ipynb

@@ -1861,7 +1861,7 @@
    "source": [
     "# Overall the same as `train_model_simple` in chapter 5\n",
     "def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,\n",
-    "                            eval_freq, eval_iter, tokenizer):\n",
+    "                            eval_freq, eval_iter):\n",
     "    # Initialize lists to track losses and examples seen\n",
     "    train_losses, val_losses, train_accs, val_accs = [], [], [], []\n",
     "    examples_seen, global_step = 0, -1\n",
@@ -1982,7 +1982,6 @@
     "train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(\n",
     "    model, train_loader, val_loader, optimizer, device,\n",
     "    num_epochs=num_epochs, eval_freq=50, eval_iter=5,\n",
-    "    tokenizer=tokenizer\n",
     ")\n",
     "\n",
     "end_time = time.time()\n",
@@ -2371,7 +2370,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.6"
+   "version": "3.11.4"
   }
  },
  "nbformat": 4,

+ 2 - 2
ch06/02_bonus_additional-experiments/additional-experiments.py

@@ -235,7 +235,7 @@ def evaluate_model(model, train_loader, val_loader, device,
 
 
 def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
-                            eval_freq, eval_iter, tokenizer, max_steps=None, trainable_token_pos=-1,
+                            eval_freq, eval_iter, max_steps=None, trainable_token_pos=-1,
                             accumulation_steps=1, ignore_index=-100):
     # Initialize lists to track losses and tokens seen
     train_losses, val_losses, train_accs, val_accs = [], [], [], []
@@ -565,7 +565,7 @@ if __name__ == "__main__":
     train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
         model, train_loader, val_loader, optimizer, device,
         num_epochs=args.num_epochs, eval_freq=50, eval_iter=5,
-        tokenizer=tokenizer, max_steps=None, trainable_token_pos=args.trainable_token_pos,
+        max_steps=None, trainable_token_pos=args.trainable_token_pos,
         accumulation_steps=args.accumulation_steps
     )
 

+ 2 - 2
ch06/03_bonus_imdb-classification/train-bert-hf.py

@@ -110,7 +110,7 @@ def evaluate_model(model, train_loader, val_loader, device, eval_iter):
 
 
 def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
-                            eval_freq, eval_iter, tokenizer, max_steps=None):
+                            eval_freq, eval_iter, max_steps=None):
     # Initialize lists to track losses and tokens seen
     train_losses, val_losses, train_accs, val_accs = [], [], [], []
     examples_seen, global_step = 0, -1
@@ -279,7 +279,7 @@ if __name__ == "__main__":
     train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
         model, train_loader, val_loader, optimizer, device,
         num_epochs=num_epochs, eval_freq=50, eval_iter=20,
-        tokenizer=tokenizer, max_steps=None
+        max_steps=None
     )
 
     end_time = time.time()

+ 2 - 2
ch06/03_bonus_imdb-classification/train-gpt.py

@@ -139,7 +139,7 @@ def evaluate_model(model, train_loader, val_loader, device, eval_iter, trainable
 
 
 def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
-                            eval_freq, eval_iter, tokenizer, max_steps=None, trainable_token=-1):
+                            eval_freq, eval_iter, max_steps=None, trainable_token=-1):
     # Initialize lists to track losses and tokens seen
     train_losses, val_losses, train_accs, val_accs = [], [], [], []
     examples_seen, global_step = 0, -1
@@ -344,7 +344,7 @@ if __name__ == "__main__":
     train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
         model, train_loader, val_loader, optimizer, device,
         num_epochs=num_epochs, eval_freq=50, eval_iter=20,
-        tokenizer=tokenizer, max_steps=None, trainable_token=args.trainable_token
+        max_steps=None, trainable_token=args.trainable_token
     )
 
     end_time = time.time()