Explorar o código

fix imprecise wording in comments ("previous epoch" → "previous batch iteration")

rasbt hai 1 ano
pai
achega
1b1fd21d64

+ 1 - 1
appendix-E/01_main-chapter-code/previous_chapters.py

@@ -494,7 +494,7 @@ def train_classifier_simple(model, train_loader, val_loader, optimizer, device,
         model.train()  # Set model to training mode
 
         for input_batch, target_batch in train_loader:
-            optimizer.zero_grad()  # Reset loss gradients from previous epoch
+            optimizer.zero_grad()  # Reset loss gradients from previous batch iteration
             loss = calc_loss_batch(input_batch, target_batch, model, device)
             loss.backward()  # Calculate loss gradients
             optimizer.step()  # Update model weights using loss gradients

+ 2 - 2
ch05/01_main-chapter-code/ch05.ipynb

@@ -1230,7 +1230,7 @@
     "        model.train()  # Set model to training mode\n",
     "        \n",
     "        for input_batch, target_batch in train_loader:\n",
-    "            optimizer.zero_grad() # Reset loss gradients from previous epoch\n",
+    "            optimizer.zero_grad() # Reset loss gradients from previous batch iteration\n",
     "            loss = calc_loss_batch(input_batch, target_batch, model, device)\n",
     "            loss.backward() # Calculate loss gradients\n",
     "            optimizer.step() # Update model weights using loss gradients\n",
@@ -2477,7 +2477,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.4"
+   "version": "3.10.6"
   }
  },
  "nbformat": 4,

+ 1 - 1
ch05/01_main-chapter-code/gpt_train.py

@@ -84,7 +84,7 @@ def train_model_simple(model, train_loader, val_loader, optimizer, device, num_e
         model.train()  # Set model to training mode
 
         for input_batch, target_batch in train_loader:
-            optimizer.zero_grad()  # Reset loss gradients from previous epoch
+            optimizer.zero_grad()  # Reset loss gradients from previous batch iteration
             loss = calc_loss_batch(input_batch, target_batch, model, device)
             loss.backward()  # Calculate loss gradients
             optimizer.step()  # Update model weights using loss gradients

+ 2 - 2
ch06/01_main-chapter-code/ch06.ipynb

@@ -1871,7 +1871,7 @@
     "        model.train()  # Set model to training mode\n",
     "\n",
     "        for input_batch, target_batch in train_loader:\n",
-    "            optimizer.zero_grad() # Reset loss gradients from previous epoch\n",
+    "            optimizer.zero_grad() # Reset loss gradients from previous batch iteration\n",
     "            loss = calc_loss_batch(input_batch, target_batch, model, device)\n",
     "            loss.backward() # Calculate loss gradients\n",
     "            optimizer.step() # Update model weights using loss gradients\n",
@@ -2371,7 +2371,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.4"
+   "version": "3.10.6"
   }
  },
  "nbformat": 4,

+ 1 - 1
ch06/01_main-chapter-code/gpt-class-finetune.py

@@ -201,7 +201,7 @@ def train_classifier_simple(model, train_loader, val_loader, optimizer, device,
         model.train()  # Set model to training mode
 
         for input_batch, target_batch in train_loader:
-            optimizer.zero_grad()  # Reset loss gradients from previous epoch
+            optimizer.zero_grad()  # Reset loss gradients from previous batch iteration
             loss = calc_loss_batch(input_batch, target_batch, model, device)
             loss.backward()  # Calculate loss gradients
             optimizer.step()  # Update model weights using loss gradients

+ 1 - 1
ch06/03_bonus_imdb-classification/train-bert-hf.py

@@ -120,7 +120,7 @@ def train_classifier_simple(model, train_loader, val_loader, optimizer, device,
         model.train()  # Set model to training mode
 
         for input_batch, target_batch in train_loader:
-            optimizer.zero_grad()  # Reset loss gradients from previous epoch
+            optimizer.zero_grad()  # Reset loss gradients from previous batch iteration
             loss = calc_loss_batch(input_batch, target_batch, model, device)
             loss.backward()  # Calculate loss gradients
             optimizer.step()  # Update model weights using loss gradients

+ 1 - 1
ch06/03_bonus_imdb-classification/train-gpt.py

@@ -149,7 +149,7 @@ def train_classifier_simple(model, train_loader, val_loader, optimizer, device,
         model.train()  # Set model to training mode
 
         for input_batch, target_batch in train_loader:
-            optimizer.zero_grad()  # Reset loss gradients from previous epoch
+            optimizer.zero_grad()  # Reset loss gradients from previous batch iteration
             loss = calc_loss_batch(input_batch, target_batch, model, device, trainable_token=trainable_token)
             loss.backward()  # Calculate loss gradients
             optimizer.step()  # Update model weights using loss gradients