
minor fixes (#248)

* removed duplicated numpy req

* labels to array

* autopep8 NBs
Daniel Kleine 1 year ago
parent
commit
1db1999951
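The "autopep8 NBs" item above refers to PEP 8 whitespace normalization in the two notebooks below, e.g. adding a space after `:` in dictionary literals. As a rough illustration only, here is a minimal sketch of the kind of fix involved, using autopep8's Python API on a hypothetical one-liner (the snippet is illustrative, not taken from the repo):

```python
import autopep8  # formatter named in the commit message

# Hypothetical snippet with the E231 issue (missing whitespace after ':')
# that the notebook hunks below correct.
before = 'data = {"seed":123, "temperature":0}\n'

after = autopep8.fix_code(before)
print(after)  # data = {"seed": 123, "temperature": 0}
```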

+ 1 - 1
.github/ISSUE_TEMPLATE/ask-a-question.md

@@ -2,7 +2,7 @@
 name: Ask a Question
 about: Ask questions related to the book
 title: ''
-labels: question
+labels: [question]
 assignees: rasbt
 
 ---

+ 7 - 5
ch07/03_model-evaluation/llm-instruction-eval-ollama.ipynb

@@ -70,7 +70,7 @@
     "from importlib.metadata import version\n",
     "\n",
     "pkgs = [\"tqdm\",    # Progress bar\n",
-    "       ]\n",
+    "        ]\n",
     "\n",
     "for p in pkgs:\n",
     "    print(f\"{p} version: {version(p)}\")"
@@ -218,12 +218,13 @@
     "import urllib.request\n",
     "import json\n",
     "\n",
+    "\n",
     "def query_model(prompt, model=\"llama3\", url=\"http://localhost:11434/api/chat\"):\n",
     "    # Create the data payload as a dictionary\n",
     "    data = {\n",
     "        \"model\": model,\n",
-    "        \"seed\":123,        # for deterministic responses\n",
-    "        \"temperature\":0,   # for deterministic responses\n",
+    "        \"seed\": 123,        # for deterministic responses\n",
+    "        \"temperature\": 0,   # for deterministic responses\n",
     "        \"messages\": [\n",
     "            {\"role\": \"user\", \"content\": prompt}\n",
     "        ]\n",
@@ -290,7 +291,7 @@
     "\n",
     "with open(json_file, \"r\") as file:\n",
     "    json_data = json.load(file)\n",
-    "    \n",
+    "\n",
     "print(\"Number of entries:\", len(json_data))"
    ]
   },
@@ -520,7 +521,7 @@
     "              f\"and correct output `{entry['output']}`, \"\n",
     "              f\"score the model response `{entry['model 1 response']}`\"\n",
     "              f\" on a scale from 0 to 100, where 100 is the best score. \"\n",
-    "    )\n",
+    "              )\n",
     "    print(\"\\nDataset response:\")\n",
     "    print(\">>\", entry['output'])\n",
     "    print(\"\\nModel response:\")\n",
@@ -547,6 +548,7 @@
    "source": [
     "from tqdm import tqdm\n",
     "\n",
+    "\n",
     "def generate_model_scores(json_data, json_key):\n",
     "    scores = []\n",
     "    for entry in tqdm(json_data, desc=\"Scoring entries\"):\n",

+ 5 - 4
ch07/03_model-evaluation/llm-instruction-eval-openai.ipynb

@@ -80,7 +80,7 @@
     "\n",
     "pkgs = [\"openai\",  # OpenAI API\n",
     "        \"tqdm\",    # Progress bar\n",
-    "       ]\n",
+    "        ]\n",
     "\n",
     "for p in pkgs:\n",
     "    print(f\"{p} version: {version(p)}\")"
@@ -125,7 +125,7 @@
     "import json\n",
     "from openai import OpenAI\n",
     "\n",
-    "# Load API key from a JSON file. \n",
+    "# Load API key from a JSON file.\n",
     "# Make sure to replace \"sk-...\" with your actual API key from https://platform.openai.com/api-keys\n",
     "with open(\"config.json\", \"r\") as config_file:\n",
     "    config = json.load(config_file)\n",
@@ -209,7 +209,7 @@
     "\n",
     "with open(json_file, \"r\") as file:\n",
     "    json_data = json.load(file)\n",
-    "    \n",
+    "\n",
     "print(\"Number of entries:\", len(json_data))"
    ]
   },
@@ -409,7 +409,7 @@
     "              f\"and correct output `{entry['output']}`, \"\n",
     "              f\"score the model response `{entry['model 1 response']}`\"\n",
     "              f\" on a scale from 0 to 100, where 100 is the best score. \"\n",
-    "    )\n",
+    "              )\n",
     "    print(\"\\nDataset response:\")\n",
     "    print(\">>\", entry['output'])\n",
     "    print(\"\\nModel response:\")\n",
@@ -436,6 +436,7 @@
    "source": [
     "from tqdm import tqdm\n",
     "\n",
+    "\n",
     "def generate_model_scores(json_data, json_key, client):\n",
     "    scores = []\n",
     "    for entry in tqdm(json_data, desc=\"Scoring entries\"):\n",

+ 0 - 1
requirements.txt

@@ -2,7 +2,6 @@ torch >= 2.0.1        # all
 jupyterlab >= 4.0     # all
 tiktoken >= 0.5.1     # ch02; ch04; ch05
 matplotlib >= 3.7.1   # ch04; ch05
-numpy >= 1.24.3       # ch05
 tensorflow >= 2.15.0  # ch05
 tqdm >= 4.66.1        # ch05; ch07
 numpy >= 1.25, < 2.0  # dependency of several other libraries like torch and pandas