@@ -1252,7 +1252,7 @@
"tokenizer_file_path = hf_hub_download(\n",
" repo_id=\"meta-llama/Meta-Llama-3-8B\",\n",
" filename=\"original/tokenizer.model\",\n",
- " local_dir=\"llama3-files\"\n",
+ " local_dir=\"Llama-3-8B\"\n",
")"
]
},
@@ -1458,7 +1458,7 @@
" weights_file = hf_hub_download(\n",
" repo_id=\"meta-llama/Meta-Llama-3-8B\",\n",
" filename=f\"model-0000{i}-of-00004.safetensors\",\n",
- " local_dir=\"llama3-files\"\n",
+ " local_dir=\"Llama-3-8B\"\n",
" )\n",
" current_weights = load_file(weights_file)\n",
" combined_weights.update(current_weights)"
@@ -1677,7 +1677,7 @@
"id": "akyo7WNyF_YL"
},
"source": [
- "- Above, we used the pretrained base model; if you want to use a model capable of following instructions, use the `\"meta-llama/Llama-3-8b-Instruct\"` model instead, as shown below"
+ "- Above, we used the pretrained base model; if you want to use a model capable of following instructions, use the `\"meta-llama/Llama-3-8B-Instruct\"` model instead, as shown below"
]
},
{
@@ -1824,7 +1824,7 @@
" weights_file = hf_hub_download(\n",
" repo_id=\"meta-llama/Meta-Llama-3-8B-Instruct\",\n",
" filename=f\"model-0000{i}-of-00004.safetensors\",\n",
- " local_dir=\"llama3-files\"\n",
+ " local_dir=\"Llama-3-8B-Instruct\"\n",
" )\n",
" current_weights = load_file(weights_file)\n",
" combined_weights.update(current_weights)\n",
@@ -2157,7 +2157,7 @@
"tokenizer_file_path = hf_hub_download(\n",
" repo_id=\"meta-llama/Llama-3.1-8B\",\n",
" filename=\"original/tokenizer.model\",\n",
- " local_dir=\"llama31-files\"\n",
+ " local_dir=\"Llama-3.1-8B\"\n",
")\n",
"\n",
"tokenizer = Tokenizer(tokenizer_file_path)"
@@ -2313,7 +2313,7 @@
" weights_file = hf_hub_download(\n",
" repo_id=\"meta-llama/Llama-3.1-8B\",\n",
" filename=f\"model-0000{i}-of-00004.safetensors\",\n",
- " local_dir=\"llama31-files\"\n",
+ " local_dir=\"Llama-3.1-8B\"\n",
" )\n",
" current_weights = load_file(weights_file)\n",
" combined_weights.update(current_weights)\n",
@@ -2512,7 +2512,7 @@
"tokenizer_file_path = hf_hub_download(\n",
" repo_id=\"meta-llama/Llama-3.2-1B\",\n",
" filename=\"original/tokenizer.model\",\n",
- " local_dir=\"llama32-files\"\n",
+ " local_dir=\"Llama-3.2-1B\"\n",
")\n",
"\n",
"tokenizer = Tokenizer(tokenizer_file_path)"
@@ -2589,7 +2589,7 @@
"weights_file = hf_hub_download(\n",
" repo_id=\"meta-llama/Llama-3.2-1B\",\n",
" filename=f\"model.safetensors\",\n",
- " local_dir=\"llama32-files\"\n",
+ " local_dir=\"Llama-3.2-1B\"\n",
")\n",
"current_weights = load_file(weights_file)\n",
"\n",
@@ -2687,7 +2687,7 @@
"provenance": []
},
"kernelspec": {
- "display_name": "Python 3 (ipykernel)",
+ "display_name": "pt",
"language": "python",
"name": "python3"
},
@@ -2701,7 +2701,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.6"
+ "version": "3.11.9"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
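
The hunks above rename each `local_dir` so that downloaded files land in a folder matching the Hugging Face repo name (e.g. "Llama-3-8B" instead of "llama3-files"). For reference, a minimal sketch of that download pattern as plain Python, outside the notebook JSON; it assumes the huggingface_hub and safetensors packages are installed and that access to the gated meta-llama repositories has been granted.

# Sketch only, not part of the patch: download the four weight shards of
# Meta-Llama-3-8B into a local folder named after the repo, then merge them
# into a single state dict.
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

combined_weights = {}
for i in range(1, 5):  # the 8B model ships as four safetensors shards
    weights_file = hf_hub_download(
        repo_id="meta-llama/Meta-Llama-3-8B",
        filename=f"model-0000{i}-of-00004.safetensors",
        local_dir="Llama-3-8B",  # folder name mirrors the repo name
    )
    combined_weights.update(load_file(weights_file))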