# previous_chapters.py
  1. # Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
  2. # Source for "Build a Large Language Model From Scratch"
  3. # - https://www.manning.com/books/build-a-large-language-model-from-scratch
  4. # Code: https://github.com/rasbt/LLMs-from-scratch
  5. import tiktoken
  6. import torch
  7. import torch.nn as nn
  8. from torch.utils.data import Dataset, DataLoader
  9. class GPTDatasetV1(Dataset):
  10. def __init__(self, txt, tokenizer, max_length, stride):
  11. self.input_ids = []
  12. self.target_ids = []
  13. # Tokenize the entire text
  14. token_ids = tokenizer.encode(txt, allowed_special={"<|endoftext|>"})
  15. # Use a sliding window to chunk the book into overlapping sequences of max_length
  16. for i in range(0, len(token_ids) - max_length, stride):
  17. input_chunk = token_ids[i:i + max_length]
  18. target_chunk = token_ids[i + 1: i + max_length + 1]
  19. self.input_ids.append(torch.tensor(input_chunk))
  20. self.target_ids.append(torch.tensor(target_chunk))
  21. def __len__(self):
  22. return len(self.input_ids)
  23. def __getitem__(self, idx):
  24. return self.input_ids[idx], self.target_ids[idx]
  25. def create_dataloader_v1(txt, batch_size=4, max_length=256,
  26. stride=128, shuffle=True, drop_last=True, num_workers=0):
  27. # Initialize the tokenizer
  28. tokenizer = tiktoken.get_encoding("gpt2")
  29. # Create dataset
  30. dataset = GPTDatasetV1(txt, tokenizer, max_length, stride)
  31. # Create dataloader
  32. dataloader = DataLoader(
  33. dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last, num_workers=num_workers)
  34. return dataloader
  35. class MultiHeadAttention(nn.Module):
  36. def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):
  37. super().__init__()
  38. assert d_out % num_heads == 0, "d_out must be divisible by num_heads"
  39. self.d_out = d_out
  40. self.num_heads = num_heads
  41. self.head_dim = d_out // num_heads # Reduce the projection dim to match desired output dim
  42. self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
  43. self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
  44. self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)
  45. self.out_proj = nn.Linear(d_out, d_out) # Linear layer to combine head outputs
  46. self.dropout = nn.Dropout(dropout)
  47. self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))
  48. def forward(self, x):
  49. b, num_tokens, d_in = x.shape
  50. keys = self.W_key(x) # Shape: (b, num_tokens, d_out)
  51. queries = self.W_query(x)
  52. values = self.W_value(x)
  53. # We implicitly split the matrix by adding a `num_heads` dimension
  54. # Unroll last dim: (b, num_tokens, d_out) -> (b, num_tokens, num_heads, head_dim)
  55. keys = keys.view(b, num_tokens, self.num_heads, self.head_dim)
  56. values = values.view(b, num_tokens, self.num_heads, self.head_dim)
  57. queries = queries.view(b, num_tokens, self.num_heads, self.head_dim)
  58. # Transpose: (b, num_tokens, num_heads, head_dim) -> (b, num_heads, num_tokens, head_dim)
  59. keys = keys.transpose(1, 2)
  60. queries = queries.transpose(1, 2)
  61. values = values.transpose(1, 2)
  62. # Compute scaled dot-product attention (aka self-attention) with a causal mask
  63. attn_scores = queries @ keys.transpose(2, 3) # Dot product for each head
  64. # Original mask truncated to the number of tokens and converted to boolean
  65. mask_bool = self.mask.bool()[:num_tokens, :num_tokens]
  66. # Use the mask to fill attention scores
  67. attn_scores.masked_fill_(mask_bool, -torch.inf)
  68. attn_weights = torch.softmax(attn_scores / keys.shape[-1]**0.5, dim=-1)
  69. attn_weights = self.dropout(attn_weights)
  70. # Shape: (b, num_tokens, num_heads, head_dim)
  71. context_vec = (attn_weights @ values).transpose(1, 2)
  72. # Combine heads, where self.d_out = self.num_heads * self.head_dim
  73. context_vec = context_vec.contiguous().view(b, num_tokens, self.d_out)
  74. context_vec = self.out_proj(context_vec) # optional projection
  75. return context_vec