# previous_chapters.py
  1. # Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
  2. # Source for "Build a Large Language Model From Scratch"
  3. # - https://www.manning.com/books/build-a-large-language-model-from-scratch
  4. # Code: https://github.com/rasbt/LLMs-from-scratch
  5. import tiktoken
  6. import torch
  7. import torch.nn as nn
  8. from torch.utils.data import Dataset, DataLoader
  9. class GPTDatasetV1(Dataset):
  10. def __init__(self, txt, tokenizer, max_length, stride):
  11. self.tokenizer = tokenizer
  12. self.input_ids = []
  13. self.target_ids = []
  14. # Tokenize the entire text
  15. token_ids = tokenizer.encode(txt)
  16. # Use a sliding window to chunk the book into overlapping sequences of max_length
  17. for i in range(0, len(token_ids) - max_length, stride):
  18. input_chunk = token_ids[i:i + max_length]
  19. target_chunk = token_ids[i + 1: i + max_length + 1]
  20. self.input_ids.append(torch.tensor(input_chunk))
  21. self.target_ids.append(torch.tensor(target_chunk))
  22. def __len__(self):
  23. return len(self.input_ids)
  24. def __getitem__(self, idx):
  25. return self.input_ids[idx], self.target_ids[idx]
  26. def create_dataloader_v1(txt, batch_size=4, max_length=256,
  27. stride=128, shuffle=True, drop_last=True):
  28. # Initialize the tokenizer
  29. tokenizer = tiktoken.get_encoding("gpt2")
  30. # Create dataset
  31. dataset = GPTDatasetV1(txt, tokenizer, max_length, stride)
  32. # Create dataloader
  33. dataloader = DataLoader(
  34. dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last)
  35. return dataloader
  36. class MultiHeadAttention(nn.Module):
  37. def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):
  38. super().__init__()
  39. assert d_out % num_heads == 0, "d_out must be divisible by num_heads"
  40. self.d_out = d_out
  41. self.num_heads = num_heads
  42. self.head_dim = d_out // num_heads # Reduce the projection dim to match desired output dim
  43. self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
  44. self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
  45. self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)
  46. self.out_proj = nn.Linear(d_out, d_out) # Linear layer to combine head outputs
  47. self.dropout = nn.Dropout(dropout)
  48. self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))
  49. def forward(self, x):
  50. b, num_tokens, d_in = x.shape
  51. keys = self.W_key(x) # Shape: (b, num_tokens, d_out)
  52. queries = self.W_query(x)
  53. values = self.W_value(x)
  54. # We implicitly split the matrix by adding a `num_heads` dimension
  55. # Unroll last dim: (b, num_tokens, d_out) -> (b, num_tokens, num_heads, head_dim)
  56. keys = keys.view(b, num_tokens, self.num_heads, self.head_dim)
  57. values = values.view(b, num_tokens, self.num_heads, self.head_dim)
  58. queries = queries.view(b, num_tokens, self.num_heads, self.head_dim)
  59. # Transpose: (b, num_tokens, num_heads, head_dim) -> (b, num_heads, num_tokens, head_dim)
  60. keys = keys.transpose(1, 2)
  61. queries = queries.transpose(1, 2)
  62. values = values.transpose(1, 2)
  63. # Compute scaled dot-product attention (aka self-attention) with a causal mask
  64. attn_scores = queries @ keys.transpose(2, 3) # Dot product for each head
  65. # Original mask truncated to the number of tokens and converted to boolean
  66. mask_bool = self.mask.bool()[:num_tokens, :num_tokens]
  67. # Use the mask to fill attention scores
  68. attn_scores.masked_fill_(mask_bool, -torch.inf)
  69. attn_weights = torch.softmax(attn_scores / keys.shape[-1]**0.5, dim=-1)
  70. attn_weights = self.dropout(attn_weights)
  71. # Shape: (b, num_tokens, num_heads, head_dim)
  72. context_vec = (attn_weights @ values).transpose(1, 2)
  73. # Combine heads, where self.d_out = self.num_heads * self.head_dim
  74. context_vec = context_vec.contiguous().view(b, num_tokens, self.d_out)
  75. context_vec = self.out_proj(context_vec) # optional projection
  76. return context_vec