Maple222 committed on
Commit 3eed45a · verified · 1 parent: 3fa082a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. litgpt/__pycache__/__init__.cpython-310.pyc +0 -0
  2. litgpt/__pycache__/__main__.cpython-310.pyc +0 -0
  3. litgpt/__pycache__/adapter.cpython-310.pyc +0 -0
  4. litgpt/__pycache__/adapter_v2.cpython-310.pyc +0 -0
  5. litgpt/__pycache__/api.cpython-310.pyc +0 -0
  6. litgpt/__pycache__/args.cpython-310.pyc +0 -0
  7. litgpt/__pycache__/config.cpython-310.pyc +0 -0
  8. litgpt/__pycache__/lora.cpython-310.pyc +0 -0
  9. litgpt/__pycache__/model.cpython-310.pyc +0 -0
  10. litgpt/__pycache__/perplexity.cpython-310.pyc +0 -0
  11. litgpt/__pycache__/pretrain.cpython-310.pyc +0 -0
  12. litgpt/__pycache__/prompts.cpython-310.pyc +0 -0
  13. litgpt/__pycache__/tokenizer.cpython-310.pyc +0 -0
  14. litgpt/__pycache__/utils.cpython-310.pyc +0 -0
  15. litgpt/chat/__init__.py +0 -0
  16. litgpt/chat/base.py +272 -0
  17. litgpt/data/__init__.py +39 -0
  18. litgpt/data/alpaca.py +141 -0
  19. litgpt/data/alpaca_2k.py +53 -0
  20. litgpt/data/alpaca_gpt4.py +23 -0
  21. litgpt/data/arxiv.py +81 -0
  22. litgpt/data/base.py +143 -0
  23. litgpt/data/count.py +43 -0
  24. litgpt/data/deita.py +115 -0
  25. litgpt/data/flan.py +190 -0
  26. litgpt/data/json_data.py +149 -0
  27. litgpt/data/lima.py +131 -0
  28. litgpt/data/lit_data.py +64 -0
  29. litgpt/data/longform.py +90 -0
  30. litgpt/data/microllama.py +14 -0
  31. litgpt/data/openwebtext.py +107 -0
  32. litgpt/data/prepare_arxiv.py +62 -0
  33. litgpt/data/prepare_slimpajama.py +63 -0
  34. litgpt/data/prepare_starcoder.py +81 -0
  35. litgpt/data/sharding.py +45 -0
  36. litgpt/data/split_trainval.py +58 -0
  37. litgpt/data/text_files.py +148 -0
  38. litgpt/data/tinyllama.py +103 -0
  39. litgpt/data/tinystories.py +141 -0
  40. litgpt/deploy/__init__.py +0 -0
  41. litgpt/deploy/__pycache__/__init__.cpython-310.pyc +0 -0
  42. litgpt/deploy/serve.py +308 -0
  43. litgpt/eval/evaluate.py +155 -0
  44. litgpt/finetune/__init__.py +0 -0
  45. litgpt/finetune/__pycache__/__init__.cpython-310.pyc +0 -0
  46. litgpt/finetune/__pycache__/adapter.cpython-310.pyc +0 -0
  47. litgpt/finetune/__pycache__/adapter_v2.cpython-310.pyc +0 -0
  48. litgpt/finetune/adapter.py +491 -0
  49. litgpt/finetune/adapter_v2.py +514 -0
  50. litgpt/finetune/full.py +457 -0
litgpt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (814 Bytes).

litgpt/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (2.92 kB).

litgpt/__pycache__/adapter.cpython-310.pyc ADDED
Binary file (5.47 kB).

litgpt/__pycache__/adapter_v2.cpython-310.pyc ADDED
Binary file (9.33 kB).

litgpt/__pycache__/api.cpython-310.pyc ADDED
Binary file (21.9 kB).

litgpt/__pycache__/args.cpython-310.pyc ADDED
Binary file (3.36 kB).

litgpt/__pycache__/config.cpython-310.pyc ADDED
Binary file (37.1 kB).

litgpt/__pycache__/lora.cpython-310.pyc ADDED
Binary file (24.5 kB).

litgpt/__pycache__/model.cpython-310.pyc ADDED
Binary file (26.5 kB).

litgpt/__pycache__/perplexity.cpython-310.pyc ADDED
Binary file (14.1 kB).

litgpt/__pycache__/pretrain.cpython-310.pyc ADDED
Binary file (15.9 kB).

litgpt/__pycache__/prompts.cpython-310.pyc ADDED
Binary file (21.5 kB).

litgpt/__pycache__/tokenizer.cpython-310.pyc ADDED
Binary file (5.13 kB).

litgpt/__pycache__/utils.cpython-310.pyc ADDED
Binary file (28.4 kB).
litgpt/chat/__init__.py ADDED
File without changes
litgpt/chat/base.py ADDED
@@ -0,0 +1,272 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.

import sys
import time
from pathlib import Path
from pprint import pprint
from typing import Iterator, List, Literal, Optional, Tuple

import lightning as L
import torch
from lightning.fabric.plugins import BitsandbytesPrecision

from litgpt.config import Config
from litgpt.generate.base import next_token  # needed by the --compile path in main()
from litgpt.model import GPT
from litgpt.prompts import PromptStyle, has_prompt_style, load_prompt_style
from litgpt.scripts.merge_lora import merge_lora
from litgpt.tokenizer import Tokenizer
from litgpt.utils import (
    auto_download_checkpoint,
    check_file_size_on_cpu_and_warn,
    extend_checkpoint_dir,
    get_default_supported_precision,
    load_checkpoint,
)


@torch.inference_mode()
def generate(
    model: GPT,
    prompt: torch.Tensor,
    max_returned_tokens: int,
    *,
    temperature: float = 1.0,
    top_k: Optional[int] = None,
    top_p: float = 1.0,
    stop_tokens: Tuple[List[int], ...] = (),
) -> Iterator[torch.Tensor]:
    """Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as possible.

    Arguments:
        model: The model to use.
        prompt: Tensor of shape (T) with indices of the prompt sequence.
        max_returned_tokens: The maximum number of tokens to return (given plus generated).
        temperature: Scales the predicted logits by 1 / temperature.
        top_k: If specified, only sample among the tokens with the k highest probabilities.
        top_p: If specified, it represents the cumulative probability threshold to consider in the sampling process.
            In top-p sampling, the next token is sampled from the highest probability tokens
            whose cumulative probability exceeds the threshold `top_p`. When specified,
            it must be `0 <= top_p <= 1`. Here, `top_p=0` is equivalent
            to sampling the most probable token, while `top_p=1` samples from the whole distribution.
            It can be used in conjunction with `top_k` and `temperature` with the following order
            of application:

            1. `top_k` sampling
            2. `temperature` scaling
            3. `top_p` sampling

            For more details, see https://arxiv.org/abs/1904.09751
            or https://huyenchip.com/2024/01/16/sampling.html#top_p
        stop_tokens: If specified, stop generating once any of these token sequences is produced.
    """
    from litgpt.generate.base import generate_fn

    return generate_fn(
        include_prompt=False,
        include_eos=False,
        model=model,
        prompt=prompt,
        max_returned_tokens=max_returned_tokens,
        temperature=temperature,
        top_k=top_k,
        top_p=top_p,
        stop_tokens=stop_tokens,
    )


def process_prompt(
    prompt, model, tokenizer, prompt_style, fabric, temperature, max_new_tokens, top_k, top_p, stop_tokens
):
    prompt = prompt_style.apply(prompt=prompt)
    encoded_prompt = tokenizer.encode(prompt, device=fabric.device)

    if max_new_tokens is None:
        max_returned_tokens = model.max_seq_length
    else:
        first_turn = model.mask_cache is None
        max_returned_tokens = encoded_prompt.size(0) + max_new_tokens
        if first_turn or max_returned_tokens > model.max_seq_length:
            model.max_seq_length = max_returned_tokens
            model.set_kv_cache(batch_size=1, device=fabric.device)

    y: Iterator[torch.Tensor] = generate(
        model,
        encoded_prompt,
        max_returned_tokens,
        temperature=temperature,
        top_k=top_k,
        top_p=top_p,
        stop_tokens=stop_tokens,
    )
    token_generator: Iterator[str] = tokenizer.decode_stream(y, device=fabric.device)

    fabric.print(">> Reply: ", end="")

    t0 = time.perf_counter()

    tokens_generated = 0
    for tok in token_generator:
        tokens_generated += 1
        fabric.print(tok, end="", flush=True)

    t = time.perf_counter() - t0

    for block in model.transformer.h:
        block.attn.kv_cache.reset_parameters()
    fabric.print(
        f"\nTime for inference: {t:.02f} sec total, {tokens_generated / t:.02f} tokens/sec, {tokens_generated} tokens",
        file=sys.stderr,
    )
    fabric.print()


def interact(multiline, model, tokenizer, prompt_style, fabric, temperature, max_new_tokens, top_k, top_p, stop_tokens):
    while True:
        try:
            if not multiline:
                prompt = input(">> Prompt: ")
            else:
                print(">> Prompt: (Type '!submit' on a new line to end input).")
                prompt_lines = []
                while True:
                    line = input()
                    if line.strip().lower() in ("!submit", "!quit", "!exit"):
                        break
                    prompt_lines.append(line)
                prompt = "\n".join(prompt_lines)

        except KeyboardInterrupt:
            break

        # Strip whitespace, but only lowercase the exit-command check so the
        # model still sees the user's original casing.
        prompt = prompt.strip()
        if not prompt or prompt.lower() in ("!quit", "!exit"):
            break

        process_prompt(
            prompt, model, tokenizer, prompt_style, fabric, temperature, max_new_tokens, top_k, top_p, stop_tokens
        )


@torch.inference_mode()
def main(
    checkpoint_dir: Path,
    *,
    max_new_tokens: int = 50,
    top_k: Optional[int] = 50,
    top_p: float = 1.0,
    temperature: float = 0.8,
    quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8"]] = None,
    precision: Optional[str] = None,
    compile: bool = False,
    multiline: bool = False,
    access_token: Optional[str] = None,
) -> None:
    """Chat with a model.

    Args:
        checkpoint_dir: A local path to a directory containing the model weights or a valid model name.
            You can get a list of valid model names via the `litgpt download list` command line argument.
        max_new_tokens: The number of generation steps to take.
        top_k: The number of top most probable tokens to consider in the sampling process.
        top_p: If specified, it represents the cumulative probability threshold to consider in the sampling process.
            In top-p sampling, the next token is sampled from the highest probability tokens
            whose cumulative probability exceeds the threshold `top_p`. When specified,
            it must be `0 <= top_p <= 1`. Here, `top_p=0` is equivalent
            to sampling the most probable token, while `top_p=1` samples from the whole distribution.
            It can be used in conjunction with `top_k` and `temperature` with the following order
            of application:

            1. `top_k` sampling
            2. `temperature` scaling
            3. `top_p` sampling

            For more details, see https://arxiv.org/abs/1904.09751
            or https://huyenchip.com/2024/01/16/sampling.html#top_p
        temperature: A value controlling the randomness of the sampling process. Higher values result in more random
            samples.
        quantize: Whether to quantize the model, and if so, which method to use:
            - bnb.nf4, bnb.nf4-dq, bnb.fp4, bnb.fp4-dq: 4-bit quantization from bitsandbytes
            - bnb.int8: 8-bit quantization from bitsandbytes
            For more details, see https://github.com/Lightning-AI/litgpt/blob/main/tutorials/quantize.md
        precision: Indicates the Fabric precision setting to use.
        compile: Whether to use compilation to speed up token generation. Will increase startup time.
        multiline: Whether to support multiline input prompts.
        access_token: Optional API token to access models with restrictions.
    """
    checkpoint_dir = extend_checkpoint_dir(checkpoint_dir)
    pprint(locals())

    precision = precision or get_default_supported_precision(training=False)

    plugins = None
    if quantize is not None and quantize.startswith("bnb."):
        if "mixed" in precision:
            raise ValueError("Quantization and mixed precision is not supported.")
        dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision]
        plugins = BitsandbytesPrecision(quantize[4:], dtype)
        precision = None

    fabric = L.Fabric(devices=1, precision=precision, plugins=plugins)

    # Merge if this is a raw LoRA checkpoint
    checkpoint_path = checkpoint_dir / "lit_model.pth"
    if (checkpoint_dir / "lit_model.pth.lora").is_file() and not checkpoint_path.is_file():
        print("Merging LoRA weights with the base model. This won't take long and is a one-time-only thing.")
        merge_lora(checkpoint_dir)

    if not checkpoint_path.is_file():
        checkpoint_dir = auto_download_checkpoint(model_name=checkpoint_dir, access_token=access_token)
        checkpoint_path = checkpoint_dir / "lit_model.pth"

    check_file_size_on_cpu_and_warn(checkpoint_path, fabric.device)
    config = Config.from_file(checkpoint_dir / "model_config.yaml")

    with fabric.init_module(empty_init=True):
        model = GPT(config)
        if compile:
            print(
                "IMPORTANT: with compilation enabled, the KV-cache size is determined by the model's maximum context "
                "size, which leads to higher memory consumption. In case of an OOM error, try to set `--compile=False`."
            )
            model.set_kv_cache(batch_size=1)
    load_checkpoint(fabric, model, checkpoint_path)
    model.eval()

    if compile:
        torch._dynamo.config.automatic_dynamic_shapes = True
        torch._inductor.config.triton.unique_kernel_names = True
        torch._inductor.config.coordinate_descent_tuning = True
        global next_token
        next_token = torch.compile(next_token, mode="reduce-overhead", dynamic=True)

    model = fabric.setup_module(model)

    tokenizer = Tokenizer(checkpoint_dir)
    prompt_style = (
        load_prompt_style(checkpoint_dir) if has_prompt_style(checkpoint_dir) else PromptStyle.from_config(config)
    )
    stop_tokens = prompt_style.stop_tokens(tokenizer)

    if multiline:
        exit_instruction = "To exit, enter '!quit' or '!exit' on an empty prompt and press 'Enter'."
    else:
        exit_instruction = "To exit, press 'Enter' on an empty prompt."

    print(f"Now chatting with {config.name}.\n{exit_instruction}\n")
    L.seed_everything(1234)

    interact(
        multiline=multiline,
        model=model,
        tokenizer=tokenizer,
        prompt_style=prompt_style,
        fabric=fabric,
        temperature=temperature,
        max_new_tokens=(None if compile else max_new_tokens),
        top_k=top_k,
        top_p=top_p,
        stop_tokens=stop_tokens,
    )

    if fabric.device.type == "cuda":
        fabric.print(f"\nMemory used: {torch.cuda.max_memory_allocated() / 1e9:.02f} GB", file=sys.stderr)
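
A minimal usage sketch for this entry point, assuming a checkpoint directory that already contains `lit_model.pth` and `model_config.yaml` (the path below is hypothetical; `main` also accepts a downloadable model name):

from pathlib import Path

from litgpt.chat.base import main

main(
    checkpoint_dir=Path("checkpoints/EleutherAI/pythia-160m"),  # hypothetical local checkpoint dir
    max_new_tokens=128,
    temperature=0.7,
)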
litgpt/data/__init__.py ADDED
@@ -0,0 +1,39 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.

from litgpt.data.alpaca import Alpaca
from litgpt.data.alpaca_2k import Alpaca2k
from litgpt.data.alpaca_gpt4 import AlpacaGPT4
from litgpt.data.base import DataModule, SFTDataset, get_sft_collate_fn
from litgpt.data.deita import Deita
from litgpt.data.flan import FLAN
from litgpt.data.json_data import JSON
from litgpt.data.lima import LIMA
from litgpt.data.lit_data import LitData
from litgpt.data.longform import LongForm
from litgpt.data.microllama import MicroLlama
from litgpt.data.openwebtext import OpenWebText
from litgpt.data.text_files import TextFiles
from litgpt.data.tinyllama import TinyLlama
from litgpt.data.tinystories import TinyStories
from litgpt.data.arxiv import Arxiv

__all__ = [
    "Alpaca",
    "Alpaca2k",
    "AlpacaGPT4",
    "Arxiv",
    "Deita",
    "FLAN",
    "JSON",
    "LIMA",
    "LitData",
    "DataModule",
    "LongForm",
    "OpenWebText",
    "SFTDataset",
    "TextFiles",
    "TinyLlama",
    "TinyStories",
    "MicroLlama",
    "get_sft_collate_fn",
]
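
Because every module is re-exported here (note that `arxiv` must be imported after `base`, since it imports `DataModule` from this package), downstream scripts can pull any data module from a single import path, for example:

from litgpt.data import Arxiv, SFTDataset, get_sft_collate_fn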
litgpt/data/alpaca.py ADDED
@@ -0,0 +1,141 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
"""Implementation derived from https://github.com/tloen/alpaca-lora"""

import json
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

import torch
from lightning_utilities.core.imports import RequirementCache
from torch.utils.data import DataLoader, random_split

from litgpt.data.base import DataModule, SFTDataset, get_sft_collate_fn
from litgpt.prompts import PromptStyle
from litgpt.tokenizer import Tokenizer

_URL = "https://raw.githubusercontent.com/tloen/alpaca-lora/main/alpaca_data_cleaned_archive.json"


@dataclass
class Alpaca(DataModule):
    """Alpaca data module for supervised finetuning."""

    mask_prompt: bool = False
    """Whether to mask the prompt section from the label (with ``ignore_index``)."""
    val_split_fraction: float = 0.03865  # to get exactly 2000 validation samples
    """The fraction of the dataset to use for the validation dataset. The rest is used for training."""
    prompt_style: Union[str, PromptStyle] = "alpaca"
    """The style to apply to instruction prompts. See `litgpt.prompts` for a list of available styles."""
    ignore_index: int = -100
    """The index to use for elements to be ignored in the label."""
    seed: int = 42
    """The random seed for creating the train/val splits and shuffling the dataset."""
    num_workers: int = 4
    """How many DataLoader processes to use for loading."""
    download_dir: Path = Path("./data/alpaca")
    """The directory in which the downloaded dataset gets saved."""
    file_url: str = field(repr=False, default=_URL)
    """The URL from where to download the dataset."""
    file_name: str = field(repr=False, default="alpaca_data_cleaned_archive.json")
    """The name of the dataset file to download."""

    tokenizer: Optional[Tokenizer] = field(default=None, init=False, repr=False)
    batch_size: int = field(default=1, init=False, repr=False)
    max_seq_length: int = field(default=-1, init=False, repr=False)
    train_dataset: Optional[SFTDataset] = field(default=None, init=False, repr=False)
    test_dataset: Optional[SFTDataset] = field(default=None, init=False, repr=False)

    def __post_init__(self) -> None:
        super().__init__()
        if isinstance(self.prompt_style, str):
            self.prompt_style = PromptStyle.from_name(self.prompt_style)

    def connect(
        self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: Optional[int] = None
    ) -> None:
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.max_seq_length = -1 if max_seq_length is None else max_seq_length

    def prepare_data(self) -> None:
        self.download_dir.mkdir(parents=True, exist_ok=True)
        download_if_missing(self.download_dir / self.file_name, self.file_url)

    def setup(self, stage: str = "") -> None:
        with open(self.download_dir / self.file_name, encoding="utf-8") as file:
            data = json.load(file)

        # Partition the dataset into train and test
        train_data, test_data = random_split(
            data,
            [1.0 - self.val_split_fraction, self.val_split_fraction],
            generator=torch.Generator().manual_seed(self.seed),
        )
        train_data, test_data = list(train_data), list(test_data)

        self.train_dataset = SFTDataset(
            data=train_data,
            tokenizer=self.tokenizer,
            prompt_style=self.prompt_style,
            max_seq_length=self.max_seq_length,
            mask_prompt=self.mask_prompt,
            ignore_index=self.ignore_index,
        )
        self.test_dataset = SFTDataset(
            data=test_data,
            tokenizer=self.tokenizer,
            prompt_style=self.prompt_style,
            max_seq_length=self.max_seq_length,
            mask_prompt=self.mask_prompt,
            ignore_index=self.ignore_index,
        )

    def train_dataloader(self) -> DataLoader:
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=True,
            generator=torch.Generator().manual_seed(self.seed),
            num_workers=self.num_workers,
            collate_fn=get_sft_collate_fn(max_seq_length=self.max_seq_length, ignore_index=self.ignore_index),
        )

    def val_dataloader(self) -> DataLoader:
        return DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            collate_fn=get_sft_collate_fn(max_seq_length=self.max_seq_length, ignore_index=self.ignore_index),
        )


def download_if_missing(file_path: Path, file_url: str, mode: str = "w", stream: bool = False) -> None:
    """Downloads the raw json data file and saves it in the given destination."""
    if file_path.exists() and file_path.stat().st_size > 0:
        return
    requests_available = RequirementCache("requests")
    if not requests_available:
        raise ModuleNotFoundError(str(requests_available))
    import requests

    response = requests.get(file_url, stream=stream)
    with open(file_path, mode, encoding=None if mode == "wb" else "utf-8") as f:
        if stream:
            # credit: https://github.com/karpathy/llama2.c/blob/b3c4b6/tinystories.py#L25-L38
            from tqdm import tqdm

            pbar = tqdm(
                desc=str(file_path),
                total=int(response.headers.get("content-length", 0)),
                unit="iB",
                unit_scale=True,
                unit_divisor=1024,
            )
            for data in response.iter_content(chunk_size=1024):
                size = f.write(data)
                pbar.update(size)
            pbar.close()
        else:
            f.write(response.text)
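
A sketch of how this module is typically wired up (the tokenizer checkpoint path is hypothetical; everything else uses the class as defined above):

from pathlib import Path

from litgpt.data import Alpaca
from litgpt.tokenizer import Tokenizer

data = Alpaca(download_dir=Path("./data/alpaca"))
tokenizer = Tokenizer(Path("checkpoints/EleutherAI/pythia-160m"))  # hypothetical checkpoint dir
data.connect(tokenizer=tokenizer, batch_size=4, max_seq_length=512)
data.prepare_data()  # downloads the JSON file if missing
data.setup()         # builds the train/val SFTDataset splits
batch = next(iter(data.train_dataloader()))  # dict with "input_ids", "labels", "token_counts"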
litgpt/data/alpaca_2k.py ADDED
@@ -0,0 +1,53 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.


from dataclasses import dataclass, field
from pathlib import Path

from litgpt.data.alpaca import Alpaca
from litgpt.data.base import SFTDataset


@dataclass
class Alpaca2k(Alpaca):
    """Alpaca2k data module for supervised finetuning."""

    val_split_fraction: float = 0.05  # to get exactly 100 validation samples
    """The fraction of the dataset to use for the validation dataset. The rest is used for training."""
    download_dir: Path = Path("./data/alpaca2k")
    """The directory in which the downloaded dataset gets saved."""
    repo_id: str = field(repr=False, default="mhenrichsen/alpaca_2k_test")
    """The Hugging Face dataset repository ID from where to download the data."""
    file_name: str = field(repr=False, default="alpaca2k_data_cleaned_archive.json")
    """The name of the dataset file to download."""

    def prepare_data(self) -> None:
        from datasets import load_dataset

        load_dataset(self.repo_id, cache_dir=self.download_dir)

    def setup(self, stage: str = "") -> None:
        from datasets import load_dataset

        dataset = load_dataset(self.repo_id, cache_dir=self.download_dir)

        train_validation_split = dataset["train"].train_test_split(test_size=self.val_split_fraction, seed=self.seed)
        train_data = train_validation_split["train"]
        test_data = train_validation_split["test"]

        self.train_dataset = SFTDataset(
            data=train_data,
            tokenizer=self.tokenizer,
            prompt_style=self.prompt_style,
            max_seq_length=self.max_seq_length,
            mask_prompt=self.mask_prompt,
            ignore_index=self.ignore_index,
        )
        self.test_dataset = SFTDataset(
            data=test_data,
            tokenizer=self.tokenizer,
            prompt_style=self.prompt_style,
            max_seq_length=self.max_seq_length,
            mask_prompt=self.mask_prompt,
            ignore_index=self.ignore_index,
        )
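
Unlike the parent class, this subclass fetches from the Hugging Face Hub via the `datasets` library rather than a raw URL, so a sketch of preparing it only needs a cache directory:

from pathlib import Path

from litgpt.data import Alpaca2k

data = Alpaca2k(download_dir=Path("./data/alpaca2k"))
data.prepare_data()  # caches mhenrichsen/alpaca_2k_test under download_dir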
litgpt/data/alpaca_gpt4.py ADDED
@@ -0,0 +1,23 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.


from dataclasses import dataclass, field
from pathlib import Path

from litgpt.data.alpaca import Alpaca

_URL = "https://raw.githubusercontent.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM/main/data/alpaca_gpt4_data.json"


@dataclass
class AlpacaGPT4(Alpaca):
    """AlpacaGPT4 data module for supervised finetuning."""

    val_split_fraction: float = 0.03847  # to get exactly 2000 test samples
    """The fraction of the dataset to use for the validation dataset. The rest is used for training."""
    download_dir: Path = Path("./data/alpacagpt4")
    """The directory in which the downloaded dataset gets saved."""
    file_url: str = field(repr=False, default=_URL)
    """The URL from where to download the dataset."""
    file_name: str = field(repr=False, default="alpacagpt4_data_cleaned_archive.json")
    """The name of the dataset file to download."""
litgpt/data/arxiv.py ADDED
@@ -0,0 +1,81 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from torch.utils.data import DataLoader

from litgpt.data import DataModule
from litgpt.tokenizer import Tokenizer


@dataclass
class Arxiv(DataModule):
    """Data module for streaming pretokenized arXiv data.

    Provides training and validation streaming dataloaders that return batches of tokens.
    """

    data_path: Union[str, Path] = Path("litgpt/data/arxiv/")
    """The path to the data directory containing `train` and `val` subdirectories.
    Could be a remote path (s3://) or a local path."""
    seed: int = 42
    """The random seed for shuffling the dataset."""
    num_workers: int = 0
    """How many DataLoader processes to use for loading."""
    use_starcoder: bool = True
    """Toggle for using Starcoder data (currently unused in this module)."""
    ppl: bool = False
    """Whether the validation loader is used for perplexity evaluation; if True, the validation stream is shuffled."""

    batch_size: int = field(init=False, repr=False, default=1)
    seq_length: int = field(init=False, repr=False, default=1024)

    def __post_init__(self):
        super().__init__()
        self.arxiv_train = str(self.data_path).rstrip("/") + "/train"
        self.arxiv_val = str(self.data_path).rstrip("/") + "/val"
        self.required_paths = [self.arxiv_train, self.arxiv_val]

    def connect(
        self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: Optional[int] = None
    ) -> None:
        self.batch_size = batch_size
        self.seq_length = max_seq_length + 1  # Increase by one because we need the next token as well

    def train_dataloader(self) -> DataLoader:
        from litdata.streaming import StreamingDataLoader, StreamingDataset, TokensLoader

        train_data = StreamingDataset(
            input_dir=self.arxiv_train,
            item_loader=TokensLoader(block_size=self.seq_length),
            shuffle=True,
            drop_last=True,
        )

        train_dataloader = StreamingDataLoader(
            train_data, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True
        )
        return train_dataloader

    def val_dataloader(self) -> DataLoader:
        from litdata.streaming import StreamingDataLoader, StreamingDataset, TokensLoader

        # Shuffle the validation stream only when it is used for perplexity evaluation
        val_dataset = StreamingDataset(
            input_dir=self.arxiv_val,
            item_loader=TokensLoader(block_size=self.seq_length),
            shuffle=self.ppl,
        )

        val_dataloader = StreamingDataLoader(
            val_dataset, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True
        )
        return val_dataloader
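
A sketch of streaming batches from this module, assuming litdata-optimized token shards already exist under `data_path/train` and `data_path/val`:

from litgpt.data import Arxiv

data = Arxiv(data_path="litgpt/data/arxiv/")
data.connect(batch_size=8, max_seq_length=1024)  # streams blocks of 1025 tokens
tokens = next(iter(data.train_dataloader()))
# expected shape (8, 1025): inputs are tokens[:, :-1], targets tokens[:, 1:]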
litgpt/data/base.py ADDED
@@ -0,0 +1,143 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
from abc import abstractmethod
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from lightning import LightningDataModule
from torch import Tensor
from torch.utils.data import Dataset

from litgpt.prompts import PromptStyle
from litgpt.tokenizer import Tokenizer


class DataModule(LightningDataModule):
    """Base class for all data modules in LitGPT."""

    @abstractmethod
    def connect(
        self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: Optional[int] = None
    ) -> None:
        """All settings that can't be determined at the time of instantiation need to be passed through here
        before any dataloaders can be accessed.
        """

    def setup(self, stage: str = "") -> None:
        # Stub is to redefine the default signature, because the concept of 'stage' does not exist in LitGPT
        pass

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}()"


class SFTDataset(Dataset):
    """An in-memory dataset for supervised finetuning with `input_ids` and `labels`.

    Args:
        data: A list of samples (dicts). The target/label must be stored under the key 'output' and the instruction
            or other data can be stored under any key as long as it is compatible with the given prompt template.
        tokenizer: The tokenizer to use. Should match the one that was used to pretrain the model.
        prompt_style: The style to apply to prompts. See `litgpt.prompts` for a list of available styles.
        max_seq_length: Truncate sequences that are longer than this value. By default, no truncation is applied.
        mask_prompt: Whether to mask the prompt section from the label (with ``ignore_index``).
        ignore_index: The index to use for elements to be ignored in the label.
        transform: An optional transform to apply to the sample before it gets tokenized. Use this to rename the
            keys in the dataset to the expected 'instruction' and 'output' keys.

    Returns a dict with three keys:
        input_ids: The encoded prompt + response
        labels: Same as input_ids, unless ``mask_prompt=True`` in which case the 'prompt' part is replaced with
            the ``ignore_index``.
        token_counts: The token counts of the sample, with and without the prompt template applied.
    """

    def __init__(
        self,
        data: List[Dict[str, str]],
        tokenizer: Tokenizer,
        prompt_style: Union[str, PromptStyle],
        max_seq_length: int = -1,
        mask_prompt: bool = True,
        ignore_index: int = -100,
        transform: Optional[Callable[[Any], Any]] = None,
    ) -> None:
        self.data = data
        self.tokenizer = tokenizer
        self.prompt_style = (
            prompt_style if isinstance(prompt_style, PromptStyle) else PromptStyle.from_name(prompt_style)
        )
        self.max_seq_length = max_seq_length
        self.mask_prompt = mask_prompt
        self.ignore_index = ignore_index
        self.transform = transform

    def __len__(self) -> int:
        return len(self.data)

    def __getitem__(self, idx: int) -> Dict[str, Union[Tensor, Dict[str, int]]]:
        example = self.data[idx]
        if self.transform is not None:
            example = self.transform(example)
        prompt = self.prompt_style.apply(prompt=example["instruction"], **example)
        encoded_prompt = self.tokenizer.encode(prompt, max_length=self.max_seq_length)
        encoded_response = self.tokenizer.encode(example["output"], bos=False, eos=True, max_length=self.max_seq_length)
        encoded_prompt_and_response = torch.cat((encoded_prompt, encoded_response)).type(torch.int64)
        if self.max_seq_length > 0:  # do not slice off last token when self.max_seq_length = -1
            encoded_prompt_and_response = encoded_prompt_and_response[: self.max_seq_length]

        # The labels are the full prompt with response, but with the prompt masked out
        labels = encoded_prompt_and_response.clone()
        if self.mask_prompt:
            labels[: len(encoded_prompt)] = self.ignore_index

        raw_token_count = len(self.tokenizer.encode(example["instruction"], max_length=self.max_seq_length)) + len(
            encoded_response
        )

        return {
            "input_ids": encoded_prompt_and_response,
            "labels": labels,
            "token_counts": {
                "raw": raw_token_count,
                "raw_plus_prompt_template": len(encoded_prompt_and_response),
            },
        }


def get_sft_collate_fn(max_seq_length: int = -1, pad_id: int = 0, ignore_index: int = -100):
    """Returns the collate function for supervised finetuning (needed in the DataLoader).

    The collate function gets a list of dicts with keys `input_ids` and `labels`.
    It returns a dict with batched `input_ids` and `labels`. Also pads short sequences to the longest element in
    the batch. Optionally truncates all sequences to the specified maximum length.
    """
    return partial(_sft_collate_fn, max_seq_length=max_seq_length, pad_id=pad_id, ignore_index=ignore_index)


def _sft_collate_fn(
    samples: List[Dict[str, Tensor]], max_seq_length: int = -1, pad_id: int = 0, ignore_index: int = -100
) -> Dict[str, Tensor]:
    batched = {}
    for key in ("input_ids", "labels"):
        pad_value = pad_id if key == "input_ids" else ignore_index

        # Pad right based on the longest sequence
        batched[key] = torch.nn.utils.rnn.pad_sequence(
            [sample[key] for sample in samples], batch_first=True, padding_value=pad_value
        )

        # Truncate if needed
        if max_seq_length > 0:
            batched[key] = batched[key][:, :max_seq_length]

    batched["token_counts"] = {}
    batched["token_counts"]["raw"] = torch.tensor(  # Token count without padding and without prompt template
        [sample["token_counts"]["raw"] for sample in samples], dtype=torch.int64
    ).unsqueeze(1)
    batched["token_counts"]["raw_plus_prompt_template"] = (
        torch.tensor(  # Token count without padding but with prompt template
            [sample["token_counts"]["raw_plus_prompt_template"] for sample in samples], dtype=torch.int64
        ).unsqueeze(1)
    )

    return batched
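
A sketch of how `SFTDataset` and the collate function fit together; the sample follows the Alpaca schema, and the tokenizer checkpoint path is hypothetical:

from pathlib import Path

from torch.utils.data import DataLoader

from litgpt.data import SFTDataset, get_sft_collate_fn
from litgpt.tokenizer import Tokenizer

samples = [{"instruction": "Name the capital of France.", "input": "", "output": "Paris."}]
tokenizer = Tokenizer(Path("checkpoints/EleutherAI/pythia-160m"))  # hypothetical checkpoint dir
dataset = SFTDataset(data=samples, tokenizer=tokenizer, prompt_style="alpaca", mask_prompt=True)
loader = DataLoader(dataset, batch_size=1, collate_fn=get_sft_collate_fn(max_seq_length=512))
batch = next(iter(loader))  # keys: "input_ids", "labels", "token_counts"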
litgpt/data/count.py ADDED
@@ -0,0 +1,43 @@
#!/usr/bin/env python3
import os
import json

# Data root directory
base_dir = '/mnt/data/litgpt/litgpt/data/arxiv'

# Build the list of month folders for 2407-2412 and 2501-2506
months = [f'24{m:02d}' for m in range(7, 13)] + [f'25{m:02d}' for m in range(1, 7)]

overall_total = 0
print(f"{'Month':<6} {'Train':>12} {'Val':>12} {'Total':>12}")
print("-" * 46)

for month in months:
    month_dir = os.path.join(base_dir, month)
    if not os.path.isdir(month_dir):
        continue

    train_dim = 0
    val_dim = 0

    # Read train/index.json
    train_idx = os.path.join(month_dir, 'train', 'index.json')
    if os.path.isfile(train_idx):
        with open(train_idx, 'r') as f:
            data = json.load(f)
        train_dim = sum(chunk.get('dim', 0) for chunk in data.get('chunks', []))

    # Read val/index.json
    val_idx = os.path.join(month_dir, 'val', 'index.json')
    if os.path.isfile(val_idx):
        with open(val_idx, 'r') as f:
            data = json.load(f)
        val_dim = sum(chunk.get('dim', 0) for chunk in data.get('chunks', []))

    total_dim = train_dim + val_dim
    overall_total += total_dim

    print(f"{month:<6} {train_dim:12,} {val_dim:12,} {total_dim:12,}")

print("-" * 46)
print(f"{'Overall':<6} {overall_total:12,}")
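
The script only relies on an `index.json` with a "chunks" list whose entries carry a "dim" field (interpreted here as a token count); a sketch of the assumed shape, with illustrative values:

example_index = {
    "chunks": [
        {"dim": 524288},  # values are illustrative; real litdata index files carry more fields
        {"dim": 524288},
    ]
}
total = sum(chunk.get("dim", 0) for chunk in example_index.get("chunks", []))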
litgpt/data/deita.py ADDED
@@ -0,0 +1,115 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
"""Implementation derived from https://github.com/tloen/alpaca-lora"""

from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Optional, Union

import torch
from torch.utils.data import DataLoader

from litgpt.data import DataModule, SFTDataset, get_sft_collate_fn
from litgpt.prompts import PromptStyle
from litgpt.tokenizer import Tokenizer


@dataclass
class Deita(DataModule):
    """Deita data module for supervised finetuning."""

    mask_prompt: bool = False
    """Whether to mask the prompt section from the label (with ``ignore_index``)."""
    prompt_style: Union[str, PromptStyle] = "alpaca"
    """The style to apply to instruction prompts. See `litgpt.prompts` for a list of available styles."""
    ignore_index: int = -100
    """The index to use for elements to be ignored in the label."""
    seed: int = 42
    """The random seed for shuffling the dataset."""
    num_workers: int = 4
    """How many DataLoader processes to use for loading."""
    include_multiturn_conversations: bool = False
    """Whether to include multi-turn conversations in the dataset."""
    download_dir: Path = Path("./data/deita")
    """The directory in which the downloaded dataset gets saved."""
    repo_id: str = "HuggingFaceH4/deita-10k-v0-sft"
    """The repo from where the data is downloaded."""

    tokenizer: Optional[Tokenizer] = field(default=None, init=False, repr=False)
    batch_size: int = field(default=1, init=False, repr=False)
    max_seq_length: int = field(default=-1, init=False, repr=False)
    train_dataset: Optional[SFTDataset] = field(default=None, init=False, repr=False)
    test_dataset: Optional[SFTDataset] = field(default=None, init=False, repr=False)

    def __post_init__(self) -> None:
        super().__init__()
        if isinstance(self.prompt_style, str):
            self.prompt_style = PromptStyle.from_name(self.prompt_style)

    def connect(
        self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: Optional[int] = None
    ) -> None:
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.max_seq_length = -1 if max_seq_length is None else max_seq_length

    def prepare_data(self) -> None:
        from datasets import load_dataset

        load_dataset(self.repo_id, split=["train_sft", "test_sft"], cache_dir=self.download_dir)

    def setup(self, stage: str = "") -> None:
        from datasets import load_dataset

        dataset = load_dataset(self.repo_id, split=["train_sft", "test_sft"])
        train_data = format_dataset(dataset[0], self.include_multiturn_conversations)
        test_data = format_dataset(dataset[1], self.include_multiturn_conversations)

        self.train_dataset = SFTDataset(
            data=train_data,
            tokenizer=self.tokenizer,
            prompt_style=self.prompt_style,
            max_seq_length=self.max_seq_length,
            mask_prompt=self.mask_prompt,
            ignore_index=self.ignore_index,
        )
        self.test_dataset = SFTDataset(
            data=test_data,
            tokenizer=self.tokenizer,
            prompt_style=self.prompt_style,
            max_seq_length=self.max_seq_length,
            mask_prompt=self.mask_prompt,
            ignore_index=self.ignore_index,
        )

    def train_dataloader(self) -> DataLoader:
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=True,
            generator=torch.Generator().manual_seed(self.seed),
            num_workers=self.num_workers,
            collate_fn=get_sft_collate_fn(max_seq_length=self.max_seq_length, ignore_index=self.ignore_index),
        )

    def val_dataloader(self) -> DataLoader:
        return DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            collate_fn=get_sft_collate_fn(max_seq_length=self.max_seq_length, ignore_index=self.ignore_index),
        )


def format_dataset(dataset: List[dict], include_multi_turn_conversations: bool) -> List[dict]:
    formatted = []

    for entry in dataset:
        convo = entry["messages"]
        if include_multi_turn_conversations:
            for i in range(0, len(convo) - 1, 2):
                formatted.append({"instruction": convo[i]["content"], "input": "", "output": convo[i + 1]["content"]})
        else:
            formatted.append({"instruction": convo[0]["content"], "input": "", "output": convo[1]["content"]})

    return formatted
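
To illustrate what `format_dataset` produces, here is a small self-contained example of its behavior on a two-turn conversation:

from litgpt.data.deita import format_dataset

entry = {
    "messages": [
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello!"},
        {"role": "user", "content": "How are you?"},
        {"role": "assistant", "content": "Great."},
    ]
}
print(format_dataset([entry], include_multi_turn_conversations=True))
# [{'instruction': 'Hi', 'input': '', 'output': 'Hello!'},
#  {'instruction': 'How are you?', 'input': '', 'output': 'Great.'}]
# With include_multi_turn_conversations=False, only the first pair is kept.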
litgpt/data/flan.py ADDED
@@ -0,0 +1,190 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.

import json
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Optional, Set, Union

import torch
from torch.utils.data import DataLoader

from litgpt.data import DataModule, SFTDataset, get_sft_collate_fn
from litgpt.data.alpaca import download_if_missing
from litgpt.prompts import PromptStyle
from litgpt.tokenizer import Tokenizer

_URL = "https://huggingface.co/datasets/Muennighoff/flan/resolve/main"


# TODO: Including all subsets, FLAN is too large to be loaded in memory. Switch the implementation to cache
# on disk or use Lightning Data
@dataclass
class FLAN(DataModule):
    """FLAN data module for supervised finetuning."""

    mask_prompt: bool = False
    """Whether to mask the prompt section from the label (with ``ignore_index``)."""
    prompt_style: Union[str, PromptStyle] = "flan"
    """The style to apply to instruction prompts. See `litgpt.prompts` for a list of available styles."""
    ignore_index: int = -100
    """The index to use for elements to be ignored in the label."""
    seed: int = 42
    """The random seed for shuffling the dataset."""
    num_workers: int = 4
    """How many DataLoader processes to use for loading."""
    download_dir: Path = Path("./data/flan")
    """The directory in which the downloaded dataset gets saved."""
    url: str = _URL
    """The URL from where to download the dataset."""
    subsets: Optional[str] = None
    """A comma separated list of subsets to use. If None, all subsets are used."""

    tokenizer: Optional[Tokenizer] = field(default=None, init=False, repr=False)
    batch_size: int = field(default=1, init=False, repr=False)
    max_seq_length: int = field(default=-1, init=False, repr=False)
    train_dataset: Optional[SFTDataset] = field(default=None, init=False, repr=False)
    test_dataset: Optional[SFTDataset] = field(default=None, init=False, repr=False)

    def __post_init__(self):
        super().__init__()
        if isinstance(self.prompt_style, str):
            self.prompt_style = PromptStyle.from_name(self.prompt_style)

        supported_subsets = _supported_subsets()
        if self.subsets is not None:
            self.subsets = self.subsets.split(",")
            for subset in self.subsets:
                if subset not in supported_subsets:
                    raise ValueError(f"{subset} not in {supported_subsets}")
        else:
            self.subsets = list(supported_subsets)

    def connect(
        self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: Optional[int] = None
    ) -> None:
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.max_seq_length = -1 if max_seq_length is None else max_seq_length

    def prepare_data(self) -> None:
        self.download_dir.mkdir(parents=True, exist_ok=True)
        for subset in self.subsets:
            for split in ("train", "test"):
                data_file_path = self.download_dir / f"{subset}_{split}.jsonl"
                data_file_url = f"{self.url}/{split}/{subset}_{split}.jsonl"
                download_if_missing(data_file_path, data_file_url)

    def train_dataloader(self):
        return self._dataloader("train")

    def val_dataloader(self):
        return self._dataloader("test")

    def _dataloader(self, split: str) -> DataLoader:
        data = []
        for subset in self.subsets:
            data_file_path = self.download_dir / f"{subset}_{split}.jsonl"
            data.extend(load_jsonl(data_file_path))

        dataset = SFTDataset(
            data=data,
            tokenizer=self.tokenizer,
            prompt_style=self.prompt_style,
            max_seq_length=self.max_seq_length,
            mask_prompt=self.mask_prompt,
            ignore_index=self.ignore_index,
            transform=_transform,
        )
        return DataLoader(
            dataset=dataset,
            batch_size=self.batch_size,
            shuffle=(split == "train"),
            generator=torch.Generator().manual_seed(self.seed),
            num_workers=self.num_workers,
            collate_fn=get_sft_collate_fn(max_seq_length=self.max_seq_length, ignore_index=self.ignore_index),
        )


def load_jsonl(filename: Path) -> List[Dict[str, str]]:
    data = []
    with open(filename, encoding="utf-8") as f:
        for line in f:
            data.append(json.loads(line))
    return data


def _transform(item: dict) -> dict:
    item["instruction"] = item.pop("inputs")
    item["output"] = item.pop("targets")
    return item


def _supported_subsets() -> Set[str]:
    return {
        "aeslc_10templates",
        "ag_news_subset_10templates",
        "anli_r1_10templates",
        "anli_r2_10templates",
        "anli_r3_10templates",
        "arc_challenge_10templates",
        "arc_easy_10templates",
        "bool_q_10templates",
        "cb_10templates",
        "cnn_dailymail_10templates",
        "cola_10templates",
        "common_gen_10templates",
        "copa_10templates",
        "coqa_10templates",
        "cosmos_qa_10templates",
        "dart_10templates",
        "definite_pronoun_resolution_10templates",
        "drop_10templates",
        "e2e_nlg_10templates",
        "fix_punct_10templates",
        "gigaword_10templates",
        "glue_mrpc_10templates",
        "glue_qqp_10templates",
        "hellaswag_10templates",
        "imdb_reviews_10templates",
        "math_dataset_10templates",
        "mnli_matched_10templates",
        "mnli_mismatched_10templates",
        "multi_news_10templates",
        "multirc_10templates",
        "natural_questions_10templates",
        "openbookqa_10templates",
        "opinion_abstracts_idebate_10templates",
        "opinion_abstracts_rotten_tomatoes_10templates",
        "para_crawl_enes_10templates",
        "paws_wiki_10templates",
        "piqa_10templates",
        "qnli_10templates",
        "quac_10templates",
        "record_10templates",
        "rte_10templates",
        "samsum_10templates",
        "sentiment140_10templates",
        "snli_10templates",
        "squad_v1_10templates",
        "squad_v2_10templates",
        "sst2_10templates",
        "story_cloze_10templates",
        "stsb_10templates",
        "trec_10templates",
        "trivia_qa_10templates",
        "true_case_10templates",
        "web_nlg_en_10templates",
        "wic_10templates",
        "wiki_lingua_english_en_10templates",
        "wmt14_enfr_10templates",
        "wmt16_translate_csen_10templates",
        "wmt16_translate_deen_10templates",
        "wmt16_translate_fien_10templates",
        "wmt16_translate_roen_10templates",
        "wmt16_translate_ruen_10templates",
        "wmt16_translate_tren_10templates",
        "wnli_10templates",
        "word_segment_10templates",
        "wsc_10templates",
        "yelp_polarity_reviews_10templates",
    }
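
A sketch of restricting FLAN to a couple of subsets; the names must come from `_supported_subsets()` above, otherwise `__post_init__` raises a ValueError:

from litgpt.data import FLAN

flan = FLAN(subsets="anli_r1_10templates,bool_q_10templates")
flan.prepare_data()  # downloads {subset}_{split}.jsonl files into ./data/flan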
litgpt/data/json_data.py ADDED
@@ -0,0 +1,149 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.

import json
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Optional, Tuple, Union

import torch
from torch.utils.data import DataLoader, random_split

from litgpt.data import DataModule, SFTDataset, get_sft_collate_fn
from litgpt.prompts import PromptStyle
from litgpt.tokenizer import Tokenizer


@dataclass
class JSON(DataModule):
    """Loads JSON or JSONL data for supervised finetuning."""

    json_path: Path
    """A path to a JSON file or a directory with `train.json` and `val.json` containing the data.
    The file(s) should contain a list of samples (dicts). Each dict must have the keys 'instruction' and 'output',
    and can optionally have a key 'input' (see Alpaca)."""
    mask_prompt: bool = False
    """Whether to mask the prompt section from the label (with ``ignore_index``)."""
    val_split_fraction: Optional[float] = None
    """The fraction of the dataset to use for the validation dataset. The rest is used for training.
    Only applies if you passed in a single file to `json_path`."""
    prompt_style: Union[str, PromptStyle] = "default"
    """The style to apply to instruction prompts. See `litgpt.prompts` for a list of available styles."""
    ignore_index: int = -100
    """The index to use for elements to be ignored in the label."""
    seed: int = 42
    """The random seed for creating the train/val splits and shuffling the dataset."""
    num_workers: int = 4
    """How many DataLoader processes to use for loading."""

    tokenizer: Optional[Tokenizer] = field(default=None, init=False, repr=False)
    batch_size: int = field(default=1, init=False, repr=False)
    max_seq_length: int = field(default=-1, init=False, repr=False)
    train_dataset: Optional[SFTDataset] = field(default=None, init=False, repr=False)
    test_dataset: Optional[SFTDataset] = field(default=None, init=False, repr=False)

    def __post_init__(self):
        super().__init__()
        if self.json_path.is_file() and self.val_split_fraction is None:
            raise ValueError(
                "If `json_path` is a file, you must set `val_split_fraction` to a value between 0 and 1 to split the"
                " data into train and validation sets."
            )
        if self.json_path.is_dir() and self.val_split_fraction is not None:
            raise ValueError(
                "If `json_path` is a directory, it must contain 'train.json' and 'val.json' files and"
                f" hence `val_split_fraction` should not be set. Got `{self.val_split_fraction=}`."
            )
        if not self.json_path.exists():
            raise FileNotFoundError(
                "The `json_path` must be a file or a directory containing 'train.json' and 'val.json' files,"
                f" but '{self.json_path!s}' does not exist."
            )
        if isinstance(self.prompt_style, str):
            self.prompt_style = PromptStyle.from_name(self.prompt_style)

    def connect(
        self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: Optional[int] = None
    ) -> None:
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.max_seq_length = -1 if max_seq_length is None else max_seq_length

    def setup(self, stage: str = "") -> None:
        train_data, test_data = self.get_splits()

        self.train_dataset = SFTDataset(
            data=train_data,
            tokenizer=self.tokenizer,
            prompt_style=self.prompt_style,
            max_seq_length=self.max_seq_length,
            mask_prompt=self.mask_prompt,
            ignore_index=self.ignore_index,
        )
        self.test_dataset = SFTDataset(
            data=test_data,
            tokenizer=self.tokenizer,
            prompt_style=self.prompt_style,
            max_seq_length=self.max_seq_length,
            mask_prompt=self.mask_prompt,
            ignore_index=self.ignore_index,
        )

    def train_dataloader(self) -> DataLoader:
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=True,
            generator=torch.Generator().manual_seed(self.seed),
            num_workers=self.num_workers,
            collate_fn=get_sft_collate_fn(max_seq_length=self.max_seq_length, ignore_index=self.ignore_index),
        )

    def val_dataloader(self) -> DataLoader:
        return DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            collate_fn=get_sft_collate_fn(max_seq_length=self.max_seq_length, ignore_index=self.ignore_index),
        )

    def get_splits(self) -> Tuple:
        # A single file (gets split into train and test)
        if self.json_path.is_file():
            data = load_split(self.json_path)

            # Partition the dataset into train and test
            train_data, test_data = random_split(
                data,
                [1.0 - self.val_split_fraction, self.val_split_fraction],
                generator=torch.Generator().manual_seed(self.seed),
            )
            return train_data, test_data

        # A directory containing train.json and val.json
        if (train_file := self.find_split("train")) and (val_file := self.find_split("val")):
            train_data = load_split(train_file)
            test_data = load_split(val_file)
            return train_data, test_data

        raise FileNotFoundError(
            "The `json_path` must be a file or a directory containing 'train.json' and 'val.json' files."
        )

    def find_split(self, split_name: str) -> Optional[Path]:
        for suffix in (".json", ".jsonl"):
            if (file := self.json_path / f"{split_name}{suffix}").is_file():
                return file
        return None


def load_split(json_path: Path) -> Any:
    if json_path.suffix == ".json":
        with open(json_path, encoding="utf-8") as file:
            return json.load(file)
    if json_path.suffix == ".jsonl":
        with open(json_path, encoding="utf-8") as file:
            return [json.loads(line) for line in file]
    raise ValueError(f"Unsupported file format: {json_path.suffix}. Expected `.json` or `.jsonl`.")
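
A sketch of the two accepted layouts; the paths are hypothetical and must exist on disk, since `__post_init__` validates them:

from pathlib import Path

from litgpt.data import JSON

# 1) A single file, split in memory according to val_split_fraction:
single = JSON(json_path=Path("my_data.json"), val_split_fraction=0.1)

# 2) A directory holding pre-made train.json(l) and val.json(l) splits:
split_dir = JSON(json_path=Path("my_data/"))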
litgpt/data/lima.py ADDED
@@ -0,0 +1,131 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
"""Implementation derived from https://github.com/tloen/alpaca-lora"""

import os
from dataclasses import dataclass, field
from typing import List, Optional, Union

import torch
from torch.utils.data import DataLoader, random_split

from litgpt.data import DataModule, SFTDataset, get_sft_collate_fn
from litgpt.prompts import PromptStyle
from litgpt.tokenizer import Tokenizer


@dataclass
class LIMA(DataModule):
    """LIMA data module for supervised finetuning."""

    mask_prompt: bool = False
    """Whether to mask the prompt section from the label (with ``ignore_index``)."""
    val_split_fraction: float = 0.1
    """The fraction of the dataset to use for the validation dataset. The rest is used for training."""
    prompt_style: Union[str, PromptStyle] = "alpaca"
    """The style to apply to instruction prompts. See `litgpt.prompts` for a list of available styles."""
    ignore_index: int = -100
    """The index to use for elements to be ignored in the label."""
    seed: int = 42
    """The random seed for creating the train/val splits and shuffling the dataset."""
    num_workers: int = 4
    """How many DataLoader processes to use for loading."""
    include_multiturn_conversations: bool = False
    """Whether to include multi-turn conversations in the dataset."""
    repo_id: str = "GAIR/lima"
    """The Hugging Face dataset repository ID from where to download the data."""
    access_token: Optional[str] = field(repr=False, default=os.getenv("HF_TOKEN"))
    """The Hugging Face API token to use for authentication. Can also be set through the
    `HF_TOKEN` environment variable."""

    tokenizer: Optional[Tokenizer] = field(default=None, init=False, repr=False)
    batch_size: int = field(default=1, init=False, repr=False)
    max_seq_length: int = field(default=-1, init=False, repr=False)
    train_dataset: Optional[SFTDataset] = field(default=None, init=False, repr=False)
    test_dataset: Optional[SFTDataset] = field(default=None, init=False, repr=False)

    def __post_init__(self):
        super().__init__()
        if self.access_token is None:
            raise ValueError(
                "LIMA requires authentication, please set the `HF_TOKEN=your_token` environment"
                " variable or pass --access_token=your_token. You can find your token by visiting"
                " https://huggingface.co/settings/tokens"
            )
        if isinstance(self.prompt_style, str):
            self.prompt_style = PromptStyle.from_name(self.prompt_style)

    def connect(
        self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: Optional[int] = None
    ) -> None:
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.max_seq_length = -1 if max_seq_length is None else max_seq_length

    def prepare_data(self) -> None:
        from datasets import load_dataset

        load_dataset(self.repo_id, token=self.access_token)

    def setup(self, stage: str = "") -> None:
        from datasets import load_dataset

        dataset = load_dataset(self.repo_id, token=self.access_token)
        data = format_dataset(dataset["train"], self.include_multiturn_conversations)

        # Partition the dataset into train and test
        train_data, test_data = random_split(
            data,
            [1.0 - self.val_split_fraction, self.val_split_fraction],
            generator=torch.Generator().manual_seed(self.seed),
        )
        train_data, test_data = list(train_data), list(test_data)

        self.train_dataset = SFTDataset(
            data=train_data,
            tokenizer=self.tokenizer,
            prompt_style=self.prompt_style,
            max_seq_length=self.max_seq_length,
            mask_prompt=self.mask_prompt,
            ignore_index=self.ignore_index,
        )
        self.test_dataset = SFTDataset(
            data=test_data,
            tokenizer=self.tokenizer,
            prompt_style=self.prompt_style,
            max_seq_length=self.max_seq_length,
            mask_prompt=self.mask_prompt,
            ignore_index=self.ignore_index,
        )

    def train_dataloader(self) -> DataLoader:
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=True,
            generator=torch.Generator().manual_seed(self.seed),
            num_workers=self.num_workers,
            collate_fn=get_sft_collate_fn(max_seq_length=self.max_seq_length, ignore_index=self.ignore_index),
        )

    def val_dataloader(self) -> DataLoader:
        return DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            collate_fn=get_sft_collate_fn(max_seq_length=self.max_seq_length, ignore_index=self.ignore_index),
        )


def format_dataset(dataset_partition: dict, include_multi_turn_conversations: bool) -> List[dict]:
    formatted_ds = []

    for entry in dataset_partition:
        convo = entry["conversations"]
        if include_multi_turn_conversations:
            for i in range(0, len(convo) - 1, 2):
                formatted_ds.append({"instruction": convo[i], "input": "", "output": convo[i + 1]})
        else:
            formatted_ds.append({"instruction": convo[0], "input": "", "output": convo[1]})

    return formatted_ds
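
To make the pairing logic in `format_dataset` concrete, a small self-contained check (toy conversation data, not from the real LIMA set; assumes `format_dataset` as defined above): each consecutive (human, assistant) pair becomes one instruction/output record in multi-turn mode, otherwise only the first pair is kept.

    # Toy conversation with two turns, same shape as LIMA entries
    entry = {"conversations": ["Q1", "A1", "Q2", "A2"]}

    single = format_dataset([entry], include_multi_turn_conversations=False)
    multi = format_dataset([entry], include_multi_turn_conversations=True)

    assert len(single) == 1 and single[0]["output"] == "A1"
    assert len(multi) == 2 and multi[1] == {"instruction": "Q2", "input": "", "output": "A2"}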
litgpt/data/lit_data.py ADDED
@@ -0,0 +1,64 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Tuple, Union

from torch.utils.data import DataLoader

from litgpt.data import DataModule
from litgpt.tokenizer import Tokenizer


@dataclass
class LitData(DataModule):
    """Loads data using LitData's StreamingDataset given a path to a folder of preprocessed data (chunks)."""

    data_path: Union[str, Path] = Path("data/")
    """The path to the data directory containing the preprocessed chunks for the streaming dataset.
    The path can also be a remote path (e.g., s3://). See also ``split_names`` if this path contains subfolders
    for training- and validation splits."""
    split_names: Optional[Tuple[str, str]] = None
    """Optional tuple for names of subfolders for training and validation under ``data_path``. If not provided,
    all data under data_path will be used for training, and the validation dataloader will be identical to the
    train dataloader."""
    seed: int = 42
    """The random seed for shuffling the dataset."""
    num_workers: int = 8
    """How many DataLoader processes to use for loading."""

    batch_size: int = field(init=False, repr=False, default=1)
    seq_length: int = field(init=False, repr=False, default=2048)

    def __post_init__(self) -> None:
        super().__init__()
        if self.split_names is not None and len(self.split_names) != 2:
            raise ValueError("If provided `split_names` must be a tuple of two strings, for example: ('train', 'val').")

    def connect(
        self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: Optional[int] = None
    ) -> None:
        self.batch_size = batch_size
        self.seq_length = max_seq_length + 1  # Increase by one because we need the next token as well

    def train_dataloader(self) -> DataLoader:
        input_dir = os.path.join(self.data_path, self.split_names[0]) if self.split_names else str(self.data_path)
        return self._dataloader(input_dir=input_dir, train=True)

    def val_dataloader(self) -> DataLoader:
        input_dir = os.path.join(self.data_path, self.split_names[1]) if self.split_names else str(self.data_path)
        return self._dataloader(input_dir=input_dir, train=False)

    def _dataloader(self, input_dir: str, train: bool):
        from litdata.streaming import StreamingDataLoader, StreamingDataset, TokensLoader

        dataset = StreamingDataset(
            input_dir=input_dir,
            item_loader=TokensLoader(block_size=self.seq_length),
            shuffle=train,
            seed=self.seed,
        )
        dataloader = StreamingDataLoader(
            dataset, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True
        )
        return dataloader
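
A minimal usage sketch, assuming `litdata` is installed, `LitData` is exported from `litgpt.data` as in upstream LitGPT, and `data/` holds pre-tokenized chunks (hypothetical layout) produced by one of the preparation scripts below:

    from litgpt.data import LitData

    # Chunks live under data/train and data/val
    data = LitData(data_path="data/", split_names=("train", "val"))
    data.connect(batch_size=4, max_seq_length=2048)  # streams blocks of 2049 tokens
    loader = data.train_dataloader()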
litgpt/data/longform.py ADDED
@@ -0,0 +1,90 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.

import json
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

import torch
from torch.utils.data import DataLoader

from litgpt.data import DataModule, SFTDataset, get_sft_collate_fn
from litgpt.data.alpaca import download_if_missing
from litgpt.prompts import PromptStyle
from litgpt.tokenizer import Tokenizer

_URL = "https://raw.githubusercontent.com/akoksal/LongForm/main/dataset"


@dataclass
class LongForm(DataModule):
    """LongForm data module for supervised finetuning."""

    mask_prompt: bool = False
    """Whether to mask the prompt section from the label (with ``ignore_index``)."""
    prompt_style: Union[str, PromptStyle] = "longform"
    """The style to apply to instruction prompts. See `litgpt.prompts` for a list of available styles."""
    ignore_index: int = -100
    """The index to use for elements to be ignored in the label."""
    seed: int = 42
    """The random seed for shuffling the dataset."""
    num_workers: int = 4
    """How many DataLoader processes to use for loading."""
    download_dir: Path = Path("./data/longform")
    """The directory in which the downloaded dataset gets saved."""

    tokenizer: Optional[Tokenizer] = field(default=None, init=False, repr=False)
    batch_size: int = field(default=1, init=False, repr=False)
    max_seq_length: int = field(default=-1, init=False, repr=False)
    train_dataset: Optional[SFTDataset] = field(default=None, init=False, repr=False)
    test_dataset: Optional[SFTDataset] = field(default=None, init=False, repr=False)

    def __post_init__(self) -> None:
        super().__init__()
        if isinstance(self.prompt_style, str):
            self.prompt_style = PromptStyle.from_name(self.prompt_style)

    def connect(
        self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: Optional[int] = None
    ) -> None:
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.max_seq_length = -1 if max_seq_length is None else max_seq_length

    def prepare_data(self) -> None:
        self.download_dir.mkdir(parents=True, exist_ok=True)
        download_if_missing(self.download_dir / "train.json", f"{_URL}/train.json")
        download_if_missing(self.download_dir / "val.json", f"{_URL}/val.json")

    def train_dataloader(self):
        return self._dataloader("train")

    def val_dataloader(self):
        return self._dataloader("val")

    def _dataloader(self, split: str) -> DataLoader:
        with open(self.download_dir / f"{split}.json", encoding="utf-8") as file:
            data = json.load(file)

        dataset = SFTDataset(
            data=data,
            tokenizer=self.tokenizer,
            prompt_style=self.prompt_style,
            max_seq_length=self.max_seq_length,
            mask_prompt=self.mask_prompt,
            ignore_index=self.ignore_index,
            transform=_transform,
        )
        return DataLoader(
            dataset=dataset,
            batch_size=self.batch_size,
            shuffle=(split == "train"),
            generator=torch.Generator().manual_seed(self.seed),
            num_workers=self.num_workers,
            collate_fn=get_sft_collate_fn(max_seq_length=self.max_seq_length, ignore_index=self.ignore_index),
        )


def _transform(item: dict) -> dict:
    item["instruction"] = item.pop("input")
    return item
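
The `_transform` hook bridges a schema mismatch: LongForm stores the prompt under "input" while `SFTDataset` expects "instruction". A quick illustration (toy item, assumes `_transform` as defined above):

    item = {"input": "Write a haiku about autumn.", "output": "..."}
    print(_transform(item))
    # {'output': '...', 'instruction': 'Write a haiku about autumn.'}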
litgpt/data/microllama.py ADDED
@@ -0,0 +1,14 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
from dataclasses import dataclass
from pathlib import Path
from typing import Union

from litgpt.data.tinyllama import TinyLlama


@dataclass
class MicroLlama(TinyLlama):
    """The MicroLlama data module is composed of only SlimPajama data."""

    def __init__(self, data_path: Union[str, Path] = Path("data/"), seed: int = 42, num_workers: int = 8):
        super().__init__(data_path=data_path, seed=seed, num_workers=num_workers, use_starcoder=False)
litgpt/data/openwebtext.py ADDED
@@ -0,0 +1,107 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Optional, Union

from torch.utils.data import DataLoader

from litgpt.data import DataModule
from litgpt.tokenizer import Tokenizer


@dataclass
class OpenWebText(DataModule):
    """The OpenWebText data module for pretraining."""

    data_path: Union[str, Path] = Path("data/openwebtext")
    """The path to the data directory, containing two folders 'train' and 'val'
    which are the output of the preprocessing step. The path can also be a remote path (e.g., s3://)."""
    val_split_fraction: float = 0.0005
    """The fraction of data that should be put aside for validation."""
    seed: int = 42
    """The seed to use for shuffling the training data."""
    num_workers: int = 8
    """The number of workers to use for the dataloaders."""

    tokenizer: Optional[Tokenizer] = field(default=None, repr=False, init=False)
    batch_size: int = field(default=1, repr=False, init=False)
    seq_length: int = field(default=2048, repr=False, init=False)

    def __post_init__(self) -> None:
        super().__init__()
        # Could be a remote path (s3://) or a local path
        self.data_path_train = str(self.data_path).rstrip("/") + "/train"
        self.data_path_val = str(self.data_path).rstrip("/") + "/val"

    def connect(
        self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: Optional[int] = 2048
    ) -> None:
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.seq_length = max_seq_length + 1  # Increase by one because we need the next token as well

    def prepare_data(self) -> None:
        from datasets import Dataset, load_dataset
        from litdata import optimize

        if str(self.data_path).startswith("s3://"):
            print(f"The OpenWebText data path points to an S3 location: {self.data_path}. Skipping preprocessing.")
            return

        if Path(self.data_path_train).is_dir() and Path(self.data_path_val).is_dir():
            print(f"Found OpenWebText train and val dir: {self.data_path}. Skipping preprocessing.")
            return

        dataset = load_dataset("openwebtext", num_proc=(os.cpu_count() // 2), trust_remote_code=True)

        # Split the data in training and validation
        split_dataset = dataset["train"].train_test_split(
            test_size=self.val_split_fraction, seed=self.seed, shuffle=True
        )
        split_dataset["val"] = split_dataset.pop("test")  # rename the test split to val

        def tokenize(data: Dataset, index: int):
            yield self.tokenizer.encode(data[index]["text"], eos=True)

        optimize(
            fn=partial(tokenize, split_dataset["train"]),
            inputs=list(range(len(split_dataset["train"]))),
            output_dir=self.data_path_train,
            num_workers=min(64, os.cpu_count() - 1),
            chunk_bytes="200MB",
        )
        optimize(
            fn=partial(tokenize, split_dataset["val"]),
            inputs=list(range(len(split_dataset["val"]))),
            output_dir=self.data_path_val,
            num_workers=min(8, os.cpu_count() - 1),
            chunk_bytes="200MB",
        )

    def train_dataloader(self) -> DataLoader:
        from litdata.streaming import StreamingDataLoader, StreamingDataset, TokensLoader

        train_dataset = StreamingDataset(
            input_dir=self.data_path_train,
            item_loader=TokensLoader(block_size=self.seq_length),
            shuffle=True,
        )
        train_dataloader = StreamingDataLoader(
            train_dataset, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True
        )
        return train_dataloader

    def val_dataloader(self) -> DataLoader:
        from litdata.streaming import StreamingDataLoader, StreamingDataset, TokensLoader

        val_dataset = StreamingDataset(
            input_dir=self.data_path_val,
            item_loader=TokensLoader(block_size=self.seq_length),
            shuffle=True,
        )
        val_dataloader = StreamingDataLoader(
            val_dataset, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True
        )
        return val_dataloader
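
The `max_seq_length + 1` bookkeeping in `connect` exists because each streamed block must supply both inputs and shifted targets. A minimal sketch of how a block of T + 1 tokens is typically consumed downstream (illustrative, not the exact training loop):

    import torch

    block = torch.arange(2049)               # one streamed block: 2048 + 1 tokens
    inputs, targets = block[:-1], block[1:]  # next-token prediction pairs
    assert inputs.shape == targets.shape == (2048,)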
litgpt/data/prepare_arxiv.py ADDED
@@ -0,0 +1,62 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.

import json
import os
import time
from pathlib import Path

from litgpt.data.prepare_starcoder import DataChunkRecipe
from litgpt.tokenizer import Tokenizer
from litgpt.utils import CLI, extend_checkpoint_dir


class ArxivDataRecipe(DataChunkRecipe):
    is_generator = True

    def __init__(self, tokenizer: Tokenizer, chunk_size: int):
        super().__init__(chunk_size)
        self.tokenizer = tokenizer

    def prepare_structure(self, input_dir):
        files = Path(input_dir).rglob("*.jsonl")
        return [str(file) for file in files]

    def prepare_item(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for row in f:
                obj = json.loads(row)
                text = obj["text"]
                text_ids = self.tokenizer.encode(string=text, bos=False, eos=True)
                yield text_ids


def prepare(
    input_dir: Path = Path("data/arxiv_split/2407/train"),
    output_dir: Path = Path("data/arxiv/2407/train"),
    tokenizer_path: Path = Path("checkpoints/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T"),
    chunk_size: int = (2049 * 16384),
    fast_dev_run: bool = False,
) -> None:
    from litdata.processing.data_processor import DataProcessor
    from litdata.streaming import TokensLoader

    tokenizer_path = extend_checkpoint_dir(tokenizer_path)
    tokenizer = Tokenizer(tokenizer_path)
    data_recipe = ArxivDataRecipe(tokenizer=tokenizer, chunk_size=chunk_size)
    data_processor = DataProcessor(
        input_dir=str(input_dir),
        output_dir=str(output_dir),
        fast_dev_run=fast_dev_run,
        num_workers=1,
        item_loader=TokensLoader(),
        num_downloaders=1,
    )

    start_time = time.time()
    data_processor.run(data_recipe)
    elapsed_time = time.time() - start_time
    print(f"Time taken: {elapsed_time:.2f} seconds")


if __name__ == "__main__":
    CLI(prepare)
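
The default `chunk_size` is chosen so that each optimized chunk holds a whole number of training blocks. A short arithmetic check, plus a hedged invocation sketch (assumes the package is importable as `litgpt.data.prepare_arxiv`; the paths are the defaults above):

    # 2049 * 16384 tokens per chunk: 16384 blocks of 2048 tokens, each carrying
    # one extra token for the shifted next-token targets.
    chunk_size = 2049 * 16384
    block_size = 2048 + 1
    assert chunk_size // block_size == 16384

    # Typical invocation (shell, via the jsonargparse CLI wrapper):
    #   python -m litgpt.data.prepare_arxiv \
    #       --input_dir data/arxiv_split/2407/train --output_dir data/arxiv/2407/train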
litgpt/data/prepare_slimpajama.py ADDED
@@ -0,0 +1,63 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.

import json
import os
import time
from pathlib import Path

from litgpt.data.prepare_starcoder import DataChunkRecipe
from litgpt.tokenizer import Tokenizer
from litgpt.utils import CLI, extend_checkpoint_dir


class SlimPajamaDataRecipe(DataChunkRecipe):
    is_generator = True

    def __init__(self, tokenizer: Tokenizer, chunk_size: int):
        super().__init__(chunk_size)
        self.tokenizer = tokenizer

    def prepare_structure(self, input_dir):
        files = Path(input_dir).rglob("*.zst")
        return [str(file) for file in files]

    def prepare_item(self, filepath):
        import zstandard as zstd

        with zstd.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
            for row in f:
                obj = json.loads(row)  # parse each JSONL row once
                if obj["meta"]["redpajama_set_name"] == "RedPajamaGithub":
                    continue  # exclude the GitHub data since it overlaps with starcoder
                text_ids = self.tokenizer.encode(string=obj["text"], bos=False, eos=True)
                yield text_ids


def prepare(
    input_dir: Path = Path("data/SlimPajama-627B/train"),
    output_dir: Path = Path("data/slimpajama/train"),
    tokenizer_path: Path = Path("checkpoints/Llama-2-7b-hf/"),
    chunk_size: int = (2049 * 16384),
    fast_dev_run: bool = False,
) -> None:
    from litdata.processing.data_processor import DataProcessor

    tokenizer_path = extend_checkpoint_dir(tokenizer_path)
    tokenizer = Tokenizer(tokenizer_path)
    data_recipe = SlimPajamaDataRecipe(tokenizer=tokenizer, chunk_size=chunk_size)
    data_processor = DataProcessor(
        input_dir=str(input_dir),
        output_dir=str(output_dir),
        fast_dev_run=fast_dev_run,
        num_workers=os.cpu_count(),
        num_downloaders=1,
    )

    start_time = time.time()
    data_processor.run(data_recipe)
    elapsed_time = time.time() - start_time
    print(f"Time taken: {elapsed_time:.2f} seconds")


if __name__ == "__main__":
    CLI(prepare)
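
The GitHub filter above keys off SlimPajama's per-row metadata. A tiny self-contained illustration (hypothetical row content, same JSONL schema):

    import json

    row = '{"text": "def f(): pass", "meta": {"redpajama_set_name": "RedPajamaGithub"}}'
    obj = json.loads(row)
    skip = obj["meta"]["redpajama_set_name"] == "RedPajamaGithub"
    assert skip  # GitHub rows are dropped to avoid overlap with the Starcoder data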
litgpt/data/prepare_starcoder.py ADDED
@@ -0,0 +1,81 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.

import os
import time
import traceback
from pathlib import Path

from lightning_utilities.core.imports import RequirementCache

from litgpt.tokenizer import Tokenizer
from litgpt.utils import CLI, extend_checkpoint_dir

_LITDATA_AVAILABLE = RequirementCache("litdata")
if _LITDATA_AVAILABLE:
    from litdata.processing.data_processor import DataChunkRecipe
else:
    DataChunkRecipe = object


class StarcoderDataRecipe(DataChunkRecipe):
    is_generator = True

    def __init__(self, tokenizer: Tokenizer, chunk_size: int):
        super().__init__(chunk_size)
        self.tokenizer = tokenizer

    def prepare_structure(self, input_dir):
        files = Path(input_dir).rglob("*.parquet")
        return [str(file) for file in files]

    def prepare_item(self, item_metadata):
        import pyarrow.parquet as pq

        filepath = item_metadata
        start = time.time()

        try:
            parquet_file = pq.ParquetFile(filepath)
            # Iterate in batches to reduce RAM usage
            for batch in parquet_file.iter_batches(batch_size=8192, columns=["content"]):
                for text in batch.to_pandas()["content"]:
                    yield self.tokenizer.encode(text, bos=False, eos=True)

        except Exception:
            print(traceback.format_exc())
            print(f"Error reading {filepath}")
            return

        parquet_file.close()
        end = time.time()
        print(f"Took {end - start:.2f} seconds total", filepath)


def prepare(
    input_dir: Path = Path("data/starcoderdata"),
    output_dir: Path = Path("data/starcoder"),
    tokenizer_path: Path = Path("checkpoints/Llama-2-7b-hf/"),
    chunk_size: int = (2049 * 8192),
    fast_dev_run: bool = False,
) -> None:
    from litdata.processing.data_processor import DataProcessor

    tokenizer_path = extend_checkpoint_dir(tokenizer_path)
    tokenizer = Tokenizer(tokenizer_path)
    data_recipe = StarcoderDataRecipe(tokenizer=tokenizer, chunk_size=chunk_size)
    data_processor = DataProcessor(
        input_dir=str(input_dir),
        output_dir=str(output_dir),
        fast_dev_run=fast_dev_run,
        num_workers=os.cpu_count(),
        num_downloaders=1,
    )

    start_time = time.time()
    data_processor.run(data_recipe)
    elapsed_time = time.time() - start_time
    print(f"Time taken: {elapsed_time:.2f} seconds")


if __name__ == "__main__":
    CLI(prepare)
litgpt/data/sharding.py ADDED
@@ -0,0 +1,45 @@
#!/usr/bin/env python3
import os

# Root directory: points to arxiv_split
processed_base = './litgpt/data/arxiv_split'
# Each shard is roughly 16 MB
chunk_size_bytes = 16 * 1024 * 1024

for month in os.listdir(processed_base):
    month_dir = os.path.join(processed_base, month)
    if not os.path.isdir(month_dir):
        continue

    for split in ('train', 'val'):
        # Shard in place: write chunk_*.jsonl directly into the split directory
        src_dir = os.path.join(month_dir, split)
        src_file = os.path.join(src_dir, f'{month}_{split}.jsonl')
        if not os.path.isfile(src_file):
            print(f'[skip] {src_file} not found')
            continue

        out_dir = src_dir  # output goes into the source directory
        # (src_dir is guaranteed to exist, no need to create it)

        print(f'[{month}/{split}] Start sharding into {out_dir} ...')
        chunk_idx = 0
        cur_size = 0
        out_path = os.path.join(out_dir, f'chunk_{chunk_idx:03d}.jsonl')
        out_f = open(out_path, 'w', encoding='utf-8')

        with open(src_file, 'r', encoding='utf-8') as rf:
            for line in rf:
                line_bytes = len(line.encode('utf-8'))
                # Start a new chunk once this line would exceed the size budget
                if cur_size + line_bytes > chunk_size_bytes and cur_size > 0:
                    out_f.close()
                    chunk_idx += 1
                    cur_size = 0
                    out_path = os.path.join(out_dir, f'chunk_{chunk_idx:03d}.jsonl')
                    out_f = open(out_path, 'w', encoding='utf-8')

                out_f.write(line)
                cur_size += line_bytes

        out_f.close()
        print(f'[{month}/{split}] Sharding done, {chunk_idx + 1} shards in total.\n')
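
Because the script only splits at line boundaries, the concatenation of all chunks should reproduce the source file exactly. A quick sanity check one might run afterwards (hypothetical month/split path, not part of the script):

    import glob
    import os

    split_dir = './litgpt/data/arxiv_split/2407/train'
    src = os.path.join(split_dir, '2407_train.jsonl')
    chunks = sorted(glob.glob(os.path.join(split_dir, 'chunk_*.jsonl')))
    n_src = sum(1 for _ in open(src, encoding='utf-8'))
    n_chunks = sum(1 for c in chunks for _ in open(c, encoding='utf-8'))
    assert n_src == n_chunks  # no lines lost or duplicated by sharding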
litgpt/data/split_trainval.py ADDED
@@ -0,0 +1,58 @@
#!/usr/bin/env python3
import os
import random

# Configuration
processed_base = './litgpt/data/arxiv_processed'
output_base = './litgpt/data/arxiv_split'
split_ratio = 0.01  # validation fraction
seed = 42  # random seed

random.seed(seed)

# Iterate over each subdirectory
for month in os.listdir(processed_base):
    src_dir = os.path.join(processed_base, month)
    if not os.path.isdir(src_dir):
        continue

    src_file = os.path.join(src_dir, f'{month}.jsonl')
    if not os.path.isfile(src_file):
        print(f'[skip] File {src_file} not found')
        continue

    # 1) Count the total number of lines
    print(f'[{month}] Counting total lines ...')
    with open(src_file, 'r', encoding='utf-8') as f:
        total = sum(1 for _ in f)

    n_val = int(total * split_ratio)
    print(f'[{month}] total lines={total}, validation lines={n_val}')

    # 2) Randomly sample the validation-set indices
    val_idx = set(random.sample(range(total), n_val))

    # 3) Prepare the output directories
    out_dir = os.path.join(output_base, month)
    os.makedirs(out_dir, exist_ok=True)
    train_dir = os.path.join(out_dir, 'train')
    val_dir = os.path.join(out_dir, 'val')
    os.makedirs(train_dir, exist_ok=True)
    os.makedirs(val_dir, exist_ok=True)
    train_path = os.path.join(train_dir, f'{month}_train.jsonl')
    val_path = os.path.join(val_dir, f'{month}_val.jsonl')

    # 4) Write the train/validation files
    print(f'[{month}] Splitting and writing to {out_dir} ...')
    with open(src_file, 'r', encoding='utf-8') as rf, \
            open(train_path, 'w', encoding='utf-8') as tf, \
            open(val_path, 'w', encoding='utf-8') as vf:

        for i, line in enumerate(rf):
            if i in val_idx:
                vf.write(line)
            else:
                tf.write(line)

    print(f'[{month}] Done: train -> {train_path}, val -> {val_path}\n')
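
`random.sample` draws indices without replacement, so with a fixed seed the held-out rows are reproducible across runs. A tiny illustration:

    import random

    random.seed(42)
    a = set(random.sample(range(1000), 10))
    random.seed(42)
    b = set(random.sample(range(1000), 10))
    assert a == b and len(a) == 10  # same validation rows on every run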
litgpt/data/text_files.py ADDED
@@ -0,0 +1,148 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
import glob
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Optional

from torch.utils.data import DataLoader

from litgpt.data import DataModule
from litgpt.tokenizer import Tokenizer


@dataclass
class TextFiles(DataModule):
    """The TextFile data module used for pretraining.

    Reads in text data from plaintext files contained in a data folder
    and provides training and validation dataloaders that return batches of tokens.
    Every sample is set to a fixed length.
    """

    train_data_path: Path
    """The path to the data directory used for training that contains .txt files"""
    val_data_path: Optional[Path] = None
    """The path to the data directory used for validation that
    contains .txt files. Splits off data for validation from the
    training set if None."""
    seed: int = 42
    """The seed to use for shuffling the dataset."""
    num_workers: int = 4
    """The number of workers to use for data loading."""

    tokenizer: Optional[Tokenizer] = field(default=None, init=False, repr=False)
    batch_size: int = field(default=1, init=False, repr=False)
    max_seq_length: int = field(default=-1, init=False, repr=False)

    def __post_init__(self) -> None:
        super().__init__()
        self.out_path_train = self.train_data_path / "train"
        if self.val_data_path is None:
            self.out_path_val = self.train_data_path / "val"
        else:
            self.out_path_val = Path(self.val_data_path) / "val"

    def connect(self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: int = -1) -> None:
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.max_seq_length = max_seq_length + 1  # Increase by one because we need the next token as well

    def prepare_data(self) -> None:
        from litdata import optimize
        from litdata.streaming import TokensLoader

        train_files = sorted(glob.glob(str(self.train_data_path / "*.txt")))
        assert len(train_files) > 0, f"No .txt files found in train data {train_files}"

        if self.val_data_path is not None:
            self.val_data_path = Path(self.val_data_path)
            val_files = sorted(glob.glob(str(self.val_data_path / "*.txt")))
            assert len(val_files) > 0, f"No .txt files found in validation data {val_files}"
        # train/test split. let's use only shard 0 for test split, rest train
        else:
            assert len(train_files) > 1, f"Expected at least two .txt files in {train_files}"
            val_files, *train_files = train_files
            val_files = [val_files]

        # It's ok to use almost all CPUs here because this runs in a single process
        num_workers = os.cpu_count() - 1
        use_workers = min(num_workers, len(train_files))
        if not Path(self.out_path_train).is_dir():
            validate_tokenizer(self.tokenizer)
            optimize(
                fn=partial(tokenize, tokenizer=self.tokenizer),
                inputs=train_files,
                output_dir=str(self.out_path_train),
                num_workers=use_workers,
                chunk_bytes="50MB",
                item_loader=TokensLoader(block_size=self.max_seq_length),
            )
        else:
            print(
                f"\nWarning: Preprocessed training data found in {self.out_path_train}."
                " For efficiency, reprocessing is skipped. If your text input has changed since"
                " the last `litgpt pretrain` command, remove the preprocessed file(s) to trigger"
                f" reprocessing: `rm -rf {self.out_path_train}`\n"
            )
        use_workers = min(num_workers, len(val_files))
        if not Path(self.out_path_val).is_dir():
            validate_tokenizer(self.tokenizer)
            optimize(
                fn=partial(tokenize, tokenizer=self.tokenizer),
                inputs=val_files,
                output_dir=str(self.out_path_val),
                num_workers=use_workers,
                chunk_bytes="50MB",
                item_loader=TokensLoader(block_size=self.max_seq_length),
            )
        else:
            print(
                f"\nWarning: Preprocessed validation data found in {self.out_path_val}."
                " For efficiency, reprocessing is skipped. If your text input has changed since"
                " the last `litgpt pretrain` command, remove the preprocessed file(s) to trigger"
                f" reprocessing: `rm -rf {self.out_path_val}`\n"
            )

    def train_dataloader(self) -> DataLoader:
        from litdata.streaming import StreamingDataLoader, StreamingDataset, TokensLoader

        train_dataset = StreamingDataset(
            input_dir=str(self.out_path_train),
            item_loader=TokensLoader(block_size=self.max_seq_length),
            shuffle=True,
        )

        train_dataloader = StreamingDataLoader(
            train_dataset, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True
        )
        return train_dataloader

    def val_dataloader(self) -> DataLoader:
        from litdata.streaming import StreamingDataLoader, StreamingDataset, TokensLoader

        val_dataset = StreamingDataset(
            input_dir=str(self.out_path_val),
            item_loader=TokensLoader(block_size=self.max_seq_length),
            shuffle=True,
        )
        val_dataloader = StreamingDataLoader(
            val_dataset, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True
        )
        return val_dataloader


def tokenize(filename: str, tokenizer: Tokenizer):
    with open(filename, encoding="utf-8") as file:
        text = file.read()
    text = text.strip()
    yield tokenizer.encode(text, bos=True, eos=False)


def validate_tokenizer(tokenizer: Tokenizer) -> None:
    if tokenizer is None:
        raise ValueError(
            "Tokenizer is None. If you are using this data module via `litgpt pretrain`, "
            "please provide a valid `--tokenizer_dir` path."
        )
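
A hedged usage sketch (directory name is hypothetical; assumes `TextFiles` is exported from `litgpt.data` as in upstream LitGPT, and expects plain `.txt` files):

    from pathlib import Path
    from litgpt.data import TextFiles

    data = TextFiles(train_data_path=Path("data/my_corpus"))
    # With no val_data_path, the first (sorted) .txt file becomes the validation split.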
litgpt/data/tinyllama.py ADDED
@@ -0,0 +1,103 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from torch.utils.data import DataLoader

from litgpt.data import DataModule
from litgpt.tokenizer import Tokenizer


@dataclass
class TinyLlama(DataModule):
    """The TinyLlama data module is composed of a mix of SlimPajama and Starcoder data.

    Provides training and validation streaming dataloaders that return batches of tokens.
    """

    data_path: Union[str, Path] = Path("data/")
    """The path to the data directory, containing two folders 'slimpajama' and 'starcoder'
    which are the output of the preprocessing step done in advance. See the `tutorial/pretrain_tinyllama.md`
    for instructions. The path can also be a remote path (e.g., s3://)."""
    seed: int = 42
    """The random seed for shuffling the dataset."""
    num_workers: int = 8
    """How many DataLoader processes to use for loading."""
    use_starcoder: bool = True
    """Toggle for using Starcoder data."""

    batch_size: int = field(init=False, repr=False, default=1)
    seq_length: int = field(init=False, repr=False, default=2048)

    def __post_init__(self):
        super().__init__()
        # Could be a remote path (s3://) or a local path
        self.slimpajama_train = str(self.data_path).rstrip("/") + "/slimpajama/train"
        self.slimpajama_val = str(self.data_path).rstrip("/") + "/slimpajama/val"
        self.required_paths = [self.slimpajama_train, self.slimpajama_val]

        if self.use_starcoder:
            self.starcoder_train = str(self.data_path).rstrip("/") + "/starcoder"
            self.required_paths += [self.starcoder_train]

    def connect(
        self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: Optional[int] = None
    ) -> None:
        self.batch_size = batch_size
        self.seq_length = max_seq_length + 1  # Increase by one because we need the next token as well

    def prepare_data(self) -> None:
        for path in self.required_paths:
            if not path.startswith("s3://") and not Path(path).is_dir():
                raise FileNotFoundError(
                    "The data path for TinyLlama is expected to be the directory containing these subdirectories:"
                    f" `slimpajama/train`, `slimpajama/val`, `starcoder`. The directory {path} does not exist."
                    " Set it via `--data.data_path=...`"
                )

    def train_dataloader(self) -> DataLoader:
        from litdata.streaming import CombinedStreamingDataset, StreamingDataLoader, StreamingDataset, TokensLoader

        slim_train_data = StreamingDataset(
            input_dir=self.slimpajama_train,
            item_loader=TokensLoader(block_size=self.seq_length),
            shuffle=True,
            drop_last=True,
        )
        train_data = slim_train_data

        if self.use_starcoder:
            train_datasets = [
                slim_train_data,
                StreamingDataset(
                    input_dir=self.starcoder_train,
                    item_loader=TokensLoader(block_size=self.seq_length),
                    shuffle=True,
                    drop_last=True,
                ),
            ]

            # Mix SlimPajama data and Starcoder data with these proportions:
            weights = (0.693584, 0.306416)
            train_data = CombinedStreamingDataset(
                datasets=train_datasets, seed=self.seed, weights=weights, iterate_over_all=False
            )

        train_dataloader = StreamingDataLoader(
            train_data, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True
        )
        return train_dataloader

    def val_dataloader(self) -> DataLoader:
        from litdata.streaming import StreamingDataLoader, StreamingDataset, TokensLoader

        val_dataset = StreamingDataset(
            input_dir=self.slimpajama_val,
            item_loader=TokensLoader(block_size=self.seq_length),
            shuffle=True,
        )
        val_dataloader = StreamingDataLoader(
            val_dataset, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True
        )
        return val_dataloader
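
The mixing weights sum to 1, so roughly 69.4% of sampled blocks come from SlimPajama and 30.6% from Starcoder (presumably reflecting the corpora's token proportions; the exact rationale is not stated here):

    weights = (0.693584, 0.306416)
    assert abs(sum(weights) - 1.0) < 1e-9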
litgpt/data/tinystories.py ADDED
@@ -0,0 +1,141 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
import glob
import json
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Optional

from torch.utils.data import DataLoader
from tqdm import tqdm

from litgpt.data import DataModule
from litgpt.data.alpaca import download_if_missing
from litgpt.data.text_files import validate_tokenizer
from litgpt.tokenizer import Tokenizer


@dataclass
class TinyStories(DataModule):
    """The TinyStories data module: https://huggingface.co/datasets/roneneldan/TinyStories

    Provides training and validation dataloaders that return batches of tokens. Every sample is set to a fixed length.
    """

    data_path: Path = Path("data/tinystories")
    """The path to the data directory, containing two folders 'train' and 'val'
    which are the output of the preprocessing step."""
    seed: int = 42
    """The seed to use for shuffling the dataset."""
    num_workers: int = 8
    """The number of workers to use for the dataloaders."""

    tokenizer: Optional[Tokenizer] = field(default=None, init=False, repr=False)
    batch_size: int = field(default=1, init=False, repr=False)
    max_seq_length: int = field(default=-1, init=False, repr=False)

    def __post_init__(self) -> None:
        super().__init__()
        self.data_path_train = self.data_path / "train"
        self.data_path_val = self.data_path / "val"

    def connect(self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: int = -1) -> None:
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.max_seq_length = max_seq_length + 1  # Increase by one because we need the next token as well

    def prepare_data(self) -> None:
        from litdata import optimize

        download(self.data_path)

        files = sorted(glob.glob(str(self.data_path / "TinyStories_all_data" / "*.json")))
        assert len(files) > 0, f"No json files found in {files}"
        assert len(files) > 1, f"Expected at least two json files in {files}"
        # train/test split. let's use only shard 0 for test split, rest train
        val_file, *train_files = files
        num_workers = os.cpu_count() - 1

        if not Path(self.data_path_train).is_dir():
            validate_tokenizer(self.tokenizer)
            optimize(
                fn=partial(tokenize, tokenizer=self.tokenizer),
                inputs=train_files,
                output_dir=str(self.data_path_train),
                num_workers=num_workers,
                chunk_bytes="200MB",
            )
        if not Path(self.data_path_val).is_dir():
            validate_tokenizer(self.tokenizer)
            optimize(
                fn=partial(tokenize, tokenizer=self.tokenizer),
                inputs=[val_file],
                output_dir=str(self.data_path_val),
                num_workers=1,  # there's only 1 file
                chunk_bytes="200MB",
            )

    def train_dataloader(self) -> DataLoader:
        from litdata.streaming import StreamingDataLoader, StreamingDataset, TokensLoader

        train_dataset = StreamingDataset(
            input_dir=str(self.data_path_train),
            item_loader=TokensLoader(block_size=self.max_seq_length),
            shuffle=True,
        )
        train_dataloader = StreamingDataLoader(
            train_dataset, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True
        )
        return train_dataloader

    def val_dataloader(self) -> DataLoader:
        from litdata.streaming import StreamingDataLoader, StreamingDataset, TokensLoader

        val_dataset = StreamingDataset(
            input_dir=str(self.data_path_val),
            item_loader=TokensLoader(block_size=self.max_seq_length),
            shuffle=True,
        )
        val_dataloader = StreamingDataLoader(
            val_dataset, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True
        )
        return val_dataloader


def tokenize(filename: str, tokenizer: Tokenizer):
    with open(filename, encoding="utf-8") as f:
        data = json.load(f)
    global_rank = int(os.environ["DATA_OPTIMIZER_GLOBAL_RANK"])
    num_workers = int(os.environ["DATA_OPTIMIZER_NUM_WORKERS"])
    local_rank = global_rank % num_workers
    for example in tqdm(data, position=local_rank):
        text = example["story"]
        text = text.strip()  # get rid of leading/trailing whitespace
        tokens = tokenizer.encode(text, bos=True, eos=False)  # encode the text, use BOS
        yield tokens


_URL = "https://huggingface.co/datasets/roneneldan/TinyStories/resolve/main/TinyStories_all_data.tar.gz"


def download(data_dir: Path):
    data_dir.mkdir(exist_ok=True, parents=True)

    data_tar = data_dir / "TinyStories_all_data.tar.gz"
    data_dir = data_dir / "TinyStories_all_data"
    shard_filenames = sorted(glob.glob(str(data_dir / "*.json")))
    if shard_filenames:
        print(f"{data_dir} already exists, skipping unpacking...")
        return

    # download the TinyStories dataset, unless it's already downloaded
    download_if_missing(data_tar, _URL, stream=True, mode="wb")

    # unpack the tar.gz file into all the data shards (json files)
    data_dir.mkdir(exist_ok=False)
    tar_command = f"tar -xzf {data_tar} -C {data_dir}"
    print(tar_command)
    os.system(tar_command)
    shard_filenames = sorted(glob.glob(str(data_dir / "*.json")))
    print(f"Number of shards: {len(shard_filenames)}")
litgpt/deploy/__init__.py ADDED
File without changes
litgpt/deploy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (135 Bytes). View file
 
litgpt/deploy/serve.py ADDED
@@ -0,0 +1,308 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
import json
import sys
from pathlib import Path
from pprint import pprint
from typing import Any, Dict, Literal, Optional

import torch
from lightning_utilities.core.imports import RequirementCache

from litgpt.api import LLM
from litgpt.utils import auto_download_checkpoint

_LITSERVE_AVAILABLE = RequirementCache("litserve")
_JINJA2_AVAILABLE = RequirementCache("jinja2")
if _LITSERVE_AVAILABLE:
    from litserve import LitAPI, LitServer
    from litserve.specs.openai import ChatCompletionRequest, OpenAISpec
else:
    LitAPI, LitServer = object, object


class BaseLitAPI(LitAPI):
    def __init__(
        self,
        checkpoint_dir: Path,
        quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8"]] = None,
        precision: Optional[str] = None,
        temperature: float = 0.8,
        top_k: int = 50,
        top_p: float = 1.0,
        max_new_tokens: int = 50,
        devices: int = 1,
        api_path: Optional[str] = None,
    ) -> None:
        if not _LITSERVE_AVAILABLE:
            raise ImportError(str(_LITSERVE_AVAILABLE))

        super().__init__(api_path=api_path)

        self.checkpoint_dir = checkpoint_dir
        self.quantize = quantize
        self.precision = precision
        self.temperature = temperature
        self.top_k = top_k
        self.max_new_tokens = max_new_tokens
        self.top_p = top_p
        self.devices = devices

    def setup(self, device: str) -> None:
        if ":" in device:
            accelerator, device = device.split(":")
            device = f"[{int(device)}]"
        else:
            accelerator = device
            device = 1

        print("Initializing model...", file=sys.stderr)
        self.llm = LLM.load(model=self.checkpoint_dir, distribute=None)

        self.llm.distribute(
            devices=self.devices,
            accelerator=accelerator,
            quantize=self.quantize,
            precision=self.precision,
            generate_strategy=("sequential" if self.devices is not None and self.devices > 1 else None),
        )
        print("Model successfully initialized.", file=sys.stderr)

    def decode_request(self, request: Dict[str, Any]) -> Any:
        prompt = str(request["prompt"])
        return prompt


class SimpleLitAPI(BaseLitAPI):
    def __init__(
        self,
        checkpoint_dir: Path,
        quantize: Optional[str] = None,
        precision: Optional[str] = None,
        temperature: float = 0.8,
        top_k: int = 50,
        top_p: float = 1.0,
        max_new_tokens: int = 50,
        devices: int = 1,
        api_path: Optional[str] = None,
    ):
        super().__init__(
            checkpoint_dir,
            quantize,
            precision,
            temperature,
            top_k,
            top_p,
            max_new_tokens,
            devices,
            api_path=api_path,
        )

    def setup(self, device: str):
        super().setup(device)

    def predict(self, inputs: str) -> Any:
        output = self.llm.generate(
            inputs,
            temperature=self.temperature,
            top_k=self.top_k,
            top_p=self.top_p,
            max_new_tokens=self.max_new_tokens,
        )
        return output

    def encode_response(self, output: str) -> Dict[str, Any]:
        # Convert the model output to a response payload.
        return {"output": output}


class StreamLitAPI(BaseLitAPI):
    def __init__(
        self,
        checkpoint_dir: Path,
        quantize: Optional[str] = None,
        precision: Optional[str] = None,
        temperature: float = 0.8,
        top_k: int = 50,
        top_p: float = 1.0,
        max_new_tokens: int = 50,
        devices: int = 1,
        api_path: Optional[str] = None,
    ):
        super().__init__(
            checkpoint_dir,
            quantize,
            precision,
            temperature,
            top_k,
            top_p,
            max_new_tokens,
            devices,
            api_path=api_path,
        )

    def setup(self, device: str):
        super().setup(device)

    def predict(self, inputs: torch.Tensor) -> Any:
        yield from self.llm.generate(
            inputs,
            temperature=self.temperature,
            top_k=self.top_k,
            top_p=self.top_p,
            max_new_tokens=self.max_new_tokens,
            stream=True,
        )

    def encode_response(self, output):
        for out in output:
            yield {"output": out}


class OpenAISpecLitAPI(BaseLitAPI):
    def __init__(
        self,
        checkpoint_dir: Path,
        quantize: Optional[str] = None,
        precision: Optional[str] = None,
        temperature: float = 0.8,
        top_k: int = 50,
        top_p: float = 1.0,
        max_new_tokens: int = 50,
        devices: int = 1,
        api_path: Optional[str] = None,
    ):
        super().__init__(
            checkpoint_dir,
            quantize,
            precision,
            temperature,
            top_k,
            top_p,
            max_new_tokens,
            devices,
            api_path=api_path,
        )

    def setup(self, device: str):
        super().setup(device)
        if not _JINJA2_AVAILABLE:
            raise ImportError(str(_JINJA2_AVAILABLE))
        from jinja2 import Template

        config_path = self.checkpoint_dir / "tokenizer_config.json"
        if not config_path.is_file():
            raise FileNotFoundError(f"Tokenizer config file not found at {config_path}")

        with open(config_path, encoding="utf-8") as fp:
            config = json.load(fp)
        chat_template = config.get("chat_template", None)
        if chat_template is None:
            print("The tokenizer config does not contain chat_template, falling back to a default.")
            chat_template = "{% for m in messages %}{{ m.role }}: {{ m.content }}\n{% endfor %}Assistant: "
        self.chat_template = chat_template

        self.template = Template(self.chat_template)

    def decode_request(self, request: "ChatCompletionRequest") -> Any:
        # Apply chat template to request messages
        return self.template.render(messages=request.messages)

    def predict(self, inputs: str, context: dict) -> Any:
        # Extract parameters from context with fallback to instance attributes
        temperature = context.get("temperature") or self.temperature
        top_p = context.get("top_p", self.top_p) or self.top_p
        max_new_tokens = context.get("max_completion_tokens") or self.max_new_tokens

        # Run the model on the input and return the output.
        yield from self.llm.generate(
            inputs,
            temperature=temperature,
            top_k=self.top_k,
            top_p=top_p,
            max_new_tokens=max_new_tokens,
            stream=True,
        )


def run_server(
    checkpoint_dir: Path,
    quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8"]] = None,
    precision: Optional[str] = None,
    temperature: float = 0.8,
    top_k: int = 50,
    top_p: float = 1.0,
    max_new_tokens: int = 50,
    devices: int = 1,
    accelerator: str = "auto",
    port: int = 8000,
    stream: bool = False,
    openai_spec: bool = False,
    access_token: Optional[str] = None,
    api_path: Optional[str] = "/predict",
) -> None:
    """Serve a LitGPT model using LitServe.

    Arguments:
        checkpoint_dir: The checkpoint directory to load the model from.
        quantize: Whether to quantize the model and using which method:
            - bnb.nf4, bnb.nf4-dq, bnb.fp4, bnb.fp4-dq: 4-bit quantization from bitsandbytes
            - bnb.int8: 8-bit quantization from bitsandbytes
            for more details, see https://github.com/Lightning-AI/litgpt/blob/main/tutorials/quantize.md
        precision: Optional precision setting to instantiate the model weights in. By default, this will
            automatically be inferred from the metadata in the given ``checkpoint_dir`` directory.
        temperature: Temperature setting for the text generation. Values above 1 increase randomness.
            Values below 1 decrease randomness.
        top_k: The size of the pool of potential next tokens. Values larger than 1 result in more novel
            generated text but can also lead to more incoherent texts.
        top_p: If specified, it represents the cumulative probability threshold to consider in the sampling process.
            In top-p sampling, the next token is sampled from the highest probability tokens
            whose cumulative probability exceeds the threshold `top_p`. When specified,
            it must be `0 <= top_p <= 1`. Here, `top_p=0` is equivalent
            to sampling the most probable token, while `top_p=1` samples from the whole distribution.
            It can be used in conjunction with `top_k` and `temperature` with the following order
            of application:

            1. `top_k` sampling
            2. `temperature` scaling
            3. `top_p` sampling

            For more details, see https://arxiv.org/abs/1904.09751
            or https://huyenchip.com/2024/01/16/sampling.html#top_p
        max_new_tokens: The number of generation steps to take.
        devices: How many devices/GPUs to use.
        accelerator: The type of accelerator to use. For example, "auto", "cuda", "cpu", or "mps".
            The "auto" setting (default) chooses a GPU if available, and otherwise uses a CPU.
        port: The network port number on which the model is configured to be served.
        stream: Whether to stream the responses.
        openai_spec: Whether to use the OpenAISpec and enable OpenAI-compatible API endpoints. When True, the server
            will provide `/v1/chat/completions` endpoints that work with the OpenAI SDK and other OpenAI-compatible
            clients, making it easy to integrate with existing applications that use the OpenAI API.
        access_token: Optional API token to access models with restrictions.
        api_path: The custom API path for the endpoint (e.g., "/my_api/classify").
    """
    checkpoint_dir = auto_download_checkpoint(model_name=checkpoint_dir, access_token=access_token)
    pprint(locals())

    api_class = OpenAISpecLitAPI if openai_spec else StreamLitAPI if stream else SimpleLitAPI

    server = LitServer(
        api_class(
            checkpoint_dir=checkpoint_dir,
            quantize=quantize,
            precision=precision,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            max_new_tokens=max_new_tokens,
            devices=devices,
            api_path=api_path,
        ),
        spec=OpenAISpec() if openai_spec else None,
        accelerator=accelerator,
        devices=1,
        stream=stream,
    )

    server.run(port=port, generate_client_file=False)
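
Once the server is running (default port 8000, default endpoint `/predict`), the non-streaming API can be queried as follows (a minimal sketch; host and prompt are placeholders, and `requests` must be installed):

    import requests

    response = requests.post(
        "http://127.0.0.1:8000/predict",
        json={"prompt": "Hello, world"},  # decode_request reads the "prompt" key
    )
    print(response.json()["output"])  # matches encode_response's payload shape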
litgpt/eval/evaluate.py ADDED
@@ -0,0 +1,155 @@
+# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
+
+import json
+import os
+from pathlib import Path
+from pprint import pprint
+from typing import Optional, Union
+
+import torch
+
+from litgpt.scripts.convert_lit_checkpoint import convert_lit_checkpoint
+from litgpt.utils import auto_download_checkpoint, copy_config_files
+
+
+def prepare_results(results, save_filepath, print_results=True):
+    from lm_eval.utils import make_table
+
+    if print_results:
+        print(make_table(results))
+        if "groups" in results:
+            print(make_table(results, "groups"))
+
+    json_result = json.dumps(results, indent=2, ensure_ascii=False, default=str)
+    # Use a context manager so the file handle is closed deterministically.
+    with save_filepath.open("w", encoding="utf-8") as f:
+        f.write(json_result)
+
+
+def convert_and_evaluate(
+    checkpoint_dir: Path,
+    tasks: Optional[str] = None,
+    out_dir: Optional[Path] = None,
+    force_conversion: bool = False,
+    num_fewshot: Optional[int] = None,
+    batch_size: Union[int, str] = 1,
+    device: Optional[str] = None,
+    dtype: Optional[Union[str, torch.dtype]] = None,
+    limit: Optional[float] = None,
+    seed: int = 1234,
+    save_filepath: Optional[Path] = None,
+    access_token: Optional[str] = None,
+) -> None:
+    """Evaluate a model with the LM Evaluation Harness.
+
+    Arguments:
+        checkpoint_dir: Directory where the `lit_model.pth` and tokenizer files are located.
+        out_dir: Directory in which to save the converted checkpoints for evaluation.
+            Saves to `checkpoint_dir`/evaluate by default.
+        force_conversion: Set to `True` to reconvert the model and override
+            an existing model.pth from a previous evaluation call.
+        tasks: CSV of task names to evaluate. Example: "hellaswag,truthfulqa_mc2,mmlu"
+        num_fewshot: Number of examples in few-shot context.
+        batch_size: Batch size configuration as positive integer value (default: 1),
+            "auto", in the format 'auto:N', where 'auto:4' recomputes the batch size 4 times.
+        device: Device to use for evaluation, for example, "cuda" or "cuda:0".
+        dtype: The dtype to load the model weights with, for example, "bfloat16" or torch.float32.
+        limit: Limit on number of examples per task.
+        seed: Random seed.
+        save_filepath: The file where the results will be saved.
+            Saves to `out_dir/results.json` by default.
+        access_token: Optional API token to access models with restrictions.
+    """
+    if tasks is None:
+        from lm_eval.tasks import TaskManager
+
+        taskm = TaskManager()
+        print("\n".join(taskm.task_index.keys()))
+        print(
+            "\n\nTo evaluate multiple tasks, you can chain the task names "
+            "listed above via a comma-separated list."
+            "\nFor example: `--tasks 'hellaswag,truthfulqa_mc2,mmlu'`. "
+            "\nTo search for a specific task, use `litgpt evaluate list | grep task_name`."
+        )
+        return
+
+    checkpoint_dir = auto_download_checkpoint(model_name=checkpoint_dir, access_token=access_token)
+    pprint(locals())
+
+    if not (isinstance(batch_size, int) and batch_size > 0) and not (
+        isinstance(batch_size, str) and batch_size.startswith("auto")
+    ):
+        raise ValueError("batch_size must be a positive integer, 'auto', or in the format 'auto:N'.")
+
+    from lm_eval import evaluator
+
+    if device is None:
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+
+    if out_dir is None:
+        out_dir = checkpoint_dir / "evaluate"
+    else:
+        out_dir = Path(out_dir)
+    out_dir.mkdir(parents=True, exist_ok=True)
+
+    save_filepath = out_dir / Path("results.json") if save_filepath is None else Path(save_filepath)
+
+    model_path = out_dir / "pytorch_model.bin"
+    if not model_path.exists() or force_conversion:
+        copy_config_files(source_dir=checkpoint_dir, out_dir=out_dir)
+        convert_lit_checkpoint(checkpoint_dir=checkpoint_dir, output_dir=out_dir)
+
+        # Hack: LitGPT's conversion doesn't save a pickle file that is compatible to be loaded with
+        # `torch.load(..., weights_only=True)`, which is a requirement in HFLM.
+        # So we're `torch.load`-ing and `torch.save`-ing it again to work around this.
+        state_dict = torch.load(out_dir / "model.pth")
+        torch.save(state_dict, model_path)
+        os.remove(out_dir / "model.pth")
+
+    from lm_eval.models.huggingface import HFLM
+
+    model = HFLM(pretrained=str(out_dir.resolve()), device=device, batch_size=batch_size, dtype=dtype)
+
+    os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+    results = evaluator.simple_evaluate(
+        model=model,
+        tasks=tasks.split(","),
+        num_fewshot=num_fewshot,
+        batch_size=batch_size,
+        device=device,
+        limit=limit,
+        random_seed=seed,
+        numpy_random_seed=seed,
+        torch_random_seed=seed,
+        confirm_run_unsafe_code=True,
+    )
+
+    prepare_results(results, save_filepath)
+    output_data = {}
+
+    tasks_list = []
+    for task_name, metrics in results.get("results", {}).items():
+        for metric_key, value in metrics.items():
+            if metric_key.endswith("_stderr"):
+                continue
+            tasks_list.append({
+                "task": task_name,
+                "metric": metric_key.split(",")[0],
+                "value": value,
+            })
+    output_data["tasks"] = tasks_list
+
+    if "groups" in results:
+        groups_list = []
+        for group_name, metrics in results["groups"].items():
+            for metric_key, value in metrics.items():
+                if metric_key.endswith("_stderr"):
+                    continue
+                groups_list.append({
+                    "group": group_name,
+                    "metric": metric_key.split(",")[0],
+                    "value": value,
+                })
+        output_data["groups"] = groups_list
+
+    res_filepath = out_dir / Path("values.json")
+    with open(res_filepath, "w", encoding="utf-8") as f:
+        json.dump(output_data, f, ensure_ascii=False, indent=2)
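
A hedged usage sketch of `convert_and_evaluate` as defined above; the checkpoint path and task list are examples only:

    from pathlib import Path

    from litgpt.eval.evaluate import convert_and_evaluate

    # Converts the Lit checkpoint to HF format once (cached as pytorch_model.bin),
    # then runs the selected LM Evaluation Harness tasks and writes results.json/values.json.
    convert_and_evaluate(
        checkpoint_dir=Path("checkpoints/EleutherAI/pythia-160m"),  # example path
        tasks="hellaswag,truthfulqa_mc2",
        batch_size=4,
        limit=100,  # quick smoke test: 100 examples per task
    )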
litgpt/finetune/__init__.py ADDED
File without changes
litgpt/finetune/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (137 Bytes). View file
 
litgpt/finetune/__pycache__/adapter.cpython-310.pyc ADDED
Binary file (14.8 kB). View file
 
litgpt/finetune/__pycache__/adapter_v2.cpython-310.pyc ADDED
Binary file (15.6 kB). View file
 
litgpt/finetune/adapter.py ADDED
@@ -0,0 +1,491 @@
+# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
+import dataclasses
+import math
+import os
+import time
+import warnings
+from pathlib import Path
+from pprint import pprint
+from typing import Dict, List, Literal, Optional, Tuple, Union
+
+import lightning as L
+import torch
+from lightning.fabric.plugins import BitsandbytesPrecision
+from lightning.fabric.strategies import FSDPStrategy
+from lightning.fabric.utilities import ThroughputMonitor
+from lightning_utilities.core.imports import RequirementCache
+from torch.utils.data import ConcatDataset, DataLoader
+from torchmetrics import RunningMean
+
+from litgpt.adapter import GPT, Block, Config, adapter_filter, mark_only_adapter_as_trainable
+from litgpt.args import EvalArgs, LogArgs, TrainArgs
+from litgpt.data import Alpaca, DataModule
+from litgpt.generate.base import generate
+from litgpt.prompts import save_prompt_style
+from litgpt.tokenizer import Tokenizer
+from litgpt.utils import (
+    CycleIterator,
+    auto_download_checkpoint,
+    check_nvlink_connectivity,
+    check_valid_checkpoint_dir,
+    choose_logger,
+    chunked_cross_entropy,
+    copy_config_files,
+    create_finetuning_performance_report,
+    get_default_supported_precision,
+    init_out_dir,
+    instantiate_bnb_optimizer,
+    instantiate_torch_optimizer,
+    load_checkpoint,
+    num_parameters,
+    parse_devices,
+    save_hyperparameters,
+    select_sft_generate_example,
+)
+
+
+def setup(
+    checkpoint_dir: Path,
+    out_dir: Path = Path("out/finetune/adapter"),
+    precision: Optional[str] = None,
+    quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8-training"]] = None,
+    devices: Union[int, str] = 1,
+    num_nodes: int = 1,
+    data: Optional[DataModule] = None,
+    train: TrainArgs = TrainArgs(
+        save_interval=1000,
+        log_interval=1,
+        global_batch_size=16,
+        micro_batch_size=1,
+        lr_warmup_steps=100,
+        epochs=5,
+        max_seq_length=None,
+    ),
+    eval: EvalArgs = EvalArgs(interval=100, max_new_tokens=100, max_iters=100),
+    log: LogArgs = LogArgs(),
+    optimizer: Union[str, Dict] = "AdamW",
+    logger_name: Literal["wandb", "tensorboard", "csv", "mlflow"] = "csv",
+    seed: int = 1337,
+    access_token: Optional[str] = None,
+) -> None:
+    """Finetune a model using the Adapter method.
+
+    Arguments:
+        checkpoint_dir: The path to the base model's checkpoint directory to load for finetuning.
+        out_dir: Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
+            /teamspace/jobs/<job-name>/share.
+        precision: The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true".
+        quantize: If set, quantize the model with this algorithm. See ``tutorials/quantize.md`` for more information.
+        devices: How many devices/GPUs to use.
+        num_nodes: How many nodes the code is being run on.
+        data: Data-related arguments. If not provided, the default is ``litgpt.data.Alpaca``.
+        train: Training-related arguments. See ``litgpt.args.TrainArgs`` for details.
+        eval: Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details.
+        optimizer: An optimizer name (such as "AdamW") or config.
+        logger_name: The name of the logger to send metrics to.
+        seed: The random seed to use for reproducibility.
+        access_token: Optional API token to access models with restrictions.
+    """
+    checkpoint_dir = auto_download_checkpoint(model_name=checkpoint_dir, access_token=access_token)
+    pprint(locals())
+    data = Alpaca() if data is None else data
+    devices = parse_devices(devices)
+    out_dir = init_out_dir(out_dir)
+
+    check_valid_checkpoint_dir(checkpoint_dir)
+    config = Config.from_file(checkpoint_dir / "model_config.yaml")
+
+    precision = precision or get_default_supported_precision(training=True)
+    logger = choose_logger(
+        logger_name,
+        out_dir,
+        name=f"finetune-{config.name}",
+        log_interval=train.log_interval,
+        log_args=dataclasses.asdict(log),
+    )
+
+    plugins = None
+    if quantize is not None and quantize.startswith("bnb."):
+        if "mixed" in precision:
+            raise ValueError("Quantization and mixed precision is not supported.")
+        if RequirementCache("bitsandbytes != 0.42.0"):
+            warnings.warn(
+                "LitGPT only supports bitsandbytes v0.42.0. This may result in errors when using quantization."
+            )
+        dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision]
+        plugins = BitsandbytesPrecision(quantize[4:], dtype)
+        precision = None
+
+    if devices * num_nodes > 1:
+        if quantize:
+            raise NotImplementedError(
+                "Quantization is currently not supported for multi-GPU training. Please set devices=1 and num_nodes=1"
+                " when using the --quantize flag."
+            )
+        strategy = FSDPStrategy(
+            auto_wrap_policy={Block},
+            activation_checkpointing_policy={Block},
+            state_dict_type="full",
+            limit_all_gathers=True,
+            cpu_offload=False,
+        )
+    else:
+        strategy = "auto"
+
+    fabric = L.Fabric(
+        devices=devices,
+        num_nodes=num_nodes,
+        strategy=strategy,
+        precision=precision,
+        loggers=logger,
+        plugins=plugins,
+    )
+
+    if torch.cuda.is_available() and devices > 1:
+        check_nvlink_connectivity(fabric)
+
+    fabric.launch(main, devices, seed, config, data, checkpoint_dir, out_dir, train, eval, optimizer, num_nodes)
+
+
+def main(
+    fabric: L.Fabric,
+    devices: int,
+    seed: int,
+    config: Config,
+    data: DataModule,
+    checkpoint_dir: Path,
+    out_dir: Path,
+    train: TrainArgs,
+    eval: EvalArgs,
+    optimizer: Union[str, Dict],
+    num_nodes: int = 1,
+) -> None:
+    validate_args(train, eval)
+
+    tokenizer = Tokenizer(checkpoint_dir)
+    train_dataloader, val_dataloader = get_dataloaders(fabric, data, tokenizer, train)
+    steps_per_epoch = len(train_dataloader) // train.gradient_accumulation_iters(devices, num_nodes)
+    lr_max_steps = min(train.epochs * steps_per_epoch, (train.max_steps or float("inf")))
+
+    fabric.seed_everything(seed)  # same seed for every process to init model (FSDP)
+
+    if fabric.global_rank == 0:
+        os.makedirs(out_dir, exist_ok=True)
+
+    checkpoint_path = checkpoint_dir / "lit_model.pth"
+    with fabric.init_module(empty_init=(fabric.world_size > 1)):
+        model = GPT(config)
+    mark_only_adapter_as_trainable(model)
+
+    fabric.print(f"Number of trainable parameters: {num_parameters(model, requires_grad=True):,}")
+    fabric.print(f"Number of non-trainable parameters: {num_parameters(model, requires_grad=False):,}")
+
+    model = fabric.setup_module(model)
+    if isinstance(fabric.strategy.precision, BitsandbytesPrecision):
+        optimizer = instantiate_bnb_optimizer(optimizer, model.parameters())
+
+        from bitsandbytes.nn import StableEmbedding
+
+        old_embedding = model.transformer.wte
+        model.transformer.wte = StableEmbedding(old_embedding.num_embeddings, old_embedding.embedding_dim)
+        with torch.no_grad():
+            model.transformer.wte.weight.copy_(old_embedding.weight)
+        model.transformer.wte = model.transformer.wte.to(
+            device=old_embedding.weight.device, dtype=old_embedding.weight.dtype
+        )
+    else:
+        optimizer = instantiate_torch_optimizer(optimizer, model.parameters())
+
+    optimizer = fabric.setup_optimizers(optimizer)
+    scheduler = get_lr_scheduler(optimizer, warmup_steps=train.lr_warmup_steps, max_steps=lr_max_steps)
+
+    # strict=False because missing keys due to Adapter weights not contained in state dict
+    load_checkpoint(fabric, model, checkpoint_path, strict=False)
+
+    train_time = time.perf_counter()
+    token_counts = fit(
+        fabric=fabric,
+        model=model,
+        optimizer=optimizer,
+        scheduler=scheduler,
+        train_dataloader=train_dataloader,
+        val_dataloader=val_dataloader,
+        devices=devices,
+        num_nodes=num_nodes,
+        checkpoint_dir=checkpoint_dir,
+        out_dir=out_dir,
+        train=train,
+        eval=eval,
+        data=data,
+    )
+    training_time = time.perf_counter() - train_time
+    output = create_finetuning_performance_report(training_time, token_counts, fabric.device.type)
+    fabric.print(output)
+
+    # Final evaluation
+    if eval.final_validation:
+        val_loss = validate(fabric, model, val_dataloader, dataclasses.replace(eval, max_iters=len(val_dataloader)))
+        metrics = {"val_loss": val_loss, "val_ppl": math.exp(val_loss)}
+        fabric.log_dict(metrics)
+        fabric.print(f"Final evaluation | val loss: {val_loss.item():.3f} | val ppl: {math.exp(val_loss):.3f}")
+
+    # Save the final Adapter checkpoint at the end of training
+    save_path = out_dir / "final" / "lit_model.pth.adapter"
+    save_path.parent.mkdir(parents=True, exist_ok=True)
+    save_adapter_checkpoint(fabric, model, save_path)
+    if fabric.global_rank == 0:
+        # Copy checkpoint files from original checkpoint dir
+        copy_config_files(checkpoint_dir, save_path.parent)
+        save_hyperparameters(setup, save_path.parent)
+        save_prompt_style(data.prompt_style, save_path.parent)
+
+
+def fit(
+    fabric: L.Fabric,
+    model: GPT,
+    optimizer: torch.optim.Optimizer,
+    scheduler: torch.optim.lr_scheduler,
+    train_dataloader: DataLoader,
+    val_dataloader: DataLoader,
+    devices: int,
+    checkpoint_dir: Path,
+    out_dir: Path,
+    train: TrainArgs,
+    eval: EvalArgs,
+    data: DataModule,
+    num_nodes: int = 1,
+) -> Dict[str, int]:
+    tokenizer = Tokenizer(checkpoint_dir)
+    longest_seq_length, longest_seq_ix = get_longest_seq_length(
+        ConcatDataset([train_dataloader.dataset, val_dataloader.dataset])
+    )
+    model.max_seq_length = min(longest_seq_length, train.max_seq_length or float("inf"))
+    fabric.print(
+        f"The longest sequence length in the train data is {longest_seq_length}, the model's maximum sequence length is"
+        f" {model.max_seq_length} and context length is {model.config.block_size}"
+    )
+
+    if eval.initial_validation:
+        val_loss = validate(fabric, model, val_dataloader, dataclasses.replace(eval, max_iters=len(val_dataloader)))
+        val_loss = f"{val_loss:.3f}"
+    else:
+        fabric.print("Verifying settings ...")
+        validate(fabric, model, val_dataloader, dataclasses.replace(eval, max_iters=2), verbose=False)  # sanity check
+        val_loss = "n/a"
+
+    train_iterator = CycleIterator(train_dataloader)
+    throughput = ThroughputMonitor(fabric, window_size=50)
+    running_loss = RunningMean(window=train.gradient_accumulation_iters(devices, num_nodes), sync_on_compute=False).to(
+        fabric.device
+    )
+    max_steps = train.max_steps or float("inf")
+    step_count = 0
+    iter_num = 0
+    total_lengths = 0
+    total_t0 = time.perf_counter()
+
+    token_counts = {
+        "raw_tokens": torch.tensor(0, device=fabric.device, dtype=torch.long),
+        "raw_tokens_plus_prompt_template": torch.tensor(0, device=fabric.device, dtype=torch.long),
+        "raw_tokens_plus_prompt_template_and_padding": torch.tensor(0, device=fabric.device, dtype=torch.long),
+    }
+
+    while step_count < max_steps:
+        iter_num += 1
+        iter_t0 = time.perf_counter()
+        batch = next(train_iterator)
+        if train_iterator.epoch >= train.epochs:
+            break
+        input_ids, targets = batch["input_ids"], batch["labels"]
+
+        is_accumulating = iter_num % train.gradient_accumulation_iters(devices, num_nodes) != 0
+        with fabric.no_backward_sync(model, enabled=is_accumulating):
+            logits = model(input_ids, lm_head_chunk_size=128)
+            # shift the targets such that output n predicts token n+1
+            logits[-1] = logits[-1][..., :-1, :]
+            loss = chunked_cross_entropy(logits, targets[..., 1:])
+            fabric.backward(loss / train.gradient_accumulation_iters(devices, num_nodes))
+
+        running_loss.update(loss.detach())
+
+        if not is_accumulating:
+            optimizer.step()
+            optimizer.zero_grad()
+            scheduler.step()
+            step_count += 1
+
+        token_counts["raw_tokens"] += batch["token_counts"]["raw"].sum().item()
+        token_counts["raw_tokens_plus_prompt_template"] += (
+            batch["token_counts"]["raw_plus_prompt_template"].sum().item()
+        )
+        token_counts["raw_tokens_plus_prompt_template_and_padding"] += input_ids.numel()
+
+        total_lengths += input_ids.numel()
+        if iter_num % train.log_interval == 0:
+            loss = running_loss.compute().item()  # expensive device-to-host synchronization
+            t1 = time.perf_counter()
+            throughput.update(
+                time=t1 - total_t0, batches=iter_num, samples=iter_num * train.micro_batch_size, lengths=total_lengths
+            )
+            throughput.compute_and_log(step=iter_num)
+            metrics = {
+                "loss": loss,
+                "iter": iter_num,
+                "step": step_count,
+                "epoch": train_iterator.epoch,
+                "iter_time": t1 - iter_t0,
+                "tokens": token_counts["raw_tokens_plus_prompt_template"],
+                "total_tokens": token_counts["raw_tokens_plus_prompt_template"] * fabric.world_size,
+                "learning_rate": scheduler.get_last_lr()[0],
+            }
+            if isinstance(val_loss, torch.Tensor):
+                val_loss = f"{val_loss:.3f}"
+            fabric.print(
+                f"Epoch {metrics['epoch'] + 1} | iter {metrics['iter']} step {metrics['step']} |"
+                f" loss train: {metrics['loss']:.3f},"
+                f" val: {val_loss} |"
+                f" iter time: {metrics['iter_time'] * 1000:.2f} ms"
+                f"{' (step)' if not is_accumulating else ''}"
+            )
+            fabric.log_dict(metrics, step=iter_num)
+
+        if not is_accumulating and step_count % eval.interval == 0:
+            t0 = time.perf_counter()
+            val_loss = validate(fabric, model, val_dataloader, eval)
+            generate_example(fabric, model, tokenizer, eval, data)
+            t1 = time.perf_counter() - t0
+
+            val_loss_tensor = val_loss.detach().clone().to(fabric.device)
+            val_time_tensor = torch.tensor(t1, device=fabric.device, dtype=torch.float32)
+
+            fabric.all_reduce(val_loss_tensor, reduce_op="mean")
+            fabric.all_reduce(val_time_tensor, reduce_op="mean")
+
+            fabric.print(
+                f"iter {iter_num}: val loss {val_loss_tensor.item():.4f}, val time: {val_time_tensor.item() * 1000:.2f} ms"
+            )
+            metrics = {"val_loss": val_loss_tensor, "val_ppl": math.exp(val_loss_tensor)}
+            fabric.log_dict(metrics, step=iter_num)
+            fabric.barrier()
+
+        if train.save_interval is not None and not is_accumulating and step_count % train.save_interval == 0:
+            checkpoint_file = out_dir / f"step-{step_count:06d}" / "lit_model.pth.adapter"
+            checkpoint_file.parent.mkdir(parents=True, exist_ok=True)
+            save_adapter_checkpoint(fabric, model, checkpoint_file)
+            if fabric.global_rank == 0:
+                copy_config_files(checkpoint_dir, checkpoint_file.parent)
+                save_hyperparameters(setup, checkpoint_file.parent)
+                save_prompt_style(data.prompt_style, checkpoint_file.parent)
+
+    total_token_counts = {}
+    for key in token_counts:
+        total = fabric.all_reduce(token_counts[key], reduce_op="sum")
+        total_token_counts[key] = total.item()
+
+    return total_token_counts
+
+
+# FSDP has issues with `inference_mode`
+@torch.no_grad()
+def validate(
+    fabric: L.Fabric, model: GPT, val_dataloader: DataLoader, eval: EvalArgs, verbose: bool = True
+) -> torch.Tensor:
+    if verbose:
+        fabric.print("Validating ...")
+    model.eval()
+    losses = torch.zeros(min(len(val_dataloader), eval.max_iters))
+    for k, batch in enumerate(val_dataloader):
+        if k >= eval.max_iters:
+            break
+        input_ids, targets = batch["input_ids"], batch["labels"]
+        logits = model(input_ids)
+        losses[k] = chunked_cross_entropy(logits[..., :-1, :], targets[..., 1:], chunk_size=0)
+
+    val_loss = losses.mean()
+    model.train()
+    return val_loss
+
+
+# the adapter "kv cache" cannot be initialized under `inference_mode`
+@torch.no_grad()
+def generate_example(fabric: L.Fabric, model: GPT, tokenizer: Tokenizer, eval: EvalArgs, data: DataModule):
+    instruction = select_sft_generate_example(eval, data)
+    fabric.print(instruction)
+    prompt = data.prompt_style.apply(instruction)
+    encoded = tokenizer.encode(prompt, device=fabric.device)
+    model.eval()
+
+    max_returned_tokens = len(encoded) + eval.max_new_tokens
+
+    if max_returned_tokens < model.max_seq_length:
+        with fabric.init_tensor():
+            # do not set `max_seq_length=max_returned_tokens` because memory is not a concern here
+            model.set_kv_cache(batch_size=1)
+        output = generate(
+            model, encoded, max_returned_tokens=max_returned_tokens, temperature=0.8, eos_id=tokenizer.eos_id
+        )
+        model.clear_kv_cache()
+        model.train()
+        output = tokenizer.decode(output)
+        fabric.print(f"{output}\n")
+    else:
+        print(
+            f"Length of encoded instruction ({len(encoded)}) and eval.max_new_tokens ({eval.max_new_tokens}) "
+            f"exceeds model.max_seq_length ({model.max_seq_length}) used for training. Skipping example generation for efficiency. "
+            f"The model's supported context size (post-training) is {model.config.block_size}."
+        )
+
+
+def get_lr_scheduler(optimizer, warmup_steps: int, max_steps: int):
+    # linear warmup followed by cosine annealing
+    scheduler1 = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: step / warmup_steps)
+    scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=(max_steps - warmup_steps))
+    return torch.optim.lr_scheduler.SequentialLR(optimizer, [scheduler1, scheduler2], milestones=[warmup_steps])
+
+
+def get_dataloaders(
+    fabric: L.Fabric, data: DataModule, tokenizer: Tokenizer, train: TrainArgs
+) -> Tuple[DataLoader, DataLoader]:
+    data.connect(tokenizer=tokenizer, batch_size=train.micro_batch_size, max_seq_length=train.max_seq_length)
+    with fabric.rank_zero_first():
+        data.prepare_data()
+    data.setup()
+    train_dataloader = data.train_dataloader()
+    val_dataloader = data.val_dataloader()
+    train_dataloader, val_dataloader = fabric.setup_dataloaders(train_dataloader, val_dataloader)
+    return train_dataloader, val_dataloader
+
+
+def get_longest_seq_length(data: List[Dict]) -> Tuple[int, int]:
+    # find out the minimum max_seq_length required during fine-tuning (saves memory!)
+    lengths = [len(d["input_ids"]) for d in data]
+    longest_seq_length = max(lengths)
+    longest_seq_ix = lengths.index(longest_seq_length)
+    return longest_seq_length, longest_seq_ix
+
+
+def save_adapter_checkpoint(fabric: L.Fabric, model: torch.nn.Module, file_path: Path) -> None:
+    fabric.print(f"Saving adapter weights to {str(file_path)!r}")
+    fabric.save(file_path, {"model": model}, filter={"model": adapter_filter})
+
+
+def validate_args(train: TrainArgs, eval: EvalArgs) -> None:
+    issues = []
+    unsupported = [(train, ["max_tokens", "max_norm", "tie_embeddings", "lr_warmup_fraction"])]
+    for args, names in unsupported:
+        for name in names:
+            if getattr(args, name) is not None:
+                issues.append(f"{__file__} doesn't support the {name!r} argument. This is set in {args}")
+    required = [(train, ["epochs"]), (eval, ["max_new_tokens"])]
+    for args, names in required:
+        for name in names:
+            if getattr(args, name) is None:
+                issues.append(f"{__file__} requires the {name!r} argument. This is set in {args}")
+    if not train.epochs and not train.max_steps:
+        issues.append(f"{__file__} requires either epochs or max_steps to be set. This is set in {train}")
+    if issues:
+        raise ValueError("\n".join(issues))
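
A small standalone sketch of the schedule produced by `get_lr_scheduler` above (linear warmup, then cosine annealing); the toy step counts are arbitrary:

    import torch

    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.AdamW([param], lr=1e-3)
    warmup, total = 10, 100
    scheduler1 = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: step / warmup)
    scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total - warmup)
    scheduler = torch.optim.lr_scheduler.SequentialLR(optimizer, [scheduler1, scheduler2], milestones=[warmup])

    for step in range(total):
        optimizer.step()
        scheduler.step()
    # The learning rate rises linearly to 1e-3 over the first 10 steps,
    # then decays along a cosine curve toward 0 for the remaining 90.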
litgpt/finetune/adapter_v2.py ADDED
@@ -0,0 +1,514 @@
+# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
+import dataclasses
+import math
+import os
+import time
+import warnings
+from pathlib import Path
+from pprint import pprint
+from typing import Dict, List, Literal, Optional, Tuple, Union
+
+import lightning as L
+import torch
+from lightning.fabric.plugins import BitsandbytesPrecision
+from lightning.fabric.strategies import FSDPStrategy
+from lightning.fabric.utilities import ThroughputMonitor
+from lightning_utilities.core.imports import RequirementCache
+from torch.utils.data import ConcatDataset, DataLoader
+from torchmetrics import RunningMean
+
+from litgpt.adapter_v2 import GPT, Block, Config, adapter_filter, mark_only_adapter_v2_as_trainable
+from litgpt.args import EvalArgs, LogArgs, TrainArgs
+from litgpt.data import Alpaca, DataModule
+from litgpt.generate.base import generate
+from litgpt.prompts import save_prompt_style
+from litgpt.tokenizer import Tokenizer
+from litgpt.utils import (
+    CycleIterator,
+    auto_download_checkpoint,
+    check_nvlink_connectivity,
+    check_valid_checkpoint_dir,
+    choose_logger,
+    chunked_cross_entropy,
+    copy_config_files,
+    create_finetuning_performance_report,
+    get_default_supported_precision,
+    init_out_dir,
+    instantiate_bnb_optimizer,
+    instantiate_torch_optimizer,
+    load_checkpoint,
+    load_checkpoint_update,
+    num_parameters,
+    parse_devices,
+    save_hyperparameters,
+    select_sft_generate_example,
+)
+
+
+def setup(
+    checkpoint_dir: Path,
+    out_dir: Path = Path("out/finetune/adapter-v2"),
+    precision: Optional[str] = None,
+    quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8-training"]] = None,
+    devices: Union[int, str] = 1,
+    num_nodes: int = 1,
+    resume: Optional[bool] = False,
+    data: Optional[DataModule] = None,
+    train: TrainArgs = TrainArgs(
+        save_interval=1000,
+        log_interval=1,
+        global_batch_size=16,
+        micro_batch_size=1,
+        lr_warmup_steps=100,
+        epochs=5,
+        max_seq_length=None,
+    ),
+    eval: EvalArgs = EvalArgs(interval=100, max_new_tokens=100, max_iters=100),
+    log: LogArgs = LogArgs(),
+    optimizer: Union[str, Dict] = "AdamW",
+    logger_name: Literal["wandb", "tensorboard", "csv", "mlflow"] = "csv",
+    seed: int = 1337,
+    access_token: Optional[str] = None,
+) -> None:
+    """Finetune a model using the Adapter V2 method.
+
+    Arguments:
+        checkpoint_dir: The path to the base model's checkpoint directory to load for finetuning.
+        out_dir: Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
+            /teamspace/jobs/<job-name>/share.
+        precision: The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true".
+        quantize: If set, quantize the model with this algorithm. See ``tutorials/quantize.md`` for more information.
+        devices: How many devices/GPUs to use.
+        num_nodes: How many nodes the code is being run on.
+        resume: Whether to resume training from the most recent ``step-*`` adapter checkpoint found in ``out_dir``.
+        data: Data-related arguments. If not provided, the default is ``litgpt.data.Alpaca``.
+        train: Training-related arguments. See ``litgpt.args.TrainArgs`` for details.
+        eval: Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details.
+        optimizer: An optimizer name (such as "AdamW") or config.
+        logger_name: The name of the logger to send metrics to.
+        seed: The random seed to use for reproducibility.
+        access_token: Optional API token to access models with restrictions.
+    """
+    checkpoint_dir = auto_download_checkpoint(model_name=checkpoint_dir, access_token=access_token)
+    pprint(locals())
+    data = Alpaca() if data is None else data
+    devices = parse_devices(devices)
+    out_dir = init_out_dir(out_dir)
+
+    check_valid_checkpoint_dir(checkpoint_dir)
+    config = Config.from_file(checkpoint_dir / "model_config.yaml")
+
+    precision = precision or get_default_supported_precision(training=True)
+    logger = choose_logger(
+        logger_name,
+        out_dir,
+        name=f"finetune-{config.name}",
+        log_interval=train.log_interval,
+        log_args=dataclasses.asdict(log),
+    )
+
+    plugins = None
+    if quantize is not None and quantize.startswith("bnb."):
+        if "mixed" in precision:
+            raise ValueError("Quantization and mixed precision is not supported.")
+        if RequirementCache("bitsandbytes != 0.42.0"):
+            warnings.warn(
+                "LitGPT only supports bitsandbytes v0.42.0. This may result in errors when using quantization."
+            )
+        dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision]
+        plugins = BitsandbytesPrecision(quantize[4:], dtype)
+        precision = None
+
+    if devices * num_nodes > 1:
+        if quantize:
+            raise NotImplementedError(
+                "Quantization is currently not supported for multi-GPU training. Please set devices=1 and num_nodes=1"
+                " when using the --quantize flag."
+            )
+        strategy = FSDPStrategy(
+            auto_wrap_policy={Block},
+            activation_checkpointing_policy={Block},
+            state_dict_type="full",
+            limit_all_gathers=True,
+            cpu_offload=False,
+        )
+    else:
+        strategy = "auto"
+
+    fabric = L.Fabric(
+        devices=devices,
+        num_nodes=num_nodes,
+        strategy=strategy,
+        precision=precision,
+        loggers=logger,
+        plugins=plugins,
+    )
+
+    if torch.cuda.is_available() and devices > 1:
+        check_nvlink_connectivity(fabric)
+
+    fabric.launch(main, devices, seed, config, data, resume, checkpoint_dir, out_dir, train, eval, optimizer, num_nodes)
+
+
+def main(
+    fabric: L.Fabric,
+    devices: int,
+    seed: int,
+    config: Config,
+    data: DataModule,
+    resume: bool,
+    checkpoint_dir: Path,
+    out_dir: Path,
+    train: TrainArgs,
+    eval: EvalArgs,
+    optimizer: Union[str, Dict],
+    num_nodes: int = 1,
+) -> None:
+    validate_args(train, eval)
+
+    tokenizer = Tokenizer(checkpoint_dir)
+    train_dataloader, val_dataloader = get_dataloaders(fabric, data, tokenizer, train)
+    steps_per_epoch = len(train_dataloader) // train.gradient_accumulation_iters(devices, num_nodes)
+    lr_max_steps = min(train.epochs * steps_per_epoch, (train.max_steps or float("inf")))
+
+    fabric.seed_everything(seed)  # same seed for every process to init model (FSDP)
+
+    if fabric.global_rank == 0:
+        os.makedirs(out_dir, exist_ok=True)
+
+    checkpoint_path = checkpoint_dir / "lit_model.pth"
+    with fabric.init_module(empty_init=(fabric.world_size > 1)):
+        model = GPT(config)
+    mark_only_adapter_v2_as_trainable(model)
+
+    fabric.print(f"Number of trainable parameters: {num_parameters(model, requires_grad=True):,}")
+    fabric.print(f"Number of non-trainable parameters: {num_parameters(model, requires_grad=False):,}")
+
+    model = fabric.setup_module(model)
+    if isinstance(fabric.strategy.precision, BitsandbytesPrecision):
+        optimizer = instantiate_bnb_optimizer(optimizer, model.parameters())
+
+        from bitsandbytes.nn import StableEmbedding
+
+        old_embedding = model.transformer.wte
+        model.transformer.wte = StableEmbedding(old_embedding.num_embeddings, old_embedding.embedding_dim)
+        with torch.no_grad():
+            model.transformer.wte.weight.copy_(old_embedding.weight)
+        model.transformer.wte = model.transformer.wte.to(
+            device=old_embedding.weight.device, dtype=old_embedding.weight.dtype
+        )
+    else:
+        optimizer = instantiate_torch_optimizer(optimizer, model.parameters())
+
+    optimizer = fabric.setup_optimizers(optimizer)
+    scheduler = get_lr_scheduler(optimizer, warmup_steps=train.lr_warmup_steps, max_steps=lr_max_steps)
+    if resume:
+        # Finding last trace of adapter training
+        try:
+            resume = max(out_dir.rglob("step-*/*.pth.adapter_v2"), key=(lambda p: int(p.parent.name.split("-")[1])))
+            fabric.print(f"Resuming training from {resume}")
+            load_checkpoint_update(fabric, resume, model, checkpoint_path, strict=False)
+            resume = True
+        except ValueError:
+            fabric.print("No previous adapter found. Finetune from start.")
+            resume = False
+            load_checkpoint(fabric, model, checkpoint_path, strict=False)
+    else:
+        # strict=False because missing keys due to Adapter weights not contained in state dict
+        load_checkpoint(fabric, model, checkpoint_path, strict=False)
+
+    mark_only_adapter_v2_as_trainable(model)
+
+    train_time = time.perf_counter()
+    token_counts = fit(
+        fabric=fabric,
+        model=model,
+        optimizer=optimizer,
+        scheduler=scheduler,
+        train_dataloader=train_dataloader,
+        val_dataloader=val_dataloader,
+        devices=devices,
+        resume=resume,
+        num_nodes=num_nodes,
+        checkpoint_dir=checkpoint_dir,
+        out_dir=out_dir,
+        train=train,
+        eval=eval,
+        data=data,
+    )
+    training_time = time.perf_counter() - train_time
+    output = create_finetuning_performance_report(training_time, token_counts, fabric.device.type)
+    fabric.print(output)
+
+    # Final evaluation
+    if eval.final_validation:
+        val_loss = validate(fabric, model, val_dataloader, dataclasses.replace(eval, max_iters=len(val_dataloader)))
+        metrics = {"val_loss": val_loss, "val_ppl": math.exp(val_loss)}
+        fabric.log_dict(metrics)
+        fabric.print(f"Final evaluation | val loss: {val_loss.item():.3f} | val ppl: {math.exp(val_loss):.3f}")
+
+    # Save the final Adapter checkpoint at the end of training
+    save_path = out_dir / "final" / "lit_model.pth.adapter_v2"
+    save_path.parent.mkdir(parents=True, exist_ok=True)
+    save_adapter_v2_checkpoint(fabric, model, save_path)
+    if fabric.global_rank == 0:
+        # Copy checkpoint files from original checkpoint dir
+        copy_config_files(checkpoint_dir, save_path.parent)
+        save_hyperparameters(setup, save_path.parent)
+        save_prompt_style(data.prompt_style, save_path.parent)
+
+
+def fit(
+    fabric: L.Fabric,
+    model: GPT,
+    optimizer: torch.optim.Optimizer,
+    scheduler: torch.optim.lr_scheduler,
+    train_dataloader: DataLoader,
+    val_dataloader: DataLoader,
+    devices: int,
+    resume: bool,
+    checkpoint_dir: Path,
+    out_dir: Path,
+    train: TrainArgs,
+    eval: EvalArgs,
+    data: DataModule,
+    num_nodes: int = 1,
+) -> Dict[str, int]:
+    tokenizer = Tokenizer(checkpoint_dir)
+    longest_seq_length, longest_seq_ix = get_longest_seq_length(
+        ConcatDataset([train_dataloader.dataset, val_dataloader.dataset])
+    )
+    model.max_seq_length = min(longest_seq_length, train.max_seq_length or float("inf"))
+    fabric.print(
+        f"The longest sequence length in the train data is {longest_seq_length}, the model's maximum sequence length is"
+        f" {model.max_seq_length} and context length is {model.config.block_size}"
+    )
+
+    if eval.initial_validation:
+        val_loss = validate(fabric, model, val_dataloader, dataclasses.replace(eval, max_iters=len(val_dataloader)))
+        val_loss = f"{val_loss:.3f}"
+    else:
+        fabric.print("Verifying settings ...")
+        validate(fabric, model, val_dataloader, dataclasses.replace(eval, max_iters=2), verbose=False)  # sanity check
+        val_loss = "n/a"
+
+    train_iterator = CycleIterator(train_dataloader)
+    throughput = ThroughputMonitor(fabric, window_size=50)
+    running_loss = RunningMean(window=train.gradient_accumulation_iters(devices, num_nodes), sync_on_compute=False).to(
+        fabric.device
+    )
+    max_steps = train.max_steps or float("inf")
+    step_count = 0
+    iter_num = 0
+    total_lengths = 0
+    total_t0 = time.perf_counter()
+
+    token_counts = {
+        "raw_tokens": torch.tensor(0, device=fabric.device, dtype=torch.long),
+        "raw_tokens_plus_prompt_template": torch.tensor(0, device=fabric.device, dtype=torch.long),
+        "raw_tokens_plus_prompt_template_and_padding": torch.tensor(0, device=fabric.device, dtype=torch.long),
+    }
+
+    if resume:
+        # Recover the global step count from the most recent checkpoint so step numbering continues.
+        try:
+            iter_match = max(out_dir.rglob("step-*/*.pth.adapter_v2"), key=lambda p: int(p.parent.name.split("-")[1]))
+            step_count = int(iter_match.parent.name.split("-")[1]) if iter_match else 0
+        except ValueError:
+            step_count = 0
+
+    fabric.print(f"Starting at step count {step_count}")
+    while step_count < max_steps and train_iterator.epoch < train.epochs:
+        iter_num += 1
+        iter_t0 = time.perf_counter()
+        batch = next(train_iterator)
+        if train_iterator.epoch >= train.epochs:
+            break
+
+        input_ids, targets = batch["input_ids"], batch["labels"]
+
+        is_accumulating = iter_num % train.gradient_accumulation_iters(devices, num_nodes) != 0
+        with fabric.no_backward_sync(model, enabled=is_accumulating):
+            logits = model(input_ids, lm_head_chunk_size=128)
+            # shift the targets such that output n predicts token n+1
+            logits[-1] = logits[-1][..., :-1, :]
+            loss = chunked_cross_entropy(logits, targets[..., 1:])
+            fabric.backward(loss / train.gradient_accumulation_iters(devices, num_nodes))
+
+        running_loss.update(loss.detach())
+
+        if not is_accumulating:
+            optimizer.step()
+            optimizer.zero_grad()
+            scheduler.step()
+            step_count += 1
+
+        token_counts["raw_tokens"] += batch["token_counts"]["raw"].sum().item()
+        token_counts["raw_tokens_plus_prompt_template"] += (
+            batch["token_counts"]["raw_plus_prompt_template"].sum().item()
+        )
+        token_counts["raw_tokens_plus_prompt_template_and_padding"] += input_ids.numel()
+
+        total_lengths += input_ids.numel()
+        if iter_num % train.log_interval == 0:
+            loss = running_loss.compute().item()  # expensive device-to-host synchronization
+            t1 = time.perf_counter()
+            throughput.update(
+                time=t1 - total_t0, batches=iter_num, samples=iter_num * train.micro_batch_size, lengths=total_lengths
+            )
+            throughput.compute_and_log(step=iter_num)
+            metrics = {
+                "loss": loss,
+                "iter": iter_num,
+                "step": step_count,
+                "epoch": train_iterator.epoch,
+                "iter_time": t1 - iter_t0,
+                "tokens": token_counts["raw_tokens_plus_prompt_template"],
+                "total_tokens": token_counts["raw_tokens_plus_prompt_template"] * fabric.world_size,
+                "learning_rate": scheduler.get_last_lr()[0],
+            }
+            if isinstance(val_loss, torch.Tensor):
+                val_loss = f"{val_loss:.3f}"
+            fabric.print(
+                f"Epoch {metrics['epoch'] + 1} | iter {metrics['iter']} step {metrics['step']} |"
+                f" loss train: {metrics['loss']:.3f},"
+                f" val: {val_loss} |"
+                f" iter time: {metrics['iter_time'] * 1000:.2f} ms"
+                f"{' (step)' if not is_accumulating else ''}"
+            )
+            fabric.log_dict(metrics, step=iter_num)
+
+        if not is_accumulating and step_count % eval.interval == 0:
+            t0 = time.perf_counter()
+            val_loss = validate(fabric, model, val_dataloader, eval)
+            generate_example(fabric, model, tokenizer, eval, data)
+            t1 = time.perf_counter() - t0
+
+            val_loss_tensor = val_loss.detach().clone().to(fabric.device)
+            val_time_tensor = torch.tensor(t1, device=fabric.device, dtype=torch.float32)
+
+            fabric.all_reduce(val_loss_tensor, reduce_op="mean")
+            fabric.all_reduce(val_time_tensor, reduce_op="mean")
+
+            fabric.print(
+                f"iter {iter_num}: val loss {val_loss_tensor.item():.4f}, val time: {val_time_tensor.item() * 1000:.2f} ms"
+            )
+            metrics = {"val_loss": val_loss_tensor, "val_ppl": math.exp(val_loss_tensor)}
+            fabric.log_dict(metrics, step=iter_num)
+            fabric.barrier()
+
+        if train.save_interval is not None and not is_accumulating and step_count % train.save_interval == 0:
+            checkpoint_file = out_dir / f"step-{step_count:06d}" / "lit_model.pth.adapter_v2"
+            checkpoint_file.parent.mkdir(parents=True, exist_ok=True)
+            save_adapter_v2_checkpoint(fabric, model, checkpoint_file)
+            if fabric.global_rank == 0:
+                copy_config_files(checkpoint_dir, checkpoint_file.parent)
+                save_hyperparameters(setup, checkpoint_file.parent)
+                save_prompt_style(data.prompt_style, checkpoint_file.parent)
+
+    total_token_counts = {}
+    for key in token_counts:
+        total = fabric.all_reduce(token_counts[key], reduce_op="sum")
+        total_token_counts[key] = total.item()
+
+    return total_token_counts
+
+
+# FSDP has issues with `inference_mode`
+@torch.no_grad()
+def validate(
+    fabric: L.Fabric, model: GPT, val_dataloader: DataLoader, eval: EvalArgs, verbose: bool = True
+) -> torch.Tensor:
+    if verbose:
+        fabric.print("Validating ...")
+    model.eval()
+    losses = torch.zeros(min(len(val_dataloader), eval.max_iters))
+    for k, batch in enumerate(val_dataloader):
+        if k >= eval.max_iters:
+            break
+        input_ids, targets = batch["input_ids"], batch["labels"]
+        logits = model(input_ids)
+        losses[k] = chunked_cross_entropy(logits[..., :-1, :], targets[..., 1:], chunk_size=0)
+
+    val_loss = losses.mean()
+    model.train()
+    return val_loss
+
+
+# the adapter "kv cache" cannot be initialized under `inference_mode`
+@torch.no_grad()
+def generate_example(fabric: L.Fabric, model: GPT, tokenizer: Tokenizer, eval: EvalArgs, data: DataModule):
+    instruction = select_sft_generate_example(eval, data)
+    fabric.print(instruction)
+    prompt = data.prompt_style.apply(instruction)
+    encoded = tokenizer.encode(prompt, device=fabric.device)
+    model.eval()
+
+    max_returned_tokens = len(encoded) + eval.max_new_tokens
+
+    if max_returned_tokens < model.max_seq_length:
+        with fabric.init_tensor():
+            # do not set `max_seq_length=max_returned_tokens` because memory is not a concern here
+            model.set_kv_cache(batch_size=1)
+        output = generate(
+            model, encoded, max_returned_tokens=max_returned_tokens, temperature=0.8, eos_id=tokenizer.eos_id
+        )
+        model.clear_kv_cache()
+        model.train()
+        output = tokenizer.decode(output)
+        fabric.print(f"{output}\n")
+    else:
+        print(
+            f"Length of encoded instruction ({len(encoded)}) and eval.max_new_tokens ({eval.max_new_tokens}) "
+            f"exceeds model.max_seq_length ({model.max_seq_length}) used for training. Skipping example generation for efficiency. "
+            f"The model's supported context size (post-training) is {model.config.block_size}."
+        )
+
+
+def get_lr_scheduler(optimizer, warmup_steps: int, max_steps: int):
+    # linear warmup followed by cosine annealing
+    scheduler1 = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: step / warmup_steps)
+    scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=(max_steps - warmup_steps))
+    return torch.optim.lr_scheduler.SequentialLR(optimizer, [scheduler1, scheduler2], milestones=[warmup_steps])
+
+
+def get_dataloaders(
+    fabric: L.Fabric, data: DataModule, tokenizer: Tokenizer, train: TrainArgs
+) -> Tuple[DataLoader, DataLoader]:
+    data.connect(tokenizer=tokenizer, batch_size=train.micro_batch_size, max_seq_length=train.max_seq_length)
+    with fabric.rank_zero_first():
+        data.prepare_data()
+    data.setup()
+    train_dataloader = data.train_dataloader()
+    val_dataloader = data.val_dataloader()
+    train_dataloader, val_dataloader = fabric.setup_dataloaders(train_dataloader, val_dataloader)
+    return train_dataloader, val_dataloader
+
+
+def get_longest_seq_length(data: List[Dict]) -> Tuple[int, int]:
+    # find out the minimum max_seq_length required during fine-tuning (saves memory!)
+    lengths = [len(d["input_ids"]) for d in data]
+    longest_seq_length = max(lengths)
+    longest_seq_ix = lengths.index(longest_seq_length)
+    return longest_seq_length, longest_seq_ix
+
+
+def save_adapter_v2_checkpoint(fabric: L.Fabric, model: torch.nn.Module, file_path: Path) -> None:
+    fabric.print(f"Saving adapter v2 weights to {str(file_path)!r}")
+    fabric.save(file_path, {"model": model}, filter={"model": adapter_filter})
+
+
+def validate_args(train: TrainArgs, eval: EvalArgs) -> None:
+    issues = []
+    unsupported = [(train, ["max_tokens", "max_norm", "tie_embeddings", "lr_warmup_fraction"])]
+    for args, names in unsupported:
+        for name in names:
+            if getattr(args, name) is not None:
+                issues.append(f"{__file__} doesn't support the {name!r} argument. This is set in {args}")
+    required = [(train, ["epochs"]), (eval, ["max_new_tokens"])]
+    for args, names in required:
+        for name in names:
+            if getattr(args, name) is None:
+                issues.append(f"{__file__} requires the {name!r} argument. This is set in {args}")
+    if not train.epochs and not train.max_steps:
+        issues.append(f"{__file__} requires either epochs or max_steps to be set. This is set in {train}")
+    if issues:
+        raise ValueError("\n".join(issues))
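
The `is_accumulating` checks in both finetuning loops hinge on `train.gradient_accumulation_iters(...)`. A sketch of the usual arithmetic behind it, assuming the standard definition (the real computation lives in `litgpt.args.TrainArgs`):

    def gradient_accumulation_iters(global_batch_size: int, micro_batch_size: int, devices: int, num_nodes: int = 1) -> int:
        """Micro-batch iterations to accumulate before one optimizer step (illustrative only)."""
        world_size = devices * num_nodes
        assert global_batch_size % (micro_batch_size * world_size) == 0
        return global_batch_size // (micro_batch_size * world_size)

    # With the defaults above (global_batch_size=16, micro_batch_size=1) on a single GPU:
    print(gradient_accumulation_iters(16, 1, 1))  # -> 16 micro-steps per optimizer step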
litgpt/finetune/full.py ADDED
@@ -0,0 +1,457 @@
1
+ # Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
2
+ import dataclasses
3
+ import math
4
+ import os
5
+ import time
6
+ from pathlib import Path
7
+ from pprint import pprint
8
+ from typing import Dict, List, Literal, Optional, Tuple, Union
9
+
10
+ import lightning as L
11
+ import torch
12
+ from lightning.fabric.strategies import FSDPStrategy
13
+ from torch.utils.data import ConcatDataset, DataLoader
14
+ from torchmetrics import RunningMean
15
+
16
+ from litgpt.args import EvalArgs, LogArgs, TrainArgs
17
+ from litgpt.data import Alpaca, DataModule
18
+ from litgpt.generate.base import generate
19
+ from litgpt.model import GPT, Block, Config
20
+ from litgpt.prompts import save_prompt_style
21
+ from litgpt.tokenizer import Tokenizer
22
+ from litgpt.utils import (
23
+ CycleIterator,
24
+ auto_download_checkpoint,
25
+ check_nvlink_connectivity,
26
+ check_valid_checkpoint_dir,
27
+ choose_logger,
28
+ chunked_cross_entropy,
29
+ copy_config_files,
30
+ create_finetuning_performance_report,
31
+ find_resume_path,
32
+ get_default_supported_precision,
33
+ init_out_dir,
34
+ instantiate_torch_optimizer,
35
+ load_checkpoint,
36
+ num_parameters,
37
+ parse_devices,
38
+ save_hyperparameters,
39
+ select_sft_generate_example,
40
+ )
41
+
42
+
43
+ def setup(
44
+ checkpoint_dir: Path,
45
+ out_dir: Path = Path("out/finetune/full"),
46
+ precision: Optional[str] = None,
47
+ devices: Union[int, str] = 1,
48
+ num_nodes: int = 1,
49
+ resume: Union[bool, Literal["auto"], Path] = False,
50
+ data: Optional[DataModule] = None,
51
+ train: TrainArgs = TrainArgs(
52
+ save_interval=1000,
53
+ log_interval=1,
54
+ global_batch_size=16,
55
+ micro_batch_size=1,
56
+ lr_warmup_steps=100,
57
+ epochs=5,
58
+ max_seq_length=None,
59
+ ),
60
+ eval: EvalArgs = EvalArgs(interval=600, max_new_tokens=100, max_iters=100),
61
+ log: LogArgs = LogArgs(),
62
+ optimizer: Union[str, Dict] = "AdamW",
63
+ logger_name: Literal["wandb", "tensorboard", "csv", "mlflow"] = "csv",
64
+ seed: int = 1337,
65
+ access_token: Optional[str] = None,
66
+ ) -> None:
67
+ """Finetune a model.
68
+
69
+ Arguments:
70
+ checkpoint_dir: The path to the base model's checkpoint directory to load for finetuning.
71
+ out_dir: Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
72
+ /teamspace/jobs/<job-name>/share.
73
+ precision: The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true".
74
+ devices: How many devices/GPUs to use
75
+ num_nodes: How many nodes the code is being run on.
76
+ resume: Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume
77
+ from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing
78
+ ``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists.
79
+ data: Data-related arguments. If not provided, the default is ``litgpt.data.Alpaca``.
80
+ train: Training-related arguments. See ``litgpt.args.TrainArgs`` for details.
81
+ eval: Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details.
82
+ optimizer: An optimizer name (such as "AdamW") or config.
83
+ logger_name: The name of the logger to send metrics to.
84
+ seed: The random seed to use for reproducibility.
85
+ access_token: Optional API token to access models with restrictions.
86
+ """
87
+ checkpoint_dir = auto_download_checkpoint(model_name=checkpoint_dir, access_token=access_token)
88
+ pprint(locals())
89
+ data = Alpaca() if data is None else data
90
+ devices = parse_devices(devices)
91
+ out_dir = init_out_dir(out_dir)
92
+
93
+ check_valid_checkpoint_dir(checkpoint_dir)
94
+ config = Config.from_file(checkpoint_dir / "model_config.yaml")
95
+
96
+ precision = precision or get_default_supported_precision(training=True)
97
+ logger = choose_logger(
98
+ logger_name,
99
+ out_dir,
100
+ name=f"finetune-{config.name}",
101
+ resume=bool(resume),
102
+ log_interval=train.log_interval,
103
+ log_args=dataclasses.asdict(log),
104
+ )
105
+
106
+ if devices * num_nodes > 1:
107
+ strategy = FSDPStrategy(
108
+ auto_wrap_policy={Block},
109
+ activation_checkpointing_policy={Block},
110
+ state_dict_type="full",
111
+ limit_all_gathers=True,
112
+ cpu_offload=False,
113
+ )
114
+ else:
115
+ strategy = "auto"
116
+
117
+ fabric = L.Fabric(devices=devices, num_nodes=num_nodes, strategy=strategy, precision=precision, loggers=logger)
118
+
119
+ if torch.cuda.is_available() and devices > 1:
120
+ check_nvlink_connectivity(fabric)
121
+
122
+ fabric.launch(main, devices, resume, seed, config, data, checkpoint_dir, out_dir, train, eval, optimizer, num_nodes)
123
+
124
+
125
+ def main(
126
+ fabric: L.Fabric,
127
+ devices: int,
128
+ resume: Union[bool, Literal["auto"], Path],
129
+ seed: int,
130
+ config: Config,
131
+ data: DataModule,
132
+ checkpoint_dir: Path,
133
+ out_dir: Path,
134
+ train: TrainArgs,
135
+ eval: EvalArgs,
136
+ optimizer: Union[str, Dict],
137
+ num_nodes: int = 1,
138
+ ) -> None:
139
+ validate_args(train, eval)
140
+
141
+ tokenizer = Tokenizer(checkpoint_dir)
142
+ train_dataloader, val_dataloader = get_dataloaders(fabric, data, tokenizer, train)
143
+ steps_per_epoch = len(train_dataloader) // train.gradient_accumulation_iters(devices, num_nodes)
144
+ lr_max_steps = min(train.epochs * steps_per_epoch, (train.max_steps or float("inf")))
145
+
146
+ fabric.seed_everything(seed) # same seed for every process to init model (FSDP)
147
+
148
+ if fabric.global_rank == 0:
149
+ os.makedirs(out_dir, exist_ok=True)
150
+
151
+ checkpoint_path = checkpoint_dir / "lit_model.pth"
152
+ with fabric.init_module(empty_init=(fabric.world_size > 1)):
153
+ model = GPT(config)
154
+
155
+ fabric.print(f"Number of trainable parameters: {num_parameters(model, requires_grad=True):,}")
156
+
157
+ model = fabric.setup(model)
158
+
159
+ optimizer = instantiate_torch_optimizer(optimizer, model.parameters())
160
+ optimizer = fabric.setup_optimizers(optimizer)
161
+ scheduler = get_lr_scheduler(optimizer, warmup_steps=train.lr_warmup_steps, max_steps=lr_max_steps)
162
+ state = {"model": model, "optimizer": optimizer, "scheduler": scheduler, "iter_num": 0, "step_count": 0}
163
+
164
+ resume = find_resume_path(resume, out_dir)
165
+ if resume:
166
+ fabric.print(f"Resuming training from {resume}")
167
+ fabric.load(resume, state)
168
+ else:
169
+ load_checkpoint(fabric, state["model"], checkpoint_path)
170
+
171
+ train_time = time.perf_counter()
172
+ token_counts = fit(
173
+ fabric=fabric,
174
+ state=state,
175
+ train_dataloader=train_dataloader,
176
+ val_dataloader=val_dataloader,
177
+ devices=devices,
178
+ num_nodes=num_nodes,
179
+ resume=resume,
180
+ checkpoint_dir=checkpoint_dir,
181
+ out_dir=out_dir,
182
+ train=train,
183
+ eval=eval,
184
+ data=data,
185
+ )
186
+ training_time = time.perf_counter() - train_time
187
+ output = create_finetuning_performance_report(training_time, token_counts, fabric.device.type)
188
+ fabric.print(output)
189
+
190
+ # Final evaluation
191
+ if eval.final_validation:
192
+ val_loss = validate(fabric, model, val_dataloader, dataclasses.replace(eval, max_iters=len(val_dataloader)))
193
+ metrics = {"val_loss": val_loss, "val_ppl": math.exp(val_loss)}
194
+ fabric.log_dict(metrics, step=state["iter_num"])
195
+ fabric.print(f"Final evaluation | val loss: {val_loss.item():.3f} | val ppl: {math.exp(val_loss):.3f}")
196
+
197
+    # Save the final checkpoint at the end of training
+    save_path = out_dir / "final" / "lit_model.pth"
+    save_path.parent.mkdir(parents=True, exist_ok=True)
+    fabric.save(save_path, {"model": state["model"]})
+    if fabric.global_rank == 0:
+        # Copy checkpoint files from original checkpoint dir
+        copy_config_files(checkpoint_dir, save_path.parent)
+        save_hyperparameters(setup, save_path.parent)
+        save_prompt_style(data.prompt_style, save_path.parent)
+
+
+def fit(
+    fabric: L.Fabric,
+    state: Dict,
+    train_dataloader: DataLoader,
+    val_dataloader: DataLoader,
+    devices: int,
+    resume: Union[bool, Literal["auto"], Path],
+    checkpoint_dir: Path,
+    out_dir: Path,
+    train: TrainArgs,
+    eval: EvalArgs,
+    data: DataModule,
+    num_nodes: int = 1,
+) -> Dict[str, int]:
+    model = state["model"]
+    optimizer = state["optimizer"]
+    scheduler = state["scheduler"]
+    tokenizer = Tokenizer(checkpoint_dir)
+    longest_seq_length, longest_seq_ix = get_longest_seq_length(
+        ConcatDataset([train_dataloader.dataset, val_dataloader.dataset])
+    )
+    model.max_seq_length = min(longest_seq_length, train.max_seq_length or float("inf"))
+    fabric.print(
+        f"The longest sequence length in the train data is {longest_seq_length}, the model's maximum sequence length is"
+        f" {model.max_seq_length} and context length is {model.config.block_size}"
+    )
+
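+    # Three token counters: content tokens alone, content plus the prompt template,
+    # and the full padded batches actually fed through the model.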
+    token_counts = {
+        "raw_tokens": torch.tensor(0, device=fabric.device, dtype=torch.long),
+        "raw_tokens_plus_prompt_template": torch.tensor(0, device=fabric.device, dtype=torch.long),
+        "raw_tokens_plus_prompt_template_and_padding": torch.tensor(0, device=fabric.device, dtype=torch.long),
+    }
+
+    if eval.initial_validation:
+        val_loss = validate(fabric, model, val_dataloader, dataclasses.replace(eval, max_iters=len(val_dataloader)))
+        val_loss = f"{val_loss:.3f}"
+    else:
+        fabric.print("Verifying settings ...")
+        validate(fabric, model, val_dataloader, dataclasses.replace(eval, max_iters=2), verbose=False)  # sanity check
+        val_loss = "n/a"
+
+    initial_iter = state["iter_num"]
+    max_steps = train.max_steps or float("inf")
+    train_iterator = CycleIterator(train_dataloader)
+
+    # resume data loader state by fast-forwarding through all seen batches
+    if resume:
+        resume_t0 = time.perf_counter()
+        for resume_iter in range(initial_iter):
+            next(train_iterator)
+            if resume_iter % 1000 == 0:
+                fabric.print(f"Resuming dataset: {resume_iter} / {initial_iter}")
+        fabric.barrier()
+        fabric.print(
+            f"Resuming data loader finished. Took {time.perf_counter() - resume_t0:.1f} seconds to reach iteration"
+            f" {initial_iter}."
+        )
+
+    running_loss = RunningMean(window=train.gradient_accumulation_iters(devices, num_nodes), sync_on_compute=False).to(
+        fabric.device
+    )
+    fabric.barrier()
+
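+    # Main loop: `iter_num` counts micro-batches, `step_count` counts optimizer steps;
+    # training stops at `max_steps` optimizer steps or after `train.epochs` epochs.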
+    while state["step_count"] < max_steps:
+        state["iter_num"] += 1
+        iter_t0 = time.perf_counter()
+        batch = next(train_iterator)
+        if train_iterator.epoch >= train.epochs:
+            break
+        input_ids, targets = batch["input_ids"], batch["labels"]
+
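+        # While accumulating, skip the cross-device gradient sync and scale the loss so
+        # the summed micro-batch gradients match a single large-batch update.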
+        is_accumulating = state["iter_num"] % train.gradient_accumulation_iters(devices, num_nodes) != 0
+        with fabric.no_backward_sync(model, enabled=is_accumulating):
+            logits = model(input_ids)
+            # shift the targets such that output n predicts token n+1
+            loss = chunked_cross_entropy(logits[..., :-1, :], targets[..., 1:])
+            fabric.backward(loss / train.gradient_accumulation_iters(devices, num_nodes))
+
+        running_loss.update(loss.detach())
+
+        if not is_accumulating:
+            optimizer.step()
+            optimizer.zero_grad()
+            scheduler.step()
+            state["step_count"] += 1
+
+        token_counts["raw_tokens"] += batch["token_counts"]["raw"].sum().item()
+        token_counts["raw_tokens_plus_prompt_template"] += (
+            batch["token_counts"]["raw_plus_prompt_template"].sum().item()
+        )
+        token_counts["raw_tokens_plus_prompt_template_and_padding"] += input_ids.numel()
+
+        if state["iter_num"] % train.log_interval == 0:
+            loss = running_loss.compute().item()  # expensive device-to-host synchronization
+            t1 = time.perf_counter()
+            metrics = {
+                "loss": loss,
+                "iter": state["iter_num"],
+                "step": state["step_count"],
+                "epoch": train_iterator.epoch,
+                "iter_time": t1 - iter_t0,
+                "tokens": token_counts["raw_tokens_plus_prompt_template"],
+                "total_tokens": token_counts["raw_tokens_plus_prompt_template"] * fabric.world_size,
+                "learning_rate": scheduler.get_last_lr()[0],
+            }
+            if isinstance(val_loss, torch.Tensor):
+                val_loss = f"{val_loss:.3f}"
+            fabric.print(
+                f"Epoch {metrics['epoch'] + 1} | iter {metrics['iter']} step {metrics['step']} |"
+                f" loss train: {metrics['loss']:.3f},"
+                f" val: {val_loss} |"
+                f" iter time: {metrics['iter_time'] * 1000:.2f} ms"
+                f"{' (step)' if not is_accumulating else ''}"
+            )
+            fabric.log_dict(metrics, step=state["iter_num"])
+
+        if not is_accumulating and state["step_count"] % eval.interval == 0:
+            t0 = time.perf_counter()
+            val_loss = validate(fabric, model, val_dataloader, eval)
+            generate_example(fabric, model, tokenizer, eval, data)
+            t1 = time.perf_counter() - t0
+
+            val_loss_tensor = val_loss.detach().clone().to(fabric.device)
+            val_time_tensor = torch.tensor(t1, device=fabric.device, dtype=torch.float32)
+
+            fabric.all_reduce(val_loss_tensor, reduce_op="mean")
+            fabric.all_reduce(val_time_tensor, reduce_op="mean")
+
+            fabric.print(
+                f"iter {state['iter_num']}: val loss {val_loss_tensor.item():.4f}, val time: {val_time_tensor.item() * 1000:.2f} ms"
+            )
+            metrics = {"val_loss": val_loss_tensor, "val_ppl": math.exp(val_loss_tensor)}
+            fabric.log_dict(metrics, step=state["iter_num"])
+            fabric.barrier()
+        if train.save_interval is not None and not is_accumulating and state["step_count"] % train.save_interval == 0:
+            checkpoint_file = out_dir / f"step-{state['step_count']:06d}" / "lit_model.pth"
+            checkpoint_file.parent.mkdir(parents=True, exist_ok=True)
+            fabric.print(f"Saving checkpoint to {str(checkpoint_file.parent)!r}")
+            fabric.save(checkpoint_file, state)
+            if fabric.global_rank == 0:
+                copy_config_files(checkpoint_dir, checkpoint_file.parent)
+                save_hyperparameters(setup, checkpoint_file.parent)
+                save_prompt_style(data.prompt_style, checkpoint_file.parent)
+
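+    # Sum the per-rank token counters across all processes before reporting.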
+    total_token_counts = {}
+    for key in token_counts:
+        total = fabric.all_reduce(token_counts[key], reduce_op="sum")
+        total_token_counts[key] = total.item()
+
+    return total_token_counts
+
+
+# FSDP has issues with `inference_mode`
+@torch.no_grad()
+def validate(
+    fabric: L.Fabric, model: GPT, val_dataloader: DataLoader, eval: EvalArgs, verbose: bool = True
+) -> torch.Tensor:
+    if verbose:
+        fabric.print("Validating ...")
+    model.eval()
+    losses = torch.zeros(min(len(val_dataloader), eval.max_iters))
+    for k, batch in enumerate(val_dataloader):
+        if k >= eval.max_iters:
+            break
+        input_ids, targets = batch["input_ids"], batch["labels"]
+        logits = model(input_ids)
+        losses[k] = chunked_cross_entropy(logits[..., :-1, :], targets[..., 1:], chunk_size=0)
+
+    val_loss = losses.mean()
+    model.train()
+    return val_loss
+
+
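+# Generates a sample completion for an instruction chosen by `select_sft_generate_example`,
+# so training progress can be inspected qualitatively alongside the validation loss.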
+@torch.no_grad()
+def generate_example(fabric: L.Fabric, model: GPT, tokenizer: Tokenizer, eval: EvalArgs, data: DataModule):
+    instruction = select_sft_generate_example(eval, data)
+    fabric.print(instruction)
+    prompt = data.prompt_style.apply(instruction)
+    encoded = tokenizer.encode(prompt, device=fabric.device)
+    model.eval()
+
+    max_returned_tokens = len(encoded) + eval.max_new_tokens
+
+    if max_returned_tokens < model.max_seq_length:
+        with fabric.init_tensor():
+            # do not set `max_seq_length=max_returned_tokens` because memory is not a concern here
+            model.set_kv_cache(batch_size=1)
+        output = generate(
+            model, encoded, max_returned_tokens=max_returned_tokens, temperature=0.8, eos_id=tokenizer.eos_id
+        )
+        model.clear_kv_cache()
+        model.train()
+        output = tokenizer.decode(output)
+        fabric.print(f"{output}\n")
+    else:
+        print(
+            f"Length of the encoded instruction ({len(encoded)}) plus eval.max_new_tokens ({eval.max_new_tokens}) "
+            f"exceeds model.max_seq_length ({model.max_seq_length}) used for training. Skipping example generation for efficiency. "
+            f"The model's supported context size (post-training) is {model.config.block_size}."
+        )
+
+
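+# Example (assuming warmup_steps=100, max_steps=1000, base lr=1e-4): the learning rate
+# rises linearly to 1e-4 over the first 100 steps, then follows a cosine decay toward 0
+# over the remaining 900 steps.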
+def get_lr_scheduler(optimizer, warmup_steps: int, max_steps: int):
+    # linear warmup followed by cosine annealing
+    scheduler1 = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: step / warmup_steps)
+    scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=(max_steps - warmup_steps))
+    return torch.optim.lr_scheduler.SequentialLR(optimizer, [scheduler1, scheduler2], milestones=[warmup_steps])
+
+
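+# `fabric.rank_zero_first()` lets rank 0 run data preparation before the other ranks
+# proceed, so the dataset is downloaded and preprocessed once rather than per process.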
+def get_dataloaders(
+    fabric: L.Fabric, data: DataModule, tokenizer: Tokenizer, train: TrainArgs
+) -> Tuple[DataLoader, DataLoader]:
+    data.connect(tokenizer=tokenizer, batch_size=train.micro_batch_size, max_seq_length=train.max_seq_length)
+    with fabric.rank_zero_first():
+        data.prepare_data()
+        data.setup()
+    train_dataloader = data.train_dataloader()
+    val_dataloader = data.val_dataloader()
+    train_dataloader, val_dataloader = fabric.setup_dataloaders(train_dataloader, val_dataloader)
+    return train_dataloader, val_dataloader
+
+
+def get_longest_seq_length(data: List[Dict]) -> Tuple[int, int]:
+    # find out the minimum max_seq_length required during fine-tuning (saves memory!)
+    lengths = [len(d["input_ids"]) for d in data]
+    longest_seq_length = max(lengths)
+    longest_seq_ix = lengths.index(longest_seq_length)
+    return longest_seq_length, longest_seq_ix
+
+
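+# Collects every unsupported or missing argument before raising, so a single error
+# message reports all configuration problems at once.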
+def validate_args(train: TrainArgs, eval: EvalArgs) -> None:
+    issues = []
+    unsupported = [(train, ["max_tokens", "max_norm", "tie_embeddings", "lr_warmup_fraction"])]
+    for args, names in unsupported:
+        for name in names:
+            if getattr(args, name) is not None:
+                issues.append(f"{__file__} doesn't support the {name!r} argument. This is set in {args}")
+    required = [(train, ["epochs"]), (eval, ["max_new_tokens"])]
+    for args, names in required:
+        for name in names:
+            if getattr(args, name) is None:
+                issues.append(f"{__file__} requires the {name!r} argument. This is set in {args}")
+    if not train.epochs and not train.max_steps:
+        issues.append(f"{__file__} requires either epochs or max_steps to be set. This is set in {train}")
+    if issues:
+        raise ValueError("\n".join(issues))