omitakahiro committed
Commit 8fbcc3a · 1 Parent(s): a1660f4

Create notebooks/LoRA.ipynb

Files changed (1)
  1. notebooks/LoRA.ipynb +234 -0
notebooks/LoRA.ipynb ADDED
@@ -0,0 +1,234 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "1a884871-7a65-4501-9063-c85ad260d0da",
+ "metadata": {},
+ "source": [
+ "This notebook is an example of LoRA tuning the stockmark/stockmark-13b model on the kunishou/databricks-dolly-15k-ja dataset. It assumes an A100 or H100 GPU. If you are using a GPU with less memory, such as a T4 or V100, try the QLoRA tuning sample in this repository instead (a rough sketch of that setup also appears after the model loading cell below).\n",
+ "\n",
+ "- Model: https://huggingface.co/stockmark/stockmark-13b\n",
+ "- Dataset: https://github.com/kunishou/databricks-dolly-15k-ja\n",
+ "\n",
+ "The example below trains for one epoch, which takes about 30 minutes on an A100 GPU.\n",
+ "\n",
+ "Note that the hyperparameters used here are not optimized; adjust them as needed."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "93b3f4b5-2825-4ef3-a0ee-7a60155aee5d",
+ "metadata": {},
+ "source": [
+ "# Setup"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6a694ba9-a0fa-4f14-81cf-f35f683ba889",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "import datasets\n",
+ "from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments\n",
+ "from peft import get_peft_model, LoraConfig, PeftModel, PeftConfig\n",
+ "\n",
+ "model_name = \"stockmark/stockmark-13b\"\n",
+ "peft_model_name = \"stockmark-13b-adapter\"\n",
+ "\n",
+ "prompt_template = \"\"\"### Instruction:\n",
+ "{instruction}\n",
+ "\n",
+ "### Input:\n",
+ "{input}\n",
+ "\n",
+ "### Response:\n",
+ "\"\"\"\n",
+ "\n",
+ "def encode(sample):\n",
+ "    prompt = prompt_template.format(instruction=sample[\"instruction\"], input=sample[\"input\"])\n",
+ "    target = sample[\"output\"]\n",
+ "    input_ids_prompt, input_ids_target = tokenizer([prompt, target]).input_ids\n",
+ "    input_ids_target = input_ids_target + [tokenizer.eos_token_id]\n",
+ "    input_ids = input_ids_prompt + input_ids_target\n",
+ "    labels = input_ids.copy()\n",
+ "    labels[:len(input_ids_prompt)] = [-100] * len(input_ids_prompt)  # mask prompt tokens out of the loss\n",
+ "    return {\"input_ids\": input_ids, \"labels\": labels}\n",
+ "\n",
+ "def get_collator(tokenizer, max_length):\n",
+ "    def collator(batch):\n",
+ "        # truncate each sample, pad input_ids, then pad labels with -100 to the same length\n",
+ "        batch = [{key: value[:max_length] for key, value in sample.items()} for sample in batch]\n",
+ "        batch = tokenizer.pad(batch)\n",
+ "        batch[\"labels\"] = [e + [-100] * (len(batch[\"input_ids\"][0]) - len(e)) for e in batch[\"labels\"]]\n",
+ "        batch = {key: torch.tensor(value) for key, value in batch.items()}\n",
+ "        return batch\n",
+ "\n",
+ "    return collator"
+ ]
+ },
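+ {
+ "cell_type": "markdown",
+ "id": "encode-sanity-check-note",
+ "metadata": {},
+ "source": [
+ "Optional sanity check for `encode` (a sketch; run it after the tokenizer is loaded in the next section): every prompt position in `labels` should be masked with -100 so that only response tokens contribute to the loss."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "encode-sanity-check-code",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# sketch: toy sample to confirm that prompt tokens are masked with -100\n",
+ "sample = {\"instruction\": \"日本の首都は?\", \"input\": \"\", \"output\": \"東京です。\"}\n",
+ "encoded = encode(sample)\n",
+ "print(len(encoded[\"input_ids\"]), len(encoded[\"labels\"]))\n",
+ "print(encoded[\"labels\"])  # -100 for every prompt position, token ids for the response"
+ ]
+ },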
+ {
+ "cell_type": "markdown",
+ "id": "51e6cfcf-1ac1-400e-a4bc-ea64375d0f9e",
+ "metadata": {},
+ "source": [
+ "# Loading the dataset and model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3ac80067-4e60-46c4-90da-05647cf96ccd",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# load tokenizer\n",
+ "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
+ "\n",
+ "# prepare dataset: tokenize, then hold out 10% for validation\n",
+ "dataset_name = \"kunishou/databricks-dolly-15k-ja\"\n",
+ "dataset = datasets.load_dataset(dataset_name)\n",
+ "dataset = dataset.map(encode)\n",
+ "dataset = dataset[\"train\"].train_test_split(0.1)\n",
+ "train_dataset = dataset[\"train\"]\n",
+ "val_dataset = dataset[\"test\"]\n",
+ "\n",
+ "# load model in bfloat16\n",
+ "model = AutoModelForCausalLM.from_pretrained(model_name, device_map=\"auto\", torch_dtype=torch.bfloat16)\n",
+ "\n",
+ "# LoRA on the attention projections\n",
+ "peft_config = LoraConfig(\n",
+ "    task_type=\"CAUSAL_LM\",\n",
+ "    inference_mode=False,\n",
+ "    target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"],\n",
+ "    r=16,\n",
+ "    lora_alpha=32,\n",
+ "    lora_dropout=0.05\n",
+ ")\n",
+ "\n",
+ "model = get_peft_model(model, peft_config)\n",
+ "model.print_trainable_parameters()"
+ ]
+ },
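+ {
+ "cell_type": "markdown",
+ "id": "qlora-sketch-note",
+ "metadata": {},
+ "source": [
+ "Rough sketch of the low-memory alternative mentioned in the introduction: loading the base model in 4-bit with bitsandbytes before applying LoRA. This assumes the `bitsandbytes` package is installed and is not a substitute for the QLoRA notebook in this repository."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "qlora-sketch-code",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# sketch only: 4-bit NF4 loading for small GPUs (assumes bitsandbytes is installed)\n",
+ "from transformers import BitsAndBytesConfig\n",
+ "from peft import prepare_model_for_kbit_training\n",
+ "\n",
+ "bnb_config = BitsAndBytesConfig(\n",
+ "    load_in_4bit=True,\n",
+ "    bnb_4bit_quant_type=\"nf4\",\n",
+ "    bnb_4bit_compute_dtype=torch.bfloat16\n",
+ ")\n",
+ "model_4bit = AutoModelForCausalLM.from_pretrained(\n",
+ "    model_name, device_map=\"auto\", quantization_config=bnb_config\n",
+ ")\n",
+ "# prepare the quantized model for k-bit training before applying the LoRA config\n",
+ "model_4bit = prepare_model_for_kbit_training(model_4bit)\n",
+ "model_4bit = get_peft_model(model_4bit, peft_config)"
+ ]
+ },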
+ {
+ "cell_type": "markdown",
+ "id": "9b471da0-7fba-4127-8b07-22da4cbee6a9",
+ "metadata": {},
+ "source": [
+ "# LoRA Tuning"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b9bafa12-538c-4abb-b8b3-bffeb0990b46",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "training_args = TrainingArguments(\n",
+ "    output_dir=\"./log_stockmark_13b\",\n",
+ "    learning_rate=2e-4,\n",
+ "    per_device_train_batch_size=2,\n",
+ "    gradient_accumulation_steps=8,  # effective batch size of 16 per device\n",
+ "    per_device_eval_batch_size=16,\n",
+ "    num_train_epochs=1,\n",
+ "    logging_strategy=\"steps\",\n",
+ "    logging_steps=10,\n",
+ "    save_strategy=\"epoch\",\n",
+ "    evaluation_strategy=\"epoch\",\n",
+ "    load_best_model_at_end=True,\n",
+ "    metric_for_best_model=\"eval_loss\",\n",
+ "    greater_is_better=False,\n",
+ "    save_total_limit=2\n",
+ ")\n",
+ "\n",
+ "trainer = Trainer(\n",
+ "    model=model,\n",
+ "    args=training_args,\n",
+ "    train_dataset=train_dataset,\n",
+ "    eval_dataset=val_dataset,\n",
+ "    data_collator=get_collator(tokenizer, 320)\n",
+ ")\n",
+ "\n",
+ "# LoRA tuning\n",
+ "trainer.train()\n",
+ "\n",
+ "# save the LoRA adapter weights only\n",
+ "model = trainer.model\n",
+ "model.save_pretrained(peft_model_name)"
+ ]
+ },
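+ {
+ "cell_type": "markdown",
+ "id": "eval-check-note",
+ "metadata": {},
+ "source": [
+ "Because `load_best_model_at_end=True` reloads the checkpoint with the lowest `eval_loss`, the validation loss of the saved model can optionally be confirmed as follows (a sketch using the standard `Trainer.evaluate` API):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "eval-check-code",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# optional: report the eval loss of the best (reloaded) checkpoint\n",
+ "metrics = trainer.evaluate()\n",
+ "print(metrics[\"eval_loss\"])"
+ ]
+ },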
+ {
+ "cell_type": "markdown",
+ "id": "a3f80a8e-1ac2-4bdc-8232-fe0ee18ffff5",
+ "metadata": {},
+ "source": [
+ "# Loading the trained model (optional)\n",
+ "To load the trained model in a different session, first run the code in the Setup section above, then run this cell."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "43241395-3035-4cb9-8c1c-45ffe8cd48be",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
+ "model = AutoModelForCausalLM.from_pretrained(model_name, device_map=\"auto\", torch_dtype=torch.bfloat16)\n",
+ "model = PeftModel.from_pretrained(model, peft_model_name)"
+ ]
+ },
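+ {
+ "cell_type": "markdown",
+ "id": "merge-adapter-note",
+ "metadata": {},
+ "source": [
+ "Optionally, the adapter can be folded into the base weights with the standard `PeftModel.merge_and_unload` method, which removes the LoRA indirection at inference time. A sketch (the output directory name is only an example):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "merge-adapter-code",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# optional sketch: merge LoRA weights into the base model for adapter-free inference\n",
+ "merged_model = model.merge_and_unload()\n",
+ "merged_model.save_pretrained(\"stockmark-13b-merged\")  # example directory name"
+ ]
+ },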
+ {
+ "cell_type": "markdown",
+ "id": "2ce4db1f-9bad-4c8e-9c04-d1102b299f24",
+ "metadata": {},
+ "source": [
+ "# Inference"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d7d6359b-e0ac-49df-a178-39bb9f79ca93",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "prompt = prompt_template.format(instruction=\"自然言語処理とは?\", input=\"\")\n",
+ "\n",
+ "inputs = tokenizer(prompt, return_tensors=\"pt\").to(model.device)\n",
+ "with torch.no_grad():\n",
+ "    tokens = model.generate(\n",
+ "        **inputs,\n",
+ "        max_new_tokens=128,\n",
+ "        do_sample=True,\n",
+ "        temperature=0.7\n",
+ "    )\n",
+ "\n",
+ "output = tokenizer.decode(tokens[0], skip_special_tokens=True)\n",
+ "print(output)"
+ ]
+ }
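+ ,
+ {
+ "cell_type": "markdown",
+ "id": "streaming-note",
+ "metadata": {},
+ "source": [
+ "For interactive use, tokens can also be printed as they are generated using transformers' `TextStreamer` (a sketch reusing `inputs` from the previous cell):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "streaming-code",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from transformers import TextStreamer\n",
+ "\n",
+ "# stream decoded tokens to stdout as they are generated\n",
+ "streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n",
+ "with torch.no_grad():\n",
+ "    model.generate(**inputs, streamer=streamer, max_new_tokens=128, do_sample=True, temperature=0.7)"
+ ]
+ }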
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.10"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }