mara589 committed (verified)
Commit e858133 · 1 Parent(s): 1c3c87f

Upload 7 files

added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  " ": 50257
+}
config.json CHANGED
@@ -37,5 +37,5 @@
   "torch_dtype": "float32",
   "transformers_version": "4.40.2",
   "use_cache": true,
-  "vocab_size": 50257
+  "vocab_size": 50258
 }
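
Taken together, the new added_tokens.json entry and the vocab_size bump from 50257 to 50258 look like the result of adding one token to a GPT-2-style tokenizer and resizing the model's embeddings to match. A minimal sketch of that workflow, assuming the stock gpt2 checkpoint as the starting point and a placeholder output directory:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: the files in this commit started from the stock "gpt2" checkpoint.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Register one new token; it receives id 50257, as recorded in added_tokens.json.
num_added = tokenizer.add_tokens([" "])  # " " is the literal content shown above
print(num_added, len(tokenizer))         # 1 50258

# Grow the embedding (and tied lm_head) to the new vocabulary size;
# this is what bumps "vocab_size" to 50258 in config.json.
model.resize_token_embeddings(len(tokenizer))

# "./model_dir" is a placeholder path, not the actual repo layout.
tokenizer.save_pretrained("./model_dir")
model.save_pretrained("./model_dir")
```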
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ef516cfdbb1f62aa0d77cf75155cca7a746ee867b8045cb32d8479d978e40a0f
-size 497780432
+oid sha256:c3eca955b3c6213d96203c29dab284b89421faca9341b4a8ee93899318db85b6
+size 497783504
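
The size change in the LFS pointer is consistent with that one-row resize: assuming this is GPT-2 small stored in float32 (hidden size 768), a single extra embedding row costs 768 × 4 = 3072 bytes, and 497783504 − 497780432 = 3072.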
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|endoftext|>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
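
special_tokens_map.json maps pad_token to <|endoftext|>, the usual workaround for GPT-2 checkpoints, which ship without a dedicated padding token. A hedged guess at how that field ends up in the file, reusing the placeholder path from the sketch above:

```python
from transformers import AutoTokenizer

# Assumption: padding is handled by reusing eos, the common GPT-2 pattern.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token  # "<|endoftext|>"
tokenizer.save_pretrained("./model_dir")   # writes special_tokens_map.json with pad_token set
```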
tokenizer_config.json ADDED
@@ -0,0 +1,94 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "7": {
+      "content": "(",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "8": {
+      "content": ")",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "40": {
+      "content": "I",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "1870": {
+      "content": "And",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "3673": {
+      "content": "Not",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "5574": {
+      "content": "Or",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "17821": {
+      "content": "True",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "25101": {
+      "content": "False",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50257": {
+      "content": " ",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
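
In added_tokens_decoder, every entry except 50257 ("(", ")", "I", "And", "Not", "Or", "True", "False", "<|endoftext|>") keeps its original GPT-2 id, which is consistent with pre-existing tokens having been re-registered through add_tokens rather than newly created; only id 50257 is genuinely new, matching added_tokens.json. A quick sanity check against these files, assuming they are downloaded to a local placeholder directory:

```python
from transformers import AutoTokenizer

# "./model_dir" stands in for a local copy of the files in this commit.
tokenizer = AutoTokenizer.from_pretrained("./model_dir")

print(len(tokenizer))                          # expect 50258
print(tokenizer.convert_ids_to_tokens(50257))  # the newly added token
print(tokenizer.convert_tokens_to_ids("And"))  # 1870, unchanged from base GPT-2
```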
vocab.json ADDED
The diff for this file is too large to render. See raw diff