{ "add_bos_token": false, "add_prefix_space": false, "added_tokens_decoder": { "7": { "content": "(", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "8": { "content": ")", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "40": { "content": "I", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "1870": { "content": "And", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "3673": { "content": "Not", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "5574": { "content": "Or", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "17821": { "content": "True", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "25101": { "content": "False", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "50256": { "content": "<|endoftext|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": true }, "50257": { "content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false } }, "bos_token": "<|endoftext|>", "clean_up_tokenization_spaces": true, "eos_token": "<|endoftext|>", "errors": "replace", "model_max_length": 1024, "pad_token": "<|endoftext|>", "tokenizer_class": "GPT2Tokenizer", "unk_token": "<|endoftext|>" }