suriya7 commited on
Commit
8a2f8be
·
verified ·
1 Parent(s): 3e5a320

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -6
README.md CHANGED
@@ -8,10 +8,10 @@ tags: []
8
  ```python
9
 
10
  # Load model directly
11
- from transformers import AutoTokenizer, AutoModelForCausalLM
12
 
13
  tokenizer = AutoTokenizer.from_pretrained("suriya7/conversational-gpt-1")
14
- model = AutoModelForCausalLM.from_pretrained("suriya7/conversational-gpt-1")
15
  ```
16
 
17
  ### Chatting
@@ -37,7 +37,7 @@ while True:
37
  conversation_history.append(user)
38
 
39
  # Ensure conversation starts with a user's input and keep only the last 2 exchanges (4 turns)
40
- conversation_history = conversation_history[-2:]
41
 
42
  # Build the full prompt
43
  prompt = prompt + "\n".join(conversation_history)
@@ -57,9 +57,6 @@ while True:
57
  pad_token_id=50259,
58
  eos_token_id=50259,
59
  num_return_sequences=1,
60
- temperature=0.2,
61
- top_p=0.9,
62
- do_sample=True
63
  )
64
 
65
  # Decode and process the model's response
 
8
  ```python
9
 
10
  # Load model directly
11
+ from transformers import AutoTokenizer, AutoModelForCausalLM
12
 
13
  tokenizer = AutoTokenizer.from_pretrained("suriya7/conversational-gpt-1")
14
+ model = AutoModelForCausalLM.from_pretrained("suriya7/conversational-gpt-1")
15
  ```
16
 
17
  ### Chatting
 
37
  conversation_history.append(user)
38
 
39
  # Ensure conversation starts with a user's input and keep only the last 5 turns
40
+ conversation_history = conversation_history[-5:]
41
 
42
  # Build the full prompt
43
  prompt = prompt + "\n".join(conversation_history)
 
57
  pad_token_id=50259,
58
  eos_token_id=50259,
59
  num_return_sequences=1,
 
 
 
60
  )
61
 
62
  # Decode and process the model's response