Update app.py
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 from optimum.intel import OVModelForCausalLM
 from transformers import AutoTokenizer, pipeline
+import re

 # Load the model and tokenizer
 model_id = "hsuwill000/SmolLM2-135M-openvino"
@@ -11,13 +12,26 @@ tokenizer = AutoTokenizer.from_pretrained(model_id)
 # Build the text-generation pipeline
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

+def remove_incomplete_sentence(text):
+    # Split the text into sentences on periods, question marks, or exclamation marks
+    sentences = re.split(r'(?<=[.!?])\s+', text.strip())
+
+    # Check whether the last sentence ends with an ellipsis or lacks closing punctuation
+    last_sentence = sentences[-1]
+    if last_sentence.endswith('...') or not re.match(r'.*[.!?]$', last_sentence):
+        # Drop the trailing incomplete sentence
+        sentences = sentences[:-1]
+
+    # Return the reassembled text
+    return ' '.join(sentences)
+
 def respond(message, history):
     # Merge the current message with the conversation history
     input_text = message if not history else history[-1]["content"] + " " + message
     input_text = message
     # Get the model's response
     response = pipe(input_text, max_new_tokens=150, truncation=True, num_return_sequences=1)
-    reply = response[0]['generated_text']
+    reply = remove_incomplete_sentence(response[0]['generated_text'])

     # Return the new message format
     print(f"Message: {message}")
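For context, a minimal standalone sketch of the trimming behaviour this commit introduces: the helper below is copied from the diff, while the sample text and its truncation are made up for illustration and are not output from the Space.

import re

def remove_incomplete_sentence(text):
    # Split into sentences at whitespace that follows '.', '!' or '?'
    sentences = re.split(r'(?<=[.!?])\s+', text.strip())
    # Drop the last sentence if it trails off ('...') or lacks closing punctuation
    last_sentence = sentences[-1]
    if last_sentence.endswith('...') or not re.match(r'.*[.!?]$', last_sentence):
        sentences = sentences[:-1]
    return ' '.join(sentences)

# Hypothetical generation cut short, e.g. by max_new_tokens=150
sample = "OpenVINO runs the model on CPU. The reply arrives quickly. It then stops mid"
print(remove_incomplete_sentence(sample))
# Prints: OpenVINO runs the model on CPU. The reply arrives quickly.

The effect is that a reply truncated by the token limit ends at the last complete sentence instead of breaking off mid-word.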