Update app.py
app.py CHANGED
@@ -11,6 +11,15 @@ model = SentenceTransformer('intfloat/multilingual-e5-large-instruct')
 print("load model end")
 print(time.time())
 
+quran = pd.read_csv('quran-eng.csv', delimiter=",")
+print("load quran eng")
+print(time.time())
+
+file = open('encoded_quran_text_split_multilingual-e5-large-instructs.sav','rb')
+document_embeddings = pickle.load(file)
+print("load quran embedding")
+print(time.time())
+
 def make_clickable_both(val):
     name, url = val.split('#')
     print(name+"\n")
@@ -28,10 +37,6 @@ def find(query):
     queries = [
         get_detailed_instruct(task, query)
     ]
-
-    quran = pd.read_csv('quran-eng.csv', delimiter=",")
-    print("load quran eng")
-    print(time.time())
 
     #file = open('quran-splitted.sav','rb')
     #quran_splitted = pickle.load(file)
@@ -42,10 +47,6 @@ def find(query):
     # document_embeddings = model.encode(documents, convert_to_tensor=True, normalize_embeddings=True)
     # filename = 'encoded_quran_text_split_multilingual-e5-large-instruct.sav'
     # pickle.dump(embeddings, open(filename, 'wb'))
-    file = open('encoded_quran_text_split_multilingual-e5-large-instructs.sav','rb')
-    document_embeddings = pickle.load(file)
-    print("load quran embedding")
-    print(time.time())
-    print(time.time())
 
     query_embeddings = model.encode(queries, convert_to_tensor=True, normalize_embeddings=True)
     print("embed query")
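The commit moves the CSV load and the pickled-embedding load from inside find() up to module level, so they run once when the Space starts instead of on every query; only the query embedding is computed per request. A minimal sketch of the resulting structure, assuming the surrounding code, is below. The task string and the final cos_sim scoring step are assumptions, since the hunks end at the query-encoding line and do not show them.

import pickle
import time

import pandas as pd
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('intfloat/multilingual-e5-large-instruct')
print("load model end")

# Loaded once at startup (moved out of find() by this commit).
quran = pd.read_csv('quran-eng.csv', delimiter=",")
with open('encoded_quran_text_split_multilingual-e5-large-instructs.sav', 'rb') as f:
    document_embeddings = pickle.load(f)

def get_detailed_instruct(task_description, query):
    # Standard E5-instruct prompt format.
    return f'Instruct: {task_description}\nQuery: {query}'

def find(query):
    # Hypothetical task description; the real one is not shown in the diff.
    task = 'Given a search query, retrieve relevant Quran passages that answer the query'
    queries = [get_detailed_instruct(task, query)]

    # Per-request work: encode only the query; document_embeddings is reused.
    query_embeddings = model.encode(queries, convert_to_tensor=True,
                                    normalize_embeddings=True)

    # Assumed scoring step: cosine similarity against the precomputed embeddings.
    scores = util.cos_sim(query_embeddings, document_embeddings)
    return scores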