nes470 committed
Commit: 5491467
Parent: 7339442

Upload 3 files

Files changed (3)
  1. QBModelWrapperCopy.py +2 -2
  2. qbmodel.py +52 -0
  3. tfidf.py +102 -0
QBModelWrapperCopy.py CHANGED
@@ -1,8 +1,8 @@
  from typing import List
  from transformers import PreTrainedModel
  from transformers import PretrainedConfig
- from QBModelConfig import QBModelConfig
- from qbmodel import QuizBowlModel
+ from .QBModelConfig import QBModelConfig
+ from .qbmodel import QuizBowlModel
 
  class QBModelWrapper(PreTrainedModel):
      config_class = QBModelConfig
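
The switch to relative imports matters because custom model code fetched from the Hub with trust_remote_code is imported as part of a dynamically created package, so sibling files such as QBModelConfig.py and qbmodel.py can only be resolved with relative imports. A minimal loading sketch, assuming the wrapper is exposed through auto_map in the repo's config.json (the repo id is a placeholder):

from transformers import AutoModel

# placeholder repo id; trust_remote_code is needed so the custom QBModelWrapper code is run
model = AutoModel.from_pretrained("user/quizbowl-model", trust_remote_code=True)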
qbmodel.py ADDED
@@ -0,0 +1,52 @@
+ from typing import List, Tuple
+ import nltk
+ import sklearn
+ from .tfidf import TfidfWikiGuesser
+ import numpy as np
+ import pandas as pd
+
+
+ class QuizBowlModel:
+
+     def __init__(self):
+         """
+         Load your model(s) and whatever else you need in this function.
+
+         Do NOT load your model or resources in the guess_and_buzz() function,
+         as it will increase latency severely.
+         """
+         # best accuracy so far comes from wiki_page_text.json
+         self.guesser = TfidfWikiGuesser(wikidump=None)  # a different wikidump can be specified if needed
+         print("model loaded")
+
+     def guess_and_buzz(self, question_text: List[str]) -> List[Tuple[str, bool]]:
+         """
+         This function accepts a list of question strings and returns a list of tuples,
+         each containing a string representing the guess and a boolean representing
+         whether or not to buzz.
+
+         So, guess_and_buzz(["This is a question"]) should return [("answer", False)].
+
+         If you are using a deep learning model, try to use batched prediction instead of
+         iterating with a for loop.
+         """
+         answers = []
+         top_guesses = 3  # the guesser returns this many guesses per question, sorted by confidence
+
+         for question in question_text:
+             guesses = self.guesser.make_guess(question, num_guesses=top_guesses)
+
+             # buzzing logic is not implemented yet: always buzz on the top guess
+             tup = (guesses[0], True)
+             answers.append(tup)
+
+         return answers
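
For reference, a minimal usage sketch of this interface (the package name and the question string are placeholders; because of the relative import of tfidf, the files have to be importable as a package):

from qb_package.qbmodel import QuizBowlModel  # "qb_package" is a placeholder package name

model = QuizBowlModel()
results = model.guess_and_buzz(["This author wrote Pride and Prejudice."])
for guess, should_buzz in results:
    print(guess, should_buzz)  # a Wikipedia page title and the buzz decision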
tfidf.py ADDED
@@ -0,0 +1,102 @@
+ from sklearn.feature_extraction.text import TfidfVectorizer
+ from sklearn.metrics.pairwise import cosine_similarity
+ import numpy as np
+ import json
+ import zipfile
+ import pickle
+ import os
+ from nltk.corpus import stopwords
+ from nltk.tokenize import word_tokenize
+ from nltk.stem import WordNetLemmatizer
+
+
+ class TfidfWikiGuesser:
+     def __init__(self, wikidump='resources/wiki_text_16.json') -> None:
+         self.tfidf = None
+         self.corpus = None
+         self.titles = None
+         self.vectorizer = None
+         self.lemmatizer = WordNetLemmatizer()
+         # best accuracy so far (built from wiki_page_text.json in the gdrive folder)
+         model_file = "processed_tfidf_wiki_page_text_model.pkl"
+         # model_file = "processed_tfidf_wiki_16_model.pkl"
+         full_model_path = os.path.join("./models", model_file)
+
+         if os.path.exists(full_model_path):
+             print("Loading model from pickle...")
+             self.load_from_pkl(full_model_path)
+         else:
+             if wikidump:
+                 print("No pre-trained model found, loading data from dump...")
+                 self.load_model(wikidump)
+                 self.save_model(full_model_path)
+
+     def load_model(self, wikidump):
+         # the wiki dump is a JSON array of objects with "page" and "text" fields
+         with open(wikidump) as f:
+             doc = json.load(f)
+         # alternative: read the dump from a zip archive instead
+         # with zipfile.ZipFile('resources/wiki_text_8.json.zip', 'r') as z:
+         #     with z.open('wiki_text_8.json') as f:
+         #         doc = json.load(f)
+
+         self.corpus, self.titles = self.create_corpus(doc)
+
+         self.vectorizer = TfidfVectorizer(stop_words='english')
+         self.tfidf = self.vectorizer.fit_transform(self.corpus)
+
+     def preprocess_text(self, text):
+         # guard against non-string values (e.g. NaN) in the dump
+         if isinstance(text, float):
+             return str(text)
+         tokens = word_tokenize(text.lower())
+         stop_words = set(stopwords.words('english'))
+         filtered_tokens = [token for token in tokens if token not in stop_words]
+         lemmatized_tokens = [self.lemmatizer.lemmatize(token) for token in filtered_tokens]
+         processed_text = ' '.join(lemmatized_tokens)
+         return processed_text
+
+     def create_corpus(self, json_file):
+         corpus = []
+         page_titles = []
+
+         for json_obj in json_file:
+             # preprocess_text is available but the raw page text is used here
+             corpus.append(json_obj['text'])
+             page_titles.append(json_obj['page'])
+
+         return (corpus, page_titles)
+
+     def make_guess(self, question, num_guesses=1):
+         tfidf_question = self.vectorizer.transform([question])
+
+         sim = cosine_similarity(self.tfidf, tfidf_question)
+
+         # get indices of the best-matching documents and keep the top num_guesses titles
+         sim_indices = np.argsort(sim.flatten())[::-1]
+         best_indices = sim_indices[:num_guesses]
+         best_guesses = [self.titles[i] for i in best_indices]
+
+         return best_guesses
+
+     def save_model(self, file_name):
+         with open(file_name, 'wb') as f:
+             pickle.dump({
+                 'vectorizer': self.vectorizer,
+                 'tfidf_matrix': self.tfidf,
+                 'titles': self.titles,
+             }, f)
+
+     def load_from_pkl(self, file_name):
+         with open(file_name, 'rb') as f:
+             data = pickle.load(f)
+         self.vectorizer = data['vectorizer']
+         self.tfidf = data['tfidf_matrix']
+         self.titles = data['titles']
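
A minimal sketch of exercising the guesser on its own (the question string is a placeholder; the NLTK downloads are only needed if preprocess_text is actually used):

import nltk
from tfidf import TfidfWikiGuesser

# resources used by preprocess_text (tokenizer, stopword list, lemmatizer data)
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')

# loads ./models/processed_tfidf_wiki_page_text_model.pkl if it exists,
# otherwise builds the TF-IDF matrix from the given dump and pickles it
guesser = TfidfWikiGuesser(wikidump='resources/wiki_text_16.json')
print(guesser.make_guess("This state's capital is Sacramento.", num_guesses=3))

Since TfidfVectorizer L2-normalizes rows by default, sklearn.metrics.pairwise.linear_kernel would give the same ranking as cosine_similarity in make_guess with slightly less overhead.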