GameReview committed on
Commit
87a4736
1 Parent(s): cc02372

Upload initial attempt at launching app

Files changed (2)
  1. app.py +163 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,163 @@
+import os
+from googleapiclient.discovery import build
+import pandas as pd
+from urllib.parse import urlparse, parse_qs
+from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
+from scipy.special import softmax
+import gradio as gr
+
+api_key = os.environ['api_key']
+youtube_api = build('youtube', 'v3', developerKey=api_key)
+
+# Get the top 100 comments (by relevance) and load them into a dataframe
+def get_comment_data(youtube_id):
+    request = youtube_api.commentThreads().list(part="snippet", videoId=youtube_id, maxResults=100, order="relevance", textFormat="plainText")
+    response = request.execute()
+    comments = [[comment['snippet']['topLevelComment']['snippet']['textDisplay'], comment['snippet']['topLevelComment']['snippet']['likeCount']] for comment in response['items']]
+    df = pd.DataFrame(comments, columns=['Comment_Text', 'Like_Count'])
+    return df
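+
+# The commentThreads response nests each comment two levels deep, roughly:
+#   {"items": [{"snippet": {"topLevelComment": {"snippet":
+#       {"textDisplay": "...", "likeCount": 3, ...}}}}, ...]}
+# which is why the comprehension above reaches through
+# ['snippet']['topLevelComment']['snippet']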
+
+# In case we ever want every comment, not just the top 100: page through the
+# results until the API stops returning a nextPageToken
+def get_all_comments(youtube_id):
+    comments = []
+    next_page_token = None
+    while True:
+        request = youtube_api.commentThreads().list(part="snippet", videoId=youtube_id, maxResults=100, pageToken=next_page_token, order="relevance", textFormat="plainText")
+        response = request.execute()
+
+        for item in response['items']:
+            comments.append([item['snippet']['topLevelComment']['snippet']['textDisplay'], item['snippet']['topLevelComment']['snippet']['likeCount']])
+
+        if 'nextPageToken' in response:
+            next_page_token = response['nextPageToken']
+        else:
+            break
+    df = pd.DataFrame(comments, columns=['Comment_Text', 'Like_Count'])
+    return df
+
+# Get all video IDs uploaded by a channel
+def get_channel_videos(channel_id):
+    all_videos = []
+    # Initial request to retrieve the channel's videos
+    request = youtube_api.search().list(
+        part='id',
+        channelId=channel_id,
+        maxResults=50  # Adjust as needed
+    )
+
+    # list_next() builds the follow-up request from the previous response's
+    # page token and returns None once every page has been consumed
+    while request is not None:
+        response = request.execute()
+
+        for item in response.get('items', []):
+            if item['id']['kind'] == 'youtube#video':
+                all_videos.append(item['id']['videoId'])
+
+        request = youtube_api.search().list_next(request, response)
+
+    return all_videos
+
+# Expects a standard youtube.com watch URL; raises a KeyError if the URL has
+# no v= query parameter
+def get_video_id(url):
+    parsed_url = urlparse(url)
+    return parse_qs(parsed_url.query)['v'][0]
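+
+# A hedged sketch of a variant that would also accept youtu.be short links;
+# get_video_id_any is a hypothetical helper, not called anywhere in this app
+def get_video_id_any(url):
+    parsed_url = urlparse(url)
+    if parsed_url.hostname == 'youtu.be':
+        return parsed_url.path.lstrip('/')
+    return parse_qs(parsed_url.query)['v'][0]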
+
+# Set up the models and tokenizers: a 3-way sentiment model and a 28-label
+# emotion model; the pipeline reuses model2/tokenizer2 so the emotion model
+# is only loaded once
+MODEL = "cardiffnlp/twitter-roberta-base-sentiment"
+MODEL2 = "SamLowe/roberta-base-go_emotions"
+tokenizer = AutoTokenizer.from_pretrained(MODEL)
+tokenizer2 = AutoTokenizer.from_pretrained(MODEL2)
+model = AutoModelForSequenceClassification.from_pretrained(MODEL)
+model2 = AutoModelForSequenceClassification.from_pretrained(MODEL2)
+classifier = pipeline(task="text-classification", model=model2, tokenizer=tokenizer2, top_k=None)
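+# With top_k=None the classifier returns every go_emotions label with a
+# score, sorted strongest first, one inner list per input string, e.g.
+# classifier("great video!") ->
+#   [[{'label': 'admiration', 'score': 0.92}, {'label': 'approval', 'score': 0.03}, ...]]
+# (illustrative numbers)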
+
+def generate_sentiments(df, progress=gr.Progress()):
+    # Set up lists to add to the dataframe
+    pos_sent = []
+    neu_sent = []
+    neg_sent = []
+
+    feeling1 = []
+    feeling2 = []
+    feeling3 = []
+
+    for comment in progress.tqdm(df['Comment_Text'], desc="Analyzing Comments"):
+        # RoBERTa accepts at most 512 tokens including the two special tokens,
+        # so truncate long comments to 510 tokens before re-encoding
+        tokens = tokenizer.tokenize(comment)
+        if len(tokens) > 510:
+            tokens = tokens[:510]
+            comment = tokenizer.convert_tokens_to_string(tokens)
+
+        model_outputs = classifier(comment)
+
+        # Top three emotions from the go_emotions model (outputs are sorted
+        # by score, strongest first)
+        feeling1.append(model_outputs[0][0]['label'])
+        feeling2.append(model_outputs[0][1]['label'])
+        feeling3.append(model_outputs[0][2]['label'])
+
+        # Encode the comment and run the sentiment model on it
+        encoded_comment = tokenizer(comment, return_tensors='pt')
+        output = model(**encoded_comment)
+        result = output[0][0].detach().numpy()
+        # Softmax squashes the raw logits into probabilities between 0 and 1
+        result = softmax(result)
+        # For this model, index 0 is negative, 1 is neutral, 2 is positive
+        pos_sent.append(result[2])
+        neu_sent.append(result[1])
+        neg_sent.append(result[0])
+
+    # Add the sentiments to a copy of the dataframe
+    new_df = df.copy()
+    new_df['Positive_Sentiment'] = pos_sent
+    new_df['Neutral_Sentiment'] = neu_sent
+    new_df['Negative_Sentiment'] = neg_sent
+
+    new_df['Feeling 1'] = feeling1
+    new_df['Feeling 2'] = feeling2
+    new_df['Feeling 3'] = feeling3
+
+    return new_df
+
+def addWeights(df, progress=gr.Progress()):
+    df1 = generate_sentiments(df, progress)
+    # Weight each comment by its share of the total likes; fall back to
+    # uniform weights when none of the comments have any likes
+    total_likes = df1['Like_Count'].sum()
+    if total_likes == 0:
+        df1['Weights'] = 1 / len(df1)
+    else:
+        df1['Weights'] = df1['Like_Count'] / total_likes
+    return df1
+
+def getWeightSentimentAll(df, progress=gr.Progress()):
+    df1 = addWeights(df, progress)
+    # Start from a neutral 0.5, add half the like-weighted positive mass and
+    # subtract half the like-weighted negative mass, giving a score in [0, 1]
+    weighted_avg = (df1['Positive_Sentiment'] * df1['Weights']).sum() * 0.5 - (df1['Negative_Sentiment'] * df1['Weights']).sum() * 0.5 + 0.5
+    return weighted_avg
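+
+# Worked example: if the like-weighted positive mass is 0.6 and the negative
+# mass is 0.2, the score is 0.6*0.5 - 0.2*0.5 + 0.5 = 0.7, i.e. mildly
+# positive on a scale from 0 (all negative) to 1 (all positive)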
+
+def rate(youtube_url, progress=gr.Progress()):
+    try:
+        vid_id = get_video_id(youtube_url)
+        vid_df = get_comment_data(vid_id)
+        # This step to be replaced with whatever final calculation we decide
+        vid_sent = getWeightSentimentAll(vid_df, progress)
+        return vid_sent
+    except Exception:
+        raise gr.Error("Process failed. Ensure the link is a valid YouTube URL.")
+
+
+with gr.Blocks() as app:
+    gr.Markdown("""
+    # Game Review Analysis Using YouTube
+
+    ### Insert a YouTube URL to analyze the comments and get the community's review of the game!
+    """
+    )
+
+    url_input = gr.Textbox(label="YouTube URL", placeholder="Place link here")
+    rating_output = gr.Textbox(label="Community's Rating of the Game")
+    rate_btn = gr.Button("Rate!")
+    rate_btn.click(fn=rate, inputs=url_input, outputs=rating_output)
+
+app.launch()
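
A minimal smoke test of the same call chain (a hedged sketch: it assumes the definitions above are in scope, that the api_key environment variable holds a valid YouTube Data API v3 key, and it uses an arbitrary public video as the example):

    vid_id = get_video_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ")  # -> "dQw4w9WgXcQ"
    vid_df = get_comment_data(vid_id)      # dataframe of up to 100 comments and like counts
    print(getWeightSentimentAll(vid_df))   # score in [0, 1]; above 0.5 leans positive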
requirements.txt ADDED
@@ -0,0 +1,3 @@
+transformers
+torch
+google-api-python-client
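
Gradio is preinstalled on Hugging Face Spaces, but for running the app elsewhere a fuller list covering everything app.py imports might look like this (a hedged sketch; torch rather than tensorflow because the code builds PyTorch tensors with return_tensors='pt'):

    transformers
    torch
    google-api-python-client
    gradio
    pandas
    scipy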