import gradio as gr
import pandas as pd
import requests
from bs4 import BeautifulSoup
from docx import Document
import os
from openai import OpenAI
import json
from youtube_transcript_api import YouTubeTranscriptApi

from moviepy.editor import VideoFileClip
from pytube import YouTube

from google.oauth2 import service_account
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload
from googleapiclient.http import MediaIoBaseDownload
from googleapiclient.http import MediaIoBaseUpload

import io


from urllib.parse import urlparse, parse_qs
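
# This Gradio app accepts a CSV/XLSX/DOCX upload, a YouTube link, or a web page
# link. It extracts the text (or the video transcript), caches transcripts and
# per-segment screenshots in a Google Drive folder, and calls the OpenAI chat
# API to produce a data summary, three suggested questions, and an interactive
# chat over the extracted content.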


# The environment variable / Secret is assumed to be named GOOGLE_APPLICATION_CREDENTIALS_JSON
# credentials_json_string = os.getenv("GOOGLE_APPLICATION_CREDENTIALS_JSON")
# credentials_dict = json.loads(credentials_json_string)
# SCOPES = ['https://www.googleapis.com/auth/drive']
# credentials = service_account.Credentials.from_service_account_info(
#         credentials_dict, scopes=SCOPES)
# service = build('drive', 'v3', credentials=credentials)
# # List the first 10 files on Google Drive
# results = service.files().list(pageSize=10, fields="nextPageToken, files(id, name)").execute()
# items = results.get('files', [])

# if not items:
#     print('No files found.')
# else:
#     print("=====First 10 files on Google Drive=====")
#     print('Files:')
#     for item in items:
#         print(u'{0} ({1})'.format(item['name'], item['id']))



OUTPUT_PATH = 'videos'


OPEN_AI_KEY = os.getenv("OPEN_AI_KEY")
client = OpenAI(api_key=OPEN_AI_KEY)

# Initialize the Google Drive service
def init_drive_service():
    credentials_json_string = os.getenv("GOOGLE_APPLICATION_CREDENTIALS_JSON")
    credentials_dict = json.loads(credentials_json_string)
    SCOPES = ['https://www.googleapis.com/auth/drive']
    credentials = service_account.Credentials.from_service_account_info(
            credentials_dict, scopes=SCOPES)
    service = build('drive', 'v3', credentials=credentials)
    return service

def create_folder_if_not_exists(service, folder_name, parent_id):
    print("检查是否存在特定名称的文件夹,如果不存在则创建")
    query = f"mimeType='application/vnd.google-apps.folder' and name='{folder_name}' and '{parent_id}' in parents and trashed=false"
    response = service.files().list(q=query, spaces='drive', fields="files(id, name)").execute()
    folders = response.get('files', [])
    if not folders:
        # Folder does not exist; create a new one
        file_metadata = {
            'name': folder_name,
            'mimeType': 'application/vnd.google-apps.folder',
            'parents': [parent_id]
        }
        folder = service.files().create(body=file_metadata, fields='id').execute()
        return folder.get('id')
    else:
        # Folder already exists
        return folders[0]['id']

# Check whether a file already exists in a Google Drive folder
def check_file_exists(service, folder_id, file_name):
    query = f"name = '{file_name}' and '{folder_id}' in parents and trashed = false"
    response = service.files().list(q=query).execute()
    files = response.get('files', [])
    return len(files) > 0, files[0]['id'] if files else None


def upload_to_drive(service, file_name, folder_id, content):
    print("Uploading text content to the specified Google Drive folder")
    # content is a plain string; MediaFileUpload only accepts a file path,
    # so wrap the text in an in-memory bytes buffer and use MediaIoBaseUpload.
    file_metadata = {'name': file_name, 'parents': [folder_id]}
    fh = io.BytesIO(content.encode('utf-8'))
    media = MediaIoBaseUpload(fh, mimetype='text/plain', resumable=True)

    service.files().create(body=file_metadata, media_body=media, fields='id').execute()

def upload_content_directly(service, file_name, folder_id, content):
    """
    Upload string content directly to a new file on Google Drive.
    """
    file_metadata = {'name': file_name, 'parents': [folder_id]}
    # Wrap the text content in an in-memory bytes buffer
    fh = io.BytesIO(content.encode('utf-8'))
    media = MediaIoBaseUpload(fh, mimetype='text/plain', resumable=True)
    
    # Perform the upload
    file = service.files().create(body=file_metadata, media_body=media, fields='id').execute()
    return file.get('id')

def download_file_as_string(service, file_id):
    """
    Download a file from Google Drive and return its content as a string.
    """
    request = service.files().get_media(fileId=file_id)
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while done is False:
        status, done = downloader.next_chunk()
    fh.seek(0)
    content = fh.read().decode('utf-8')
    return content

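# Upload a local JPEG file to the given Drive folder and return its file ID.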
def upload_img_directly(service, file_name, folder_id, file_path):
    file_metadata = {'name': file_name, 'parents': [folder_id]}
    media = MediaFileUpload(file_path, mimetype='image/jpeg')
    file = service.files().create(body=file_metadata, media_body=media, fields='id').execute()
    return file.get('id')  # return the file ID

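# Make a Drive file readable by anyone with the link, so the screenshot URLs
# embedded in the transcript HTML can be loaded by the browser.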
def set_public_permission(service, file_id):
    service.permissions().create(
        fileId=file_id,
        body={"type": "anyone", "role": "reader"},
        fields='id',
    ).execute()

def update_file_on_drive(service, file_id, file_content):
    """
    Update the content of an existing file on Google Drive.

    Args:
    - service: Google Drive API service instance.
    - file_id: ID of the file to update.
    - file_content: new content for the file, as a string.
    """
    # Convert the new content to a byte stream
    fh = io.BytesIO(file_content.encode('utf-8'))
    media = MediaIoBaseUpload(fh, mimetype='application/json', resumable=True)
    
    # Update the file
    updated_file = service.files().update(
        fileId=file_id,
        media_body=media
    ).execute()
    
    print(f"文件已更新,文件ID: {updated_file['id']}")

def process_file(file):
    # Read the uploaded file into a plain-text string
    if file.name.endswith('.csv'):
        df = pd.read_csv(file)
        df_string = df_to_text(df)
    elif file.name.endswith('.xlsx'):
        df = pd.read_excel(file)
        df_string = df_to_text(df)
    elif file.name.endswith('.docx'):
        df_string = docx_to_text(file)
    else:
        raise ValueError("Unsupported file type")

    # Yilan data: replace the @XX@ delimiter with |
    df_string = df_string.replace("@XX@", "|")

    # Generate questions and a summary from the uploaded content
    questions = generate_questions(df_string)
    df_summarise = generate_df_summarise(df_string)

    # Return the question-button texts, the summary, and the raw text
    return questions[0] if len(questions) > 0 else "", \
           questions[1] if len(questions) > 1 else "", \
           questions[2] if len(questions) > 2 else "", \
           df_summarise, \
           df_string

def df_to_text(df):
    # Convert the DataFrame to plain text
    return df.to_string()

def docx_to_text(file):
    # Convert the Word document to plain text
    doc = Document(file)
    return "\n".join([para.text for para in doc.paragraphs])

def format_seconds_to_time(seconds):
    """Format a number of seconds as HH:MM:SS."""
    hours = int(seconds // 3600)
    minutes = int((seconds % 3600) // 60)
    seconds = int(seconds % 60)
    return f"{hours:02}:{minutes:02}:{seconds:02}"

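# Examples (illustrative video ID only):
#   extract_youtube_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ")  ->  "dQw4w9WgXcQ"
#   extract_youtube_id("https://youtu.be/dQw4w9WgXcQ")                 ->  "dQw4w9WgXcQ"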
def extract_youtube_id(url):
    parsed_url = urlparse(url)
    
    if "youtube.com" in parsed_url.netloc:
        # For standard links the video ID is in the 'v' query parameter
        query_params = parse_qs(parsed_url.query)
        return query_params.get("v")[0] if "v" in query_params else None
    elif "youtu.be" in parsed_url.netloc:
        # For short links the video ID is part of the path
        return parsed_url.path.lstrip('/')
    else:
        return None

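# Drive layout: under the hard-coded parent folder, each video gets its own
# subfolder (named after the video ID) holding {video_id}_transcript.json plus
# one {video_id}_{start}.jpg screenshot per transcript entry.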
def process_transcript_and_screenshots(video_id):
    print("====process_transcript_and_screenshots====")
    service = init_drive_service()
    parent_folder_id = '1GgI4YVs0KckwStVQkLa1NZ8IpaEMurkL'
    folder_id = create_folder_if_not_exists(service, video_id, parent_folder_id)
    file_name = f'{video_id}_transcript.json'

    # Check whether the transcript already exists on Drive
    exists, file_id = check_file_exists(service, folder_id, file_name)
    if not exists:
        # Fetch the transcript from YouTube and upload it
        transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=['zh-TW'])
        transcript_text = json.dumps(transcript, ensure_ascii=False, indent=2)
        file_id = upload_content_directly(service, file_name, folder_id, transcript_text)
        print("Transcript uploaded to Google Drive")
    else:
        # Transcript already exists; download its content
        print("Transcript already exists on Google Drive")
        transcript_text = download_file_as_string(service, file_id)
        transcript = json.loads(transcript_text)

    # For every transcript entry, take and upload a screenshot if one is missing
    for entry in transcript:
        if 'img_src' not in entry:
            screenshot_path = screenshot_youtube_video(video_id, entry['start'])
            img_file_id = upload_img_directly(service, f"{video_id}_{entry['start']}.jpg", folder_id, screenshot_path)
            set_public_permission(service, img_file_id)
            img_src = f"https://drive.google.com/uc?export=view&id={img_file_id}"
            entry['img_src'] = img_src
            print(f"Screenshot uploaded to Google Drive: {img_src}")

    # Write the updated transcript (now including screenshot links) back to Drive
    updated_transcript_text = json.dumps(transcript, ensure_ascii=False, indent=2)
    update_file_on_drive(service, file_id, updated_transcript_text)
    print("Transcript updated with screenshot links")

    return transcript

def process_youtube_link(link):
    # Fetch the transcript via the YouTube transcript API (cached on Google Drive),
    # ending up in the `transcript` variable
    video_id = extract_youtube_id(link)
    download_youtube_video(video_id, output_path=OUTPUT_PATH)
    transcript = process_transcript_and_screenshots(video_id)

    formatted_transcript = []
    screenshot_paths = []
    for entry in transcript:
        start_time = format_seconds_to_time(entry['start'])
        end_time = format_seconds_to_time(entry['start'] + entry['duration'])
        embed_url = get_embedded_youtube_link(video_id, entry['start'])
        screenshot_path = entry['img_src']
        line = {
            "start_time": start_time,
            "end_time": end_time,
            "text": entry['text'],
            "embed_url": embed_url,
            "screenshot_path": screenshot_path
        }
        formatted_transcript.append(line)
        screenshot_paths.append(screenshot_path)

    html_content = format_transcript_to_html(formatted_transcript)
    print("=====html_content=====")
    print(html_content)
    print("=====html_content=====")

    # Generate the remaining outputs from the transcript
    questions = generate_questions(transcript)
    df_summarise = generate_df_summarise(transcript)

    # Return outputs matching what the UI components expect
    return questions[0] if len(questions) > 0 else "", \
            questions[1] if len(questions) > 1 else "", \
            questions[2] if len(questions) > 2 else "", \
            df_summarise, \
            html_content, \
            screenshot_paths

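# Render the formatted transcript as simple HTML: for each entry, a heading with
# the time range, the transcript text, and the cached screenshot.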
def format_transcript_to_html(formatted_transcript):
    html_content = ""
    for entry in formatted_transcript:
        html_content += f"<h3>{entry['start_time']} - {entry['end_time']}</h3>"
        html_content += f"<p>{entry['text']}</p>"
        html_content += f"<img src='{entry['screenshot_path']}' width='500px' />"
    return html_content

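# Build an embeddable YouTube URL that starts playback at the given second.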
def get_embedded_youtube_link(video_id, start_time):
    embed_url = f"https://www.youtube.com/embed/{video_id}?start={int(start_time)}&autoplay=1"
    return embed_url

def download_youtube_video(youtube_id, output_path=OUTPUT_PATH):
    # Construct the full YouTube URL
    youtube_url = f'https://www.youtube.com/watch?v={youtube_id}'

    # Create the output directory if it doesn't exist
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    # Download the video
    yt = YouTube(youtube_url)
    video_stream = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
    video_stream.download(output_path=output_path, filename=youtube_id+".mp4")

    print(f"Video downloaded successfully: {output_path}/{youtube_id}.mp4")


def screenshot_youtube_video(youtube_id, snapshot_sec):
    video_path = f'{OUTPUT_PATH}/{youtube_id}.mp4'
    # Name the frame after the video ID and timestamp, matching the file name
    # used when it is uploaded to Google Drive.
    file_name = f"{youtube_id}_{snapshot_sec}.jpg"
    screenshot_path = f'{OUTPUT_PATH}/{file_name}'
    with VideoFileClip(video_path) as video:
        video.save_frame(screenshot_path, snapshot_sec)
    return screenshot_path

def get_screenshot_from_video(video_link, start_time):
    # Placeholder: extracting a frame from an arbitrary video link would require
    # downloading the video server-side and grabbing the frame at the given
    # timestamp with ffmpeg or a similar tool.
    screenshot_url = f"[logic to capture a screenshot of {video_link} at {start_time} seconds]"
    return screenshot_url

def process_web_link(link):
    # Fetch and parse the web page; return the same outputs as process_file
    # so the UI wiring below (question buttons, summary, raw text) works unchanged
    response = requests.get(link)
    soup = BeautifulSoup(response.content, 'html.parser')
    web_text = soup.get_text()
    questions = generate_questions(web_text)
    df_summarise = generate_df_summarise(web_text)
    questions += [""] * (3 - len(questions))
    return questions[0], questions[1], questions[2], df_summarise, web_text


def generate_df_summarise(df_string):
    # Use OpenAI to summarise the uploaded data for the teacher
    sys_content = "你是一個資料分析師,服務對象為老師,請精讀資料,使用 zh-TW"
    user_content = f"請根據 {df_string},大概描述這張表的欄位敘述、資料樣態與資料分析,告訴老師這張表的意義,以及可能的結論與對應方式"
    messages = [
        {"role": "system", "content": sys_content},
        {"role": "user", "content": user_content}
    ]
    print("=====messages=====")
    print(messages)
    print("=====messages=====")

    request_payload = {
        "model": "gpt-4-1106-preview",
        "messages": messages,
        "max_tokens": 4000,
    }

    response = client.chat.completions.create(**request_payload)
    df_summarise = response.choices[0].message.content.strip()
    print("=====df_summarise=====")
    print(df_summarise)
    print("=====df_summarise=====")

    return df_summarise

def generate_questions(df_string):
    # Use OpenAI to generate questions based on the uploaded data

    sys_content = "你是一個資料分析師,user為老師,請精讀資料,並用既有資料為本質猜測用戶可能會問的問題,使用 zh-TW"
    user_content = f"請根據 {df_string} 生成三個問題,並用 JSON 格式返回 questions:[q1, q2, q3]"
    messages = [
        {"role": "system", "content": sys_content},
        {"role": "user", "content": user_content}
    ]
    response_format = { "type": "json_object" }

    print("=====messages=====")
    print(messages)
    print("=====messages=====")


    request_payload = {
        "model": "gpt-4-1106-preview",
        "messages": messages,
        "max_tokens": 4000,
        "response_format": response_format
    }

    response = client.chat.completions.create(**request_payload)
    questions = json.loads(response.choices[0].message.content)["questions"]
    print("=====json_response=====")
    print(questions)
    print("=====json_response=====")

    return questions

def send_question(question, df_string_output, chat_history):
    # Called when one of the question buttons is clicked
    return respond(question, df_string_output, chat_history)

def respond(user_message, df_string_output, chat_history):
    print("=== variable: user_message ===")
    print(user_message)
    print("=== variable: chat_history ===")
    print(chat_history)

    sys_content = f"你是一個資料分析師,請用 {df_string_output} 為資料進行對話,使用 zh-TW"
    messages = [
        {"role": "system", "content": sys_content},
        {"role": "user", "content": user_message}
    ]

    print("=====messages=====")
    print(messages)
    print("=====messages=====")


    request_payload = {
        "model": "gpt-4-1106-preview",
        "messages": messages,
        "max_tokens": 4000  # a fairly large value; adjust as needed
    }

    response = client.chat.completions.create(**request_payload)
    print(response)
    
    response_text = response.choices[0].message.content.strip()

    # Update the chat history
    new_chat_history = (user_message, response_text)
    if chat_history is None:
        chat_history = [new_chat_history]
    else:
        chat_history.append(new_chat_history)

    # Return an empty string (to clear the input box) and the updated chat history
    return "", chat_history

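# UI layout: the left column holds the inputs (file upload, YouTube link, web
# link) and the chat box; the right column shows tabbed outputs (transcript
# HTML, screenshot gallery, raw text, summary, and suggested-question buttons).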
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            file_upload = gr.File(label="Upload your CSV or Word file")
            youtube_link = gr.Textbox(label="Enter YouTube Link")
            web_link = gr.Textbox(label="Enter Web Page Link")
            chatbot = gr.Chatbot()
            msg = gr.Textbox(label="Message")
            send_button = gr.Button("Send")

        with gr.Column():
            with gr.Tab("YouTube Transcript and Video"):
                transcript_html = gr.HTML(label="YouTube Transcript and Video")
            with gr.Tab("images"):
                gallery = gr.Gallery(label="截图")
            
            with gr.Tab("資料本文"):
                df_string_output = gr.Textbox()
            with gr.Tab("資料摘要"):
                gr.Markdown("## 這是什麼樣的資料?")
                df_summarise = gr.Textbox(container=True, show_copy_button=True, label="資料本文", lines=40)    
            with gr.Tab("常用問題"):
                gr.Markdown("## 常用問題")
                btn_1 = gr.Button()
                btn_2 = gr.Button()
                btn_3 = gr.Button()

    send_button.click(
        respond, 
        inputs=[msg, df_string_output, chatbot], 
        outputs=[msg, chatbot]
    )
    # Wire up the question-button click events
    btn_1.click(respond, inputs=[btn_1, df_string_output, chatbot], outputs=[msg, chatbot])
    btn_2.click(respond, inputs=[btn_2, df_string_output, chatbot], outputs=[msg, chatbot])
    btn_3.click(respond, inputs=[btn_3, df_string_output, chatbot], outputs=[msg, chatbot])


    # file_upload.change(process_file, inputs=file_upload, outputs=df_string_output)
    file_upload.change(process_file, inputs=file_upload, outputs=[btn_1, btn_2, btn_3, df_summarise, df_string_output])

    # Triggered when a YouTube link is entered
    youtube_link.change(process_youtube_link, inputs=youtube_link, outputs=[btn_1, btn_2, btn_3, df_summarise, transcript_html, gallery])

    # Triggered when a web page link is entered
    web_link.change(process_web_link, inputs=web_link, outputs=[btn_1, btn_2, btn_3, df_summarise, df_string_output])

demo.launch(allowed_paths=["videos"])