# -*- coding: utf-8 -*-
# Standard library
import os
import sqlite3
from collections import Counter

# Third-party
import requests
from bs4 import BeautifulSoup
import openpyxl
import jieba
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import gensim
from gensim import corpora
import pyLDAvis.gensim_models
import pyLDAvis
import nltk

def scrape_github_trending():
    """Scrape the GitHub Trending page.

    Returns:
        list[list[str]]: One row per repository, in page order:
        [repo_name, author, stars, today_stars, forks, language,
         description, repo_link]. Counts are returned as digit strings
        with thousands separators removed.

    Raises:
        requests.RequestException: on network failure, timeout, or a
            non-2xx HTTP response.
    """
    print("正在访问 GitHub Trending 页面...")
    url = 'https://github.com/trending'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }
    # Fail fast instead of hanging forever, and raise on error pages
    # instead of silently parsing them.
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')

    # Each trending repository is rendered as an <article class="Box-row">.
    repos = soup.find_all('article', class_='Box-row')
    data = []

    for repo in repos:
        # Repository name, author and link come from the heading anchor,
        # whose text has the form "author / repo".
        h2_tag = repo.find('h2', class_='h3 lh-condensed')
        if h2_tag and h2_tag.a:
            full_name = h2_tag.a.get_text(strip=True).split('/')
            # Guard against unexpected markup: the original indexed
            # full_name[1] unconditionally and could raise IndexError.
            if len(full_name) >= 2:
                author = full_name[0].strip()
                repo_name = full_name[1].strip()
            else:
                author = '未知'
                repo_name = full_name[0].strip() if full_name else '未知'
            repo_link = 'https://github.com' + h2_tag.a['href']
        else:
            author = '未知'
            repo_name = '未知'
            repo_link = '无链接'

        # Total stars (anchor pointing at the stargazers page).
        stars_tag = repo.select_one('a[href*="/stargazers"]')
        stars = stars_tag.get_text(strip=True).replace(',', '') if stars_tag else '0'

        # Fork count (anchor pointing at the network/members page).
        forks_tag = repo.select_one('a[href*="/network/members"]')
        forks = forks_tag.get_text(strip=True).replace(',', '') if forks_tag else '0'

        # Free-text description; some repositories have none.
        description_tag = repo.p
        description = description_tag.get_text(strip=True) if description_tag else '无描述'

        # Primary language, when GitHub reports one.
        language_tag = repo.find('span', itemprop='programmingLanguage')
        language = language_tag.get_text(strip=True) if language_tag else '未指定'

        # Stars gained today; the span text looks like "1,234 stars today".
        today_stars_tag = repo.find('span', class_='d-inline-block float-sm-right')
        if today_stars_tag:
            today_stars_text = today_stars_tag.get_text(strip=True)
            today_stars = today_stars_text.split()[0].replace(',', '') if 'stars today' in today_stars_text else '0'
        else:
            today_stars = '0'
        data.append([repo_name, author, stars, today_stars, forks, language, description, repo_link])

    return data


def save_to_excel(data):
    """Write scraped repository rows to db/GitHub_Trending.xlsx.

    Args:
        data: Iterable of rows matching the header appended below
            (repo name, author, stars, today's stars, forks, language,
            description, link).
    """
    # Ensure the output directory exists: openpyxl's wb.save() does not
    # create missing directories and would raise FileNotFoundError.
    os.makedirs('db', exist_ok=True)

    # Build a fresh workbook with a header row followed by the data rows.
    wb = openpyxl.Workbook()
    sheet = wb.active
    sheet.title = "GitHub Trending"
    sheet.append(["仓库名", "作者", "Star 数", "今日 Star 数", "Fork 数量", "语言类型", "描述", "链接"])

    for entry in data:
        sheet.append(entry)

    # Persist the workbook to disk.
    wb.save('db/GitHub_Trending.xlsx')
    print("数据已保存到 GitHub_Trending.xlsx")


def read_from_excel_and_save_to_sqlite():
    """Load rows from db/GitHub_Trending.xlsx into the SQLite table trending_repos.

    Reads every data row (skipping the header) from the workbook written by
    save_to_excel() and inserts them into db/b23015129.db, creating the
    table on first use.
    """
    print("正在读取 Excel 文件...")
    wb = openpyxl.load_workbook('db/GitHub_Trending.xlsx')
    sheet = wb.active

    # Skip the header row; values_only yields plain tuples, which is
    # exactly what executemany() expects.
    data = list(sheet.iter_rows(min_row=2, values_only=True))

    # Connect to the SQLite database (created on first use).
    conn = sqlite3.connect('db/b23015129.db')
    try:
        print("正在保存数据到 SQLite 数据库...")

        # "with conn" runs the statements in one transaction: committed on
        # success, rolled back if anything raises.  The original leaked the
        # connection and left a dangling transaction on failure.
        with conn:
            conn.execute('''CREATE TABLE IF NOT EXISTS trending_repos (
                                repo_name TEXT,
                                author TEXT,
                                stars INTEGER,
                                today_stars INTEGER,
                                forks INTEGER,
                                language TEXT,
                                description TEXT,
                                repo_link TEXT
                            )''')

            # NOTE(review): rows accumulate across runs because the table is
            # never cleared — confirm duplicates are acceptable.
            conn.executemany(
                'INSERT INTO trending_repos VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
                data)
    finally:
        conn.close()
    print("数据已从 Excel 文件导入并保存到 SQLite 数据库中")


def nlp_analysis():
    """Analyze repository descriptions from the SQLite database.

    Pipeline: tokenize the descriptions, filter stop words, print word
    frequencies as a word cloud (saved to b23015129Wordcloud.png), then
    train a 5-topic LDA model and open an interactive pyLDAvis view.
    """
    # All required modules are imported at the top of the file; the
    # original redundantly re-imported them here.
    # quiet=True suppresses the progress output on repeat runs; the
    # download itself is a no-op once the corpus is cached.
    nltk.download('stopwords', quiet=True)

    # Read the description column from the SQLite database, always
    # closing the connection even if the query fails.
    conn = sqlite3.connect('db/b23015129.db')
    try:
        cursor = conn.cursor()
        cursor.execute('SELECT description FROM trending_repos')
        descriptions = [row[0] for row in cursor.fetchall()]
    finally:
        conn.close()

    # Text preprocessing: jieba handles mixed Chinese/English text; keep
    # only alphabetic tokens that are not English stop words.
    texts = []
    stop_words = set(stopwords.words('english'))
    for description in descriptions:
        tokens = jieba.lcut(description)
        tokens = [token for token in tokens if token.isalpha() and token.lower() not in stop_words]
        texts.append(tokens)

    # Bail out if no document produced any usable tokens.
    if not any(texts):
        print("没有有效的文本数据用于分析。")
        return

    # Word-frequency counts over all documents combined.
    all_tokens = [token for text in texts for token in text]
    word_freq = Counter(all_tokens)

    # Generate the word cloud; the bundled font supports CJK glyphs.
    try:
        wordcloud = WordCloud(font_path='static/PingFang Light.ttf', width=800, height=400).generate_from_frequencies(word_freq)
    except Exception as e:
        print(f"生成词云时出错：{e}")
        return

    # Render the word cloud and save it before showing (plt.show() would
    # otherwise clear the figure on some backends).
    plt.figure(figsize=(15, 7.5))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')
    plt.savefig('b23015129Wordcloud.png', format='png', dpi=300, bbox_inches='tight')

    plt.show()

    # Build the gensim dictionary and bag-of-words corpus.
    dictionary = corpora.Dictionary(texts)
    corpus = [dictionary.doc2bow(text) for text in texts]

    # Train the LDA topic model (fixed seed for reproducible topics).
    lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
                                                id2word=dictionary,
                                                num_topics=5,
                                                random_state=100,
                                                update_every=1,
                                                chunksize=10,
                                                passes=10,
                                                alpha='auto',
                                                per_word_topics=True)

    # Print the top words of every topic.
    for idx, topic in lda_model.print_topics(-1):
        print("主题: {} \n单词: {}".format(idx, topic))
        print()

    # Interactive topic visualization in the browser.
    vis = pyLDAvis.gensim_models.prepare(lda_model, corpus, dictionary)
    pyLDAvis.show(vis)

def main():
    """Run the full pipeline: scrape, export to Excel, load into SQLite, analyze."""
    trending_rows = scrape_github_trending()
    save_to_excel(trending_rows)
    read_from_excel_and_save_to_sqlite()
    nlp_analysis()


# Run the pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()