import pandas as pd
import matplotlib.pyplot as plt
import os
import re
from collections import Counter
import jieba
from pyecharts import options as opts
from pyecharts.charts import Bar, Pie, Line, WordCloud
from pyecharts.globals import ThemeType
from bs4 import BeautifulSoup

# Configure matplotlib for Chinese text: SimHei provides CJK glyphs, and
# disabling unicode_minus keeps the minus sign renderable under a CJK font.
# NOTE(review): nothing in this file actually draws with matplotlib (all
# charts below use pyecharts) — this config may be vestigial; confirm.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

class BibliometricAnalysis:
    """Bibliometric analysis of a CNKI export saved as an HTML table.

    CNKI's ".xls" export is actually an HTML document containing a single
    <table>. This class parses that table into a pandas DataFrame, renders
    interactive pyecharts charts (year/author/institution/keyword views),
    and writes a plain-text summary report into ``self.output_dir``.
    """

    def __init__(self, file_path):
        """Parse the HTML-table export at *file_path* into ``self.df``.

        Args:
            file_path: path to the CNKI HTML-format ".xls" export.

        Raises:
            ValueError: if the document contains no <table> element.
        """
        with open(file_path, 'r', encoding='utf-8') as f:
            html_content = f.read()

        soup = BeautifulSoup(html_content, 'html.parser')

        table = soup.find('table')
        if table is None:
            # Fail early with a clear message instead of an opaque
            # AttributeError on ``None.find_all`` below.
            raise ValueError(f"No <table> element found in {file_path}")
        rows = table.find_all('tr')

        # Header cells typically read "SrcDatabase-来源库"; keep everything
        # after the first '-' (split once, so names containing a second '-'
        # are not truncated). Accept <th> as well as <td> header cells.
        header_cells = rows[0].find_all(['th', 'td'])
        headers = [
            cell.text.split('-', 1)[1] if '-' in cell.text else cell.text
            for cell in header_cells
        ]

        # Body rows: one list of cell texts per <tr>.
        data = [[cell.text for cell in row.find_all('td')] for row in rows[1:]]

        self.df = pd.DataFrame(data, columns=headers)

        # All charts and the report are written under this directory.
        self.output_dir = '文献计量分析/results'
        os.makedirs(self.output_dir, exist_ok=True)

    @staticmethod
    def _split_multi(cell):
        """Split a semicolon-delimited cell into stripped, non-empty items.

        Returns [] for non-string input (None/NaN), so callers can iterate
        unconditionally.
        """
        if not isinstance(cell, str):
            return []
        return [part.strip() for part in cell.split(';') if part.strip()]

    def clean_data(self):
        """Drop rows that carry no data at all.

        HTML parsing yields empty strings rather than NaN, so blank cells
        are normalised to NA first — otherwise ``dropna(how='all')`` would
        never remove anything.
        """
        self.df = self.df.replace(r'^\s*$', pd.NA, regex=True).dropna(how='all')

    def _extract_years(self):
        """Return a list of 4-digit publication years, one per parsable row.

        Prefers an explicit '年' column, falling back to '发表时间'. Rows
        without a recognisable 4-digit year are skipped rather than raising
        (a bare ``int()`` cast would crash on blank or malformed cells).
        """
        column = '年' if '年' in self.df.columns else '发表时间'
        years = []
        for value in self.df[column].dropna():
            match = re.search(r'(\d{4})', str(value))
            if match:
                years.append(int(match.group(1)))
        return years

    def analyze_year_distribution(self):
        """Render a line chart of publications per year."""
        year_count = Counter(self._extract_years())
        year_df = pd.DataFrame.from_dict(
            year_count, orient='index', columns=['count']
        ).sort_index()

        line = (
            Line(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))
            .add_xaxis([str(year) for year in year_df.index])
            .add_yaxis("发文量", year_df['count'].tolist())
            .set_global_opts(
                title_opts=opts.TitleOpts(title="文献年份分布"),
                xaxis_opts=opts.AxisOpts(name="年份"),
                yaxis_opts=opts.AxisOpts(name="发文量")
            )
        )
        line.render(f"{self.output_dir}/year_distribution.html")

    def analyze_author_distribution(self):
        """Render a bar chart of the 20 most prolific authors."""
        authors = []
        for cell in self.df['作者']:
            authors.extend(self._split_multi(cell))

        author_count = Counter(authors).most_common(20)

        bar = (
            Bar(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))
            .add_xaxis([name for name, _ in author_count])
            .add_yaxis("发文量", [count for _, count in author_count])
            .set_global_opts(
                title_opts=opts.TitleOpts(title="高产作者统计(Top20)"),
                xaxis_opts=opts.AxisOpts(name="作者", axislabel_opts=opts.LabelOpts(rotate=45)),
                yaxis_opts=opts.AxisOpts(name="发文量")
            )
        )
        bar.render(f"{self.output_dir}/author_distribution.html")

    def analyze_institution_distribution(self):
        """Render a pie chart of the 20 most frequent institutions."""
        institutions = []
        for cell in self.df['单位']:
            institutions.extend(self._split_multi(cell))

        inst_count = Counter(institutions).most_common(20)

        pie = (
            Pie(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))
            .add(
                "",
                [list(pair) for pair in inst_count],
                radius=["30%", "75%"],
            )
            .set_global_opts(
                title_opts=opts.TitleOpts(title="研究机构分布(Top20)"),
                legend_opts=opts.LegendOpts(orient="vertical", pos_top="15%", pos_right="2%")
            )
        )
        pie.render(f"{self.output_dir}/institution_distribution.html")

    def analyze_keywords(self):
        """Render a word cloud from the keyword and title columns."""
        text = []

        # Keyword column (optional in some exports).
        if '关键词' in self.df.columns:
            text.append(' '.join(self.df['关键词'].dropna().astype(str)))

        # Title column — guarded like the keyword column for consistency,
        # so a missing column degrades gracefully instead of raising.
        if '题名' in self.df.columns:
            text.append(' '.join(self.df['题名'].dropna().astype(str)))

        combined = ' '.join(text)

        # Segment Chinese text, then drop single characters and stop words.
        words = jieba.cut(combined)
        stop_words = {'的', '了', '和', '是', '在', '对', '等', '与', '及', '中', '为', '从', '到', 
                     '把', '让', '给', '但', '但是', '所以', '因此', '因为', '由于', '这个', '那个',
                     '这些', '那些', '进行', '作为', '通过', '以及', '或者', '可以', '这样', '那样',
                     '什么', '如何', '怎样', '一个', '一种', '一些', '有关', '关于', '本文', '研究'}
        filtered = [w for w in words if len(w) > 1 and w not in stop_words]

        word_count = Counter(filtered).most_common(100)

        wordcloud = (
            WordCloud()
            .add("", word_count, word_size_range=[20, 100])
            .set_global_opts(title_opts=opts.TitleOpts(title="关键词词云"))
        )
        wordcloud.render(f"{self.output_dir}/keyword_wordcloud.html")

    def generate_report(self):
        """Write a plain-text summary (paper/author/institution counts)."""
        report = []
        report.append("文献计量分析报告")
        report.append("=" * 50)

        report.append("\n1. 基本统计信息")
        report.append(f"总文献数量: {len(self.df)}")

        # Distinct authors across all semicolon-delimited author cells.
        unique_authors = set()
        for cell in self.df['作者'].dropna():
            unique_authors.update(self._split_multi(cell))
        report.append(f"作者数量: {len(unique_authors)}")

        # Distinct institutions, same treatment.
        unique_institutions = set()
        for cell in self.df['单位'].dropna():
            unique_institutions.update(self._split_multi(cell))
        report.append(f"机构数量: {len(unique_institutions)}")

        with open(f"{self.output_dir}/analysis_report.txt", "w", encoding='utf-8') as f:
            f.write('\n'.join(report))

    def run_analysis(self):
        """Run the full pipeline: clean, chart, and report."""
        print("开始文献计量分析...")
        self.clean_data()
        self.analyze_year_distribution()
        self.analyze_author_distribution()
        self.analyze_institution_distribution()
        self.analyze_keywords()
        self.generate_report()
        print(f"分析完成！结果保存在: {self.output_dir}")

if __name__ == "__main__":
    # Entry point: analyse the default CNKI export in the working directory.
    source_file = 'CNKI.xls'
    BibliometricAnalysis(source_file).run_analysis()