import pandas as pd
from literature.models.Literature import Literature, Keyword
from literature.models.Institution import Institution
from literature.models.Author import Author
from literature.models.GlobalTables import Language, LiteratureStatusType, LiteratureType, JournalPublicationCycleType, InstitutionType, KeywordType
from literature.models.Publications import Journal, Publication
from control.models.User import User
from django.core.exceptions import ObjectDoesNotExist
from django.db.utils import IntegrityError
from django.db import transaction
from datetime import datetime
from tqdm import tqdm
import re, glob

# Maps legacy language codes found in the spreadsheet's eLanguage column
# (4-letter uppercase codes and ISO-639-1 lowercase codes) to the canonical
# Language.name values stored in the database.  Used as a fallback when the
# raw code itself is not a Language name (see init_literature).
language_map = {
    "AZER": "Azerbaijani",
    "BULG": "Bulgarian",
    "CHIN": "Chinese",
    "CZEC": "Czech",
    "DANH": "Danish",
    "DUTH": "Dutch",
    "ENGL": "English",
    "FREN": "French",
    "GERM": "German",
    "HUNG": "Hungarian",
    "ITAL": "Italian",
    "JAPN": "Japanese",
    "KORE": "Korean",
    "POLH": "Polish",
    "PORT": "Portuguese",
    "ROM":  "Romanian",
    "ROMN": "Romanian",
    "ROUM": "Romanian",
    "RUSS": "Russian",
    "SLOE": "Slovak",
    "SLWN": "Slovene",  # no direct match
    "SPAN": "Spanish",
    "SRCR": "Serbian",
    "UKRN": "Ukrainian",
    "SWED": "Swedish",
    "ZZ":   "unknown",
    "ar": "Arabic",
    "cs": "Czech",
    "de": "German",
    "en": "English",
    "es": "Spanish",
    "fr": "French",
    "hu": "Hungarian",
    "it": "Italian",
    "ja": "Japanese",
    "nb": "Norwegian Bokmål",
    "nl": "Dutch",
    "no": "Norwegian",
    "pl": "Polish",
    "ru": "Russian",
    "sr": "Serbian",
    "tr": "Turkish",
    # BUG FIX: was "unknow": "unknow", but no Language row is looked up
    # under that name anywhere else (the canonical fallback name is
    # "unknown"), so Language.objects.get(name="unknow") would raise an
    # unhandled DoesNotExist.
    "unknow": "unknown",
}


def init_literature():
    """Load the newest reference-literature Excel export and bulk-import it.

    Reads the latest "A075 ... 参考文献表" workbook, converts each row into
    an unsaved ``Literature`` instance plus parallel lists of author and
    keyword uuids, and hands the three lists to ``bulk_create_literatures``.
    Exits the process with status 1 if the workbook cannot be read.
    """
    print("开始读取文献...")
    try:
        matched_files = glob.glob("../final_basic_tables/A075. PCdata基表-3-参考文献表*.xlsx")
        # File names end with a date-like suffix, so a lexical comparison of
        # the segment after the last "-" selects the most recent export.
        latest_file = max(matched_files, key=lambda x: x.split("-")[-1]) if matched_files else None
        sheet_name = "文献总表"
        # skiprows=[1]: the second spreadsheet row is a sub-header, not data.
        # fillna('') guarantees every cell is a value ('' for blanks).
        df = pd.read_excel(latest_file, sheet_name=sheet_name, header=0,
                           skiprows=[1], engine='openpyxl').fillna('')
    except Exception as e:
        print(f"Error reading the Excel file: {e}")
        exit(1)

    print("文献读取完毕")
    print("开始文献处理")
    user = User.objects.get(user_account="superadmin")
    pub_cycle = JournalPublicationCycleType.objects.filter(name='unknown')[0]
    status = LiteratureStatusType.objects.filter(name='Available')[0]
    inst_type = InstitutionType.objects.filter(name='unknown')[0]
    keyword_type_entity = KeywordType.objects.filter(name='Keyword')[0]
    liter_list = []
    author_list = []
    keyword_list = []

    def _cell(key):
        # Return the current row's cell value, or None when missing/blank.
        # (row_data is rebound each loop iteration; looked up at call time.)
        value = row_data[key]
        return None if pd.isna(value) or value == "" else value

    # Walk every spreadsheet row, collecting unsaved Literature objects
    # together with the author/keyword uuids each one should link to.
    for index, row in tqdm(df.iterrows(), total=len(df)):
        row_data = row.to_dict()

        # Placeholder title keeps the NOT NULL title column satisfied.
        # (After fillna('') the cell is never None, so the old
        # "if title is not None" guard was dead code and is removed.)
        title = row_data['sTitle'] if row_data['sTitle'] else "  "

        # ---- authors -----------------------------------------------------
        author_ids = []
        if row_data['sAuthorList']:
            # Normalise separators: both ';' and ',' delimit author names.
            authors = re.split(r'[;,]', row_data['sAuthorList'].replace(' ', ''))
            authors = [a for a in authors if a]
            for author_name in authors:
                author_name = author_name.strip().replace(' ', '')
                if 0 < len(author_name) <= 128:  # column length limit
                    author, _ = Author.objects.get_or_create(
                        name=author_name,
                        defaults={'creator': user, 'last_editor': user}
                    )
                    author_ids.append(author.uuid)

        # ---- keywords ----------------------------------------------------
        keyword_ids = []
        if row_data['sKeywordList']:
            keywords = re.split(r'[;,]', row_data['sKeywordList'].strip())
            keywords = [a for a in keywords if a]
            for keyword_name in keywords:
                if len(keyword_name) <= 400:  # column length limit
                    keyword, _ = Keyword.objects.get_or_create(
                        k=keyword_name,
                        v=keyword_name,
                        kt=keyword_type_entity,
                        is_reviewing=True,
                        defaults={'creator': user, 'last_editor': user, 'reviewer': user}
                    )
                    keyword_ids.append(keyword.uuid)

        # ---- language ----------------------------------------------------
        if row_data['eLanguage'] == "":
            language = Language.objects.get(name="unknown")
        else:
            try:
                language = Language.objects.get(name=row_data['eLanguage'])
            except ObjectDoesNotExist:
                # Translate legacy codes; unknown codes fall back to the
                # Language row named 'unknown'.
                language = language_map.get(row_data['eLanguage'], "unknown")
                language = Language.objects.get(name=language)

        # ---- citation type -----------------------------------------------
        try:
            liter_type = LiteratureType.objects.get(name=row_data['eCitationType'])
        except ObjectDoesNotExist:
            liter_type = LiteratureType.objects.filter(name='Unspecified')[0]

        # ---- free-text comment (fields without a dedicated column) -------
        comment = ""
        if not pd.isna(row_data['sJCODEN']) and row_data['sJCODEN'] != "":
            comment += f"coden: {row_data['sJCODEN']}\n"
        if not pd.isna(row_data['DETHERM_ID']) and row_data['DETHERM_ID'] != "":
            comment += f"DocID: {row_data['DETHERM_ID']}\n"
        if not pd.isna(row_data['DIPPR_ID']) and row_data['DIPPR_ID'] != "":
            comment += f"RefID: {row_data['DIPPR_ID']}\n"
        if not pd.isna(row_data['DETHERM_Exist']) and row_data['DETHERM_Exist'] != "":
            comment += f"collection: {row_data['DETHERM_Exist']}\n"
        if not pd.isna(row_data['sCorporateSource']) and row_data['sCorporateSource'] != "":
            comment += f"corporate-source: {row_data['sCorporateSource']}\n"
        if not pd.isna(row_data['sBEditior']) and row_data['sBEditior'] != "":
            comment += f"editor: {row_data['sBEditior']}\n"

        # ---- optional scalar fields (None when blank) --------------------
        cit_name = _cell('sCollection')
        edition_number = _cell('sBEdition')
        liter_code = _cell('nPCdataCitID')
        doi = _cell('sDOI')
        abstract = _cell('sAbstract')
        issue = _cell('sJIssue')
        volume = _cell('sVol')
        cit_id = _cell('sOriginRefID')
        net_src = _cell('urlCit')  # web link

        # ---- publication year/date ---------------------------------------
        year = _cell('yrPubYr')
        if year is not None and not isinstance(year, int):
            if isinstance(year, float) and year.is_integer():
                # FIX: numeric Excel columns arrive as floats (e.g. 1999.0);
                # these were previously discarded as non-int, losing the date.
                year = int(year)
            elif isinstance(year, str) and year.isdigit():
                year = int(year)
            else:
                year = None
        date = None if year is None or year <= 0 else datetime(year, 1, 1).date()

        # ---- publisher / journal -----------------------------------------
        # BUG FIX: the original compared these cells against None, but
        # fillna('') turns missing cells into "", so the "unknown" branch
        # never ran and every row created a Journal.
        if row_data['sPubName'] == "" and row_data['sJISSN'] == "":
            pub_type = "unknown"
            journal = None
        else:
            pub_type = "Journal"
            institutions = []
            # Also skip blank publisher names: the original created an
            # Institution with an empty name for "" cells.
            if not pd.isna(row_data['sBPublisher']) and row_data['sBPublisher'] != "":
                ins, _ = Institution.objects.get_or_create(
                    name=row_data['sBPublisher'], inst_type=inst_type,
                    creator=user, last_editor=user)
                institutions.append(ins.uuid)
            journal, created = Journal.objects.get_or_create(
                issn=row_data['sJISSN'], publication_cycle=pub_cycle,
                creator=user, last_editor=user)
            journal.institutions.set(institutions)
            journal.save()

        # ---- page range ---------------------------------------------------
        if pd.isna(row_data['sPage']) or row_data['sPage'] == "":
            first_page = None
            last_page = None
        else:
            sPage = str(row_data['sPage']).strip()
            # Split once on the first '-', space or ';' separator.
            pages = re.split(r'[- ;]', sPage, maxsplit=1)
            if len(pages) > 1:
                first_page = pages[0].strip() or None  # "" becomes None
                last_page = pages[1].strip() or None
            else:
                # Single page number: use it for both ends of the range.
                first_page = sPage or None
                last_page = sPage or None

        liter_list.append(
            Literature(
                    title=title,
                    zh_title=title,
                    cit_name=cit_name,
                    liter_code=liter_code,
                    abstract=abstract,
                    DOI=doi,
                    edition_number=edition_number,
                    language=language,
                    net_src=net_src,
                    cit_id=cit_id,
                    comment=comment,
                    pub_type=pub_type,
                    pub_id=journal,
                    issue=issue,
                    volume=volume,
                    first_page=first_page,
                    last_page=last_page,
                    date=date,
                    creator=user,
                    last_editor=user,
                    status=status,
                    liter_type=liter_type
            )
        )
        author_list.append(author_ids)
        keyword_list.append(keyword_ids)
    print("文献处理完毕")
    print("开始录入文献")
    bulk_create_literatures(liter_list, author_list, keyword_list)


def bulk_create_literatures(liter_list, author_list, keyword_list):
    """Persist pre-built Literature rows and their M2M links in one transaction.

    The three arguments are parallel lists: entry ``i`` of ``author_list``
    and ``keyword_list`` holds the author/keyword uuids belonging to
    ``liter_list[i]``.  Through-model rows are inserted in batches of 1000
    with ``ignore_conflicts=True``.  Returns the number of literatures
    created.
    """
    with transaction.atomic():
        # Step 1: insert all Literature rows at once.
        print("正在批量入库文献...")
        created = Literature.objects.bulk_create(
            tqdm(liter_list, desc="文献入库进度")
        )

        # Steps 2 & 3: build author and keyword through-model rows,
        # deduplicating (literature, related) pairs across the whole run.
        author_m2m = Literature.authors.through
        keyword_m2m = Literature.keywords.through
        pending_authors, seen_authors = [], set()
        pending_keywords, seen_keywords = [], set()

        progress = tqdm(created, desc="构建关联关系", total=len(created))
        for literature, authors, keywords in zip(progress, author_list, keyword_list):
            lit_uuid = literature.uuid

            for author_id in authors:
                pair = (lit_uuid, author_id)
                if pair in seen_authors:
                    continue
                seen_authors.add(pair)
                pending_authors.append(
                    author_m2m(literature_id=lit_uuid, author_id=author_id)
                )

            for keyword_id in keywords:
                pair = (lit_uuid, keyword_id)
                if pair in seen_keywords:
                    continue
                seen_keywords.add(pair)
                pending_keywords.append(
                    keyword_m2m(literature_id=lit_uuid, keyword_id=keyword_id)
                )

            # Flush in chunks so pending lists stay bounded.
            if len(pending_authors) >= 1000:
                author_m2m.objects.bulk_create(pending_authors, ignore_conflicts=True)
                pending_authors = []
            if len(pending_keywords) >= 1000:
                keyword_m2m.objects.bulk_create(pending_keywords, ignore_conflicts=True)
                pending_keywords = []

        # Flush whatever is left over.
        author_m2m.objects.bulk_create(pending_authors, ignore_conflicts=True)
        keyword_m2m.objects.bulk_create(pending_keywords, ignore_conflicts=True)

    print(f"文献录入完成，创建文献：{len(created)}篇")
    return len(created)