import os
import ftplib
from datetime import datetime
import subprocess
import gzip
import shutil
import xml.etree.ElementTree as ET
from concurrent.futures import ThreadPoolExecutor,ProcessPoolExecutor
import tqdm.auto as tqdm
import os
import json
import glob
import re,json
import datetime

# Automatically sync PubMed data from the NCBI server
def sync_pubmed_data(workdir = '/mnt/nas/health_data/data',OPEN_ACCESS_DIR = 'pubmed/updatefiles'):
    '''
    Mirror a PubMed FTP directory into `workdir` using wget, mapping the
    remote layout of OPEN_ACCESS_DIR onto `workdir/<ftp-host>/<OPEN_ACCESS_DIR>`.

    After downloading, every *.md5 checksum is verified (files that fail are
    deleted so the next run re-downloads them; success is recorded in a
    "<md5file>.flag" marker), then *.gz archives are decompressed and each
    result is hard-linked into the shared xml folder.

    Parameters
    ----------
    workdir : str
        Local root directory for the mirrored data.
    OPEN_ACCESS_DIR : str
        Remote sub-directory to mirror (e.g. 'pubmed/baseline').
    '''
    # FTP server address and (anonymous) credentials
    FTP_HOST = 'ftp.ncbi.nlm.nih.gov'
    FTP_USER = 'anonymous'
    FTP_PASS = ''

    os.chdir(workdir)
    xml_folder = f'{workdir}/ftp.ncbi.nlm.nih.gov/pubmed/xml'
    download_folder = f'{workdir}/{FTP_HOST}/{OPEN_ACCESS_DIR}'

    # List remote files over FTP (newest first), then close the session;
    # the actual downloads go over HTTPS via wget below.
    ftp = ftplib.FTP(FTP_HOST, FTP_USER, FTP_PASS)
    ftp.cwd(OPEN_ACCESS_DIR)
    file_list = sorted(ftp.nlst())[::-1]
    ftp.quit()

    # Download every file that is not already present locally.
    for filename in file_list:
        local_path = f'{FTP_HOST}/{OPEN_ACCESS_DIR}/{filename}'
        if filename.endswith('.html'):
            continue
        # Skip files that already exist locally
        if os.path.exists(local_path):
            continue
        # BUG FIX: '-p' (page requisites) does not recreate the host/dir
        # hierarchy; '-x' does, so the file lands exactly at local_path,
        # matching the existence check above.
        os.system(f'wget https://{FTP_HOST}/{OPEN_ACCESS_DIR}/{filename} -x')

    # Switch into the download directory for MD5 verification
    os.chdir(download_folder)
    for md5file in os.listdir(download_folder):
        if md5file.endswith('.md5'):
            flag = md5file + ".flag"
            # A flag file marks an already-verified download: skip it
            if os.path.isfile(flag):
                continue
            filename = os.path.splitext(md5file)[0]
            if os.path.isfile(filename):
                # Run `md5sum -c` and inspect its report
                result = subprocess.run(["md5sum", "-c", md5file], stdout=subprocess.PIPE, text=True)
                if "FAILED" in result.stdout:
                    print(f"校验失败，删除文件：{filename}")
                    os.remove(filename)  # delete so the next sync re-downloads it
                else:
                    print(f"校验成功：{filename}")
                    # Create the flag file recording the successful check
                    open(flag, 'a').close()
            else:
                print(f"文件不存在，跳过：{filename}")

    # Decompress the archives and hard-link each result into xml_folder
    os.makedirs(xml_folder, exist_ok=True)
    for filename in file_list:
        local_path = f'{download_folder}/{filename}'
        unzip_path = local_path[:-3]  # path of the decompressed file (".gz" stripped)
        if local_path.endswith('.gz') and not os.path.exists(unzip_path):
            print(f'unzip {local_path}')
            # gunzip -c keeps the archive intact and streams the output
            # (the in-process gzip route produced broken results before)
            os.system(f'gunzip -c {local_path} > {unzip_path}')
            # BUG FIX: the original appended the absolute local_path to
            # xml_folder, producing a doubled path; link by basename instead.
            os.system(f'ln {unzip_path} {xml_folder}/{filename[:-3]}')

# 抽取xml中的数据并转化成相应的格式
## 识别NCT编号
def find_nct_numbers(text):
    """Return every NCT registry identifier ('NCT' + 8 digits) in *text*, in order."""
    nct_pattern = re.compile(r'NCT\d{8}')
    return nct_pattern.findall(text)

def _find_text(parent, path):
    """Return the text of the first element matching *path* under *parent*, or None."""
    elem = parent.find(path)
    return elem.text if elem is not None else None

# Extract article metadata (ids, title, abstract, type, date) from a PubmedArticle element
def extract_article_info(article_element):
    '''
    Build a metadata dict from a <PubmedArticle> Element.

    Returns a dict with keys 'PMC', 'pubmed', 'doi' (each possibly None),
    and — only when a PMC id is present — 'title', 'abstract',
    'NCT_number' (comma-joined, '' when none found), 'PublicationType'
    (list of MeSH UI codes) and 'date' ('YYYY-MM-DD' with fallbacks).
    '''
    article_info = {}
    # Extract the article ids (each lookup done once via _find_text,
    # instead of the original duplicated find-then-find pattern)
    article_id_list = article_element.find('.//ArticleIdList')
    if article_id_list is not None:
        article_info['PMC'] = _find_text(article_id_list, "ArticleId[@IdType='pmc']")
        article_info['pubmed'] = _find_text(article_id_list, "ArticleId[@IdType='pubmed']")
        article_info['doi'] = _find_text(article_id_list, "ArticleId[@IdType='doi']")
    # Non-PMC articles are filtered by the caller; stop early
    if article_info.get('PMC') is None:
        return article_info
    # Extract title and abstract
    title_element = article_element.find('.//ArticleTitle')
    if title_element is not None:
        article_info['title'] = title_element.text
    abstract_element = article_element.find('.//Abstract')
    if abstract_element is not None:
        # Serialize all nested text (abstracts may contain markup children)
        abstract_text = ET.tostring(abstract_element, method='text', encoding='unicode')
        article_info['abstract'] = abstract_text.strip()
        # Comma-joined NCT ids found in the abstract ('' when none)
        article_info['NCT_number'] = ','.join(find_nct_numbers(article_info['abstract']))
    # Extract publication-type MeSH UI codes (e.g. D016454 = Review)
    article_info['PublicationType'] = [elem.get('UI') for elem in article_element.findall('.//PublicationType')]
    # Extract the publication date with placeholder fallbacks.
    # BUG FIX: `or` also covers elements present but empty, which previously
    # produced dates like 'None-01-01'.
    pub_date_element = article_element.find('.//PubDate')
    if pub_date_element is not None:
        year = _find_text(pub_date_element, 'Year') or '0000'
        month = _find_text(pub_date_element, 'Month') or '01'
        day = _find_text(pub_date_element, 'Day') or '01'
        article_info['date'] = f'{year}-{month}-{day}'
    return article_info

## Filter and extract articles from one PubMed XML file
def parse_xml(xml_file):
    '''
    Parse *xml_file* and return the article dicts that pass all filters:
    a PMC id, at least one NCT number, not a Review, and a title or abstract.

    Parameters
    ----------
    xml_file : str — path to a PubMed XML file with <PubmedArticle> children.
    '''
    articles = []
    tree = ET.parse(xml_file)
    root = tree.getroot()
    for article_element in root.findall('PubmedArticle'):
        article_info = extract_article_info(article_element)
        # Filter non-PMC articles
        if article_info.get('PMC') is None:
            continue
        # Filter articles with no NCT number.
        # BUG FIX: extract_article_info stores '' when the abstract has no
        # NCT ids; the original `is None` test let those articles through.
        if not article_info.get('NCT_number'):
            continue
        # Filter Review articles (MeSH UI D016454)
        if 'D016454' in article_info.get('PublicationType', []):
            continue
        # Filter articles with neither title nor abstract
        if article_info.get('title') is None and article_info.get('abstract') is None:
            continue
        articles.append(article_info)
    return articles

## Save each article as an individual PubTator file
def save_to_PubTator (articles, output_folder,resume=False):
    '''
    Write one "<pubmed-id>.PubTator" file per article into *output_folder*.

    Each file holds the 'pmid|t|title' and 'pmid|a|abstract' lines, with
    internal newlines flattened to spaces.

    Parameters
    ----------
    articles : list[dict] — article dicts from extract_article_info.
    output_folder : str   — destination directory (must already exist).
    resume : bool         — skip articles whose output file already exists.
    '''
    for article in articles:
        output_file = os.path.join(output_folder,article['pubmed']+'.PubTator')
        if os.path.exists(output_file) and resume:
            continue
        # A stored value of None means "missing": normalize to ''.
        title = article.get('title') or ''
        # BUG FIX: the abstract line previously re-read the raw dict value
        # (article.get('abstract','')), which crashes with AttributeError
        # when the key exists with value None; use the normalized variable.
        abstract = article.get('abstract') or ''
        with open(output_file, 'w',encoding='utf-8') as f:
            f.write(article['pubmed']+'|t|' + title.replace('\n',' ')+'\n')
            f.write(article['pubmed']+'|a|' + abstract.replace('\n',' ')+'\n\n')
## Save all articles into a single JSONL file
def save_to_jsonl(articles, output_file):
    '''
    Write *articles* to *output_file* as JSON Lines (one object per line).

    Opens the file with an explicit UTF-8 encoding and keeps non-ASCII
    characters readable (ensure_ascii=False) — titles and abstracts may
    contain non-Latin text.
    '''
    with open(output_file, 'w', encoding='utf-8') as f:
        for article in articles:
            f.write(json.dumps(article, ensure_ascii=False) + '\n')
## Process one XML file: parse it and export the filtered articles
def process_file(xml_file,to='Pubtator'):
    """Parse *xml_file* and export its filtered articles in format *to*.

    A sibling "<stem>.<to>.flag" file marks completed work, so re-running
    over the same inputs is a cheap no-op.

    Parameters:
        xml_file: path to a PubMed XML file (expected to end in '.xml').
        to: output format, 'Pubtator' (one PubTator file per article)
            or 'jsonl'; any other value silently writes nothing but still
            creates the flag file.

    NOTE(review): for to='jsonl' this passes output_folder — a directory
    created by os.makedirs below — to save_to_jsonl, which opens it as a
    regular file; that branch looks like it must fail. Confirm and fix.
    """
    # date_str = datetime.date.today().strftime("%Y%m%d")
    # Flag file recording that this (xml_file, format) pair is done
    flag_file = f"{os.path.splitext(xml_file)[0]}.{to}.flag"
    # Derive the output dir: strip '.xml', swap 'xml' path segment for 'PubTator'.
    # NOTE(review): str.replace substitutes EVERY 'xml' occurrence in the
    # path, not just the directory name — fragile if 'xml' appears elsewhere.
    output_folder = xml_file[:-4].replace('xml','PubTator')
    if os.path.exists(flag_file):
        # Already processed: just report and return
        print(xml_file,output_folder)
        return
    try:
        os.makedirs(output_folder,exist_ok=True)
        articles = parse_xml(xml_file)
        if to.lower() == 'pubtator':
            save_to_PubTator(articles, output_folder)
        elif to.lower() == 'jsonl':
            save_to_jsonl(articles, output_folder)
        print(xml_file,output_folder)
        # Touch the flag file only after a fully successful export
        open(flag_file,'a').close()
    except Exception as e:
        # Best-effort batch processing: log the failure and move on,
        # leaving no flag file so a later run retries this input.
        print(e)
        print('failed:',xml_file)
        # os.remove(xml_file)
        return