import requests, urllib3
import time
import os
import zipfile
from openpyxl import Workbook
from datetime import datetime
from bs4 import BeautifulSoup
import yaml

# Load scraper configuration (URLs and request-header templates) from YAML.
with open('config.yml', 'r', encoding='utf-8') as f:
    config = yaml.safe_load(f)

# Session cookies live in plain-text files so they can be refreshed without
# editing the script.  strip() drops the trailing newline most editors append
# to the file -- a newline inside an HTTP header value would make `requests`
# reject the request as an invalid header.
with open('cookie_huiwu.txt', 'r', encoding='utf-8') as file:
    cookie_huiwu = file.read().strip()

with open('cookie_oa.txt', 'r', encoding='utf-8') as file:
    cookie_oa = file.read().strip()

def _safe_name(name, limit):
    """Truncate *name* to *limit* chars and neutralize path separators so a
    remote-supplied title cannot escape (or break) the target folder."""
    return name[:limit].replace('/', '_').replace('\\', '_')

def _save_stream(url, dest_path):
    """Stream *url* to *dest_path* in 8 KiB chunks.

    Returns True when the server answered with an octet-stream body that was
    written to disk, False when there was no downloadable content.  HTTP
    errors propagate to the caller.
    """
    response = requests.get(url, stream=True, headers=config['huiwu_headers'], verify=False)
    response.raise_for_status()
    if "application/octet-stream" not in response.headers.get('Content-Type', ''):
        return False
    with open(dest_path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:  # skip keep-alive chunks
                f.write(chunk)
    return True

def get_MtFile(json_data):
    """Download the body PDF and the zipped attachments for every meeting
    record in *json_data*, oldest first, into 文件/<MMDD>/<title>/.

    Side effects: creates directories/files under ./文件 and prints one
    status line per record.  A failure on one record is reported and does
    not abort the remaining records.

    NOTE: paths are built with os.path.join instead of the previous
    os.chdir dance, so the working directory is never changed.
    """
    folder_path = "文件"
    os.makedirs(folder_path, exist_ok=True)

    # TODO cnt would limit the run to the earliest 15 records
    # cnt = 0
    for item in json_data[::-1]:
        # cnt = cnt + 1
        # if cnt == 15 : break

        # FJoinTime is "YYYY-mm-dd HH:MM:SS"; records are grouped by mmdd.
        date_obj = datetime.strptime(item['FJoinTime'], "%Y-%m-%d %H:%M:%S")
        file_name_date = date_obj.strftime("%m%d")  # e.g. 0509

        # One folder per record: 文件/<MMDD>/<title, max 83 chars to keep
        # paths within filesystem limits>
        file_path = os.path.join(folder_path, file_name_date, _safe_name(item['FName'], 83))
        os.makedirs(file_path, exist_ok=True)

        short_name = item['FName'][:79]
        try:
            # Meeting body PDF
            pdf_url = config['huiwu_url'] + "/getMtFile.html?mtrkey=" + item['rKey']
            if _save_stream(pdf_url, os.path.join(file_path, _safe_name(item['FName'], 79) + ".pdf")):
                print(f"{short_name} 已保存正文")
            else:
                print(f"{short_name} 无正文")

            # Zipped attachment bundle
            zip_url = config['huiwu_url'] + "/fileDownAll.html?rkey=" + item['rKey']
            zip_path = os.path.join(file_path, "全部附件.zip")
            if _save_stream(zip_url, zip_path):
                print(f"{short_name} 已保存附件")
                # Unzip in place, then delete the archive.
                try:
                    extract_dir = os.path.join(file_path, "全部附件")
                    os.makedirs(extract_dir, exist_ok=True)
                    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                        zip_ref.extractall(extract_dir)
                    os.remove(zip_path)
                except zipfile.BadZipFile:
                    print("错误：文件不是有效的 ZIP 文件")
                except Exception as e:
                    print(f"解压文件时发生错误: {e}")
            else:
                print(f"{short_name} 无附件")

        except Exception as e:
            print(f"下载失败: {e}")

def only_file_path(json_data):
    """Create the folder skeleton 文件夹/<MMDD>/<title> for every record,
    oldest first, without downloading any files."""
    root = "文件夹"
    os.makedirs(root, exist_ok=True)

    for item in reversed(json_data):
        # FJoinTime is "YYYY-mm-dd HH:MM:SS"; folders are grouped by mmdd.
        joined = datetime.strptime(item['FJoinTime'], "%Y-%m-%d %H:%M:%S")
        day_folder = os.path.join(root, joined.strftime("%m%d"))  # e.g. 文件夹/0509
        os.makedirs(day_folder, exist_ok=True)

        # Titles are capped at 83 characters to keep paths within OS limits.
        os.makedirs(os.path.join(day_folder, item['FName'][:83]), exist_ok=True)

# Long department names are shortened to the abbreviations used in the
# exported spreadsheet; unmapped departments pass through unchanged.
_DEPT_MAPPING = {
    "市发展改革局": "市发改局",
    "市工业和信息化局": "市工信局",
    "市住房城乡建设局": "市住建局",
    "市机关事务管理局": "市机关事务管",
    "市人大常委会办公室": "市人大常委办",
    "市政府办公室": "市府办",
    "市应急管理局": "市应急局",
    "市市场监督管理局": "市市场监管局",
    "百千万指挥办": "市百千万指挥办",
    "市人力资源社会保障局": "市人社局",
    "市卫生健康局": "市卫健局",
    "市直机关工委": "市直工委"
}

def write_xlxs(json_data, search_oa_switch='off'):
    """Export the records to 已导出的数据.xlsx, oldest first.

    Each row is [date, abbreviated department, title]; the date is only
    printed on the first row of each day.  With search_oa_switch='on' two
    empty columns plus the OA department lookup are appended per row.

    Side effects: writes 已导出的数据.xlsx in the current directory and
    prints each title as a progress indicator.
    """
    wb = Workbook()
    ws = wb.active

    # Headers are intentionally omitted (kept for reference):
    # headers = ['FJoinTime', 'FCreateDept', 'FName']
    # ws.append(headers)

    previous_date = None  # last date written, to blank out repeats

    for item in json_data[::-1]:
        date_obj = datetime.strptime(item['FJoinTime'], "%Y-%m-%d %H:%M:%S")
        # No zero padding: 2025/5/9 rather than 2025/05/09.
        formatted_date = f"{date_obj.year}/{date_obj.month}/{date_obj.day}"

        # Only show the date on the first record of each day.
        if formatted_date == previous_date:
            formatted_date = ""
        else:
            previous_date = formatted_date

        # Look up the abbreviation without mutating the caller's data.
        dept = _DEPT_MAPPING.get(item['FCreateDept'], item['FCreateDept'])

        if search_oa_switch == 'on':
            # OA lookup may return None when the document is not found.
            dep = search_in_oa(item['FName'])
            ws.append([formatted_date, dept, item['FName'], '', '', dep])
        else:
            ws.append([formatted_date, dept, item['FName']])
        print(item['FName'])

    wb.save('已导出的数据.xlsx')
    # print("数据已成功写入 已导出的数据.xlsx 文件")

def get_json_data(num1, num2):
    """Fetch meeting records from the huiwu listing endpoint.

    Pages hold 15 records: page n covers offsets (n-1)*15 .. n*15-1.
    When num2 < num1 (the single-page caller passes -1) only page num1 is
    requested; otherwise pages num1..num2 are fetched in one request.

    Returns a list of dicts with keys rKey / FJoinTime / FCreateDept
    (spaces removed) / FName.  On an expired session the server replies
    with a login prompt; the user is told to refresh the cookie and the
    process exits.
    """
    if num1 >= num2 :
        start = str((num1-1)*15)
        url = config['huiwu_url'] + "/getJson.html?&draw=1&start=" + start + "&length=15&page="+ str(num1) +"&limit=15&action=all&_="
    else:
        start = str((num1-1)*15)
        length = str((num2-num1+1)*15)
        url = config['huiwu_url'] + "/getJson.html?&draw=1&start=" + start + "&length=" + length + "&page="+ str(num1) +"&limit=" + length + "&action=all&_="

    # Cache-busting timestamp in milliseconds, mirroring the web client.
    response = requests.get(url + str(int(time.time() * 1000)), headers=config['huiwu_headers'], verify=False)
    if response.text == '请先登录系统 !':
        input('获取失败，请尝试更新会务系统的Cookie')
        exit()
    json_data = response.json()

    # Keep only the fields the exporters need.
    extracted_data = []
    for item in json_data['data']:
        extracted_data.append({
            'rKey': item.get('rKey'),
            'FJoinTime': item.get('FJoinTime'),
            # FCreateDept may be missing/None; guard before stripping spaces.
            'FCreateDept': (item.get('FCreateDept') or '').replace(" ", ""),
            'FName': item.get('FName'),
        })
    return extracted_data

def search_in_oa(filename):
    """Search the OA system for *filename* and return the handling
    department(s) extracted from the first hit's detail page.

    Returns a '、'-joined string of matched department names (with the
    '拟请' prefix removed), or None when no document matches or the detail
    page has no form section.  Exits the process when the OA session
    cookie has expired.
    """
    # -- search: collect record ids embedded as <input name="ids[]"> --
    response = requests.get(config['oa_url']['search_url'] + filename, headers=config['oa_headers'])
    content = response.text
    if '你还没有登录，请先登录' in content:
        input('获取失败，请尝试更新oa的Cookie')
        exit()
    soup = BeautifulSoup(content, 'html.parser')
    matches = [tag.get('value') for tag in soup.find_all('input', {'name': 'ids[]'})]

    if not matches:
        # Document not found in OA.
        return None

    # -- detail: fetch the first hit and scan its form text --
    get_det_url = config['oa_url']['detail_url'] + matches[0]
    response = requests.get(get_det_url, headers=config['oa_headers'])
    soup = BeautifulSoup(response.text, 'html.parser')
    form_div = soup.find('div', {'id': 'form'})
    if form_div is None:
        # Page layout changed or access denied -- treat as "not found"
        # instead of crashing with AttributeError.
        return None
    text = form_div.text

    # Substring match against the known department names.
    departments = ['局办公室', '高新科', '产学研科', '法规科', '社农科', '成果科','情报所', '机关党委', '地震监测中心','拟请各科室','拟请各业务科室','局工会','局妇委会','局办公室（财务）','局办公室（人事）']
    dep = [d for d in departments if d in text]
    dep_to_string = '、'.join(dep).replace("拟请", "")
    return dep_to_string

if __name__ == "__main__":
    # Silence the InsecureRequestWarning triggered by verify=False requests.
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    # Inject the cookies loaded at import time into the request headers.
    config['huiwu_headers']['Cookie'] = cookie_huiwu
    config['oa_headers']['Cookie'] = cookie_oa

    # A page holds 15 records; "1-8" fetches a range of pages, "6" a single page.
    page = input("（1/2）请输入页码（如 1-8 或 6）：")
    if '-' in page:
        bounds = page.split('-')
        json_data = get_json_data(int(bounds[0]), int(bounds[1]))
    else:
        json_data = get_json_data(int(page), -1)

    function = int(input("（2/2）\n1 ----- 导出xlxs\n2 ----- 导出xlxs（包含科室）\n请选择功能序号："))
    # function = int(input("（2/2）\n1 ----- 导出xlxs和文件夹目录\n2 ----- 导出xlxs（包含科室）\n3 ----- 导出xlxs和下载文件（会导致'未阅'消失）\n请选择功能序号："))
    print("请稍等。。。")
    if function == 1:
        # Plain export.
        write_xlxs(json_data, 'off')
        # only_file_path(json_data)
    elif function == 2:
        # Export with the OA department column.
        write_xlxs(json_data, 'on')
        # only_file_path(json_data)
    elif function == 3:
        # Hidden option: also download every document (marks them as read).
        write_xlxs(json_data)
        get_MtFile(json_data)
    else:
        print("请输入 1 或 2 或 3")
    input("操作完毕，按任意键退出")