import os
import shutil
from collections import defaultdict
from datetime import datetime
from urllib.parse import quote
import pandas as pd

import requests
from flask import request, jsonify, send_from_directory, url_for, send_file
import logging
from flask_restx import Namespace, Resource, fields
from flask_jwt_extended import jwt_required
from sqlalchemy import and_, or_
from werkzeug.security import safe_join
from werkzeug.utils import secure_filename

import app1
from app1.models import Zbfwb_kms_alarm

# 导入 tb_alarm 模型
from app1 import db
from app1 import api

from app1.customdate import CustomDate

# Create the Flask-RESTx namespace for the z/OS (mainframe) endpoints.
ns_zos = Namespace('ns_zos', description='主机')

# Directory layout under ./app1/zos: originals are read from input/,
# converted copies are written to the mirror path under output/.
INPUTPATH = './app1/zos/input'
OUTPUT = './app1/zos/output'
# Sub-directories holding the two kinds of files to convert.
LIST1 = '/p1tou1.dlm.jcl'
LIST2 = '/p1tou1.fenqu.proc'
# Base directory where the uploaded Excel workbooks live.
BASEPATH = './app1/zos'

# Mapping-sheet layout: DBNAME TSNAME DBID PSID OBID TBNAME PARTS DSSIZE PGSIZE
# The P1 values start in column A, the F3 ("*1") values in column L.
# Flask-RESTx model describing one merged row of the mapping sheet.
Excel_p1_model = ns_zos.model(
    'P1-PLEXF3',
    {
        # Columns A..I: P1 environment identifiers.
        "DBNAME": fields.String(description='DBNAME'),
        "TSNAME": fields.String(description='TSNAME'),
        "DBID": fields.String(description='DBID'),
        "PSID": fields.String(description='PSID'),
        "OBID": fields.String(description='OBID'),
        "TBNAME": fields.String(description='TBNAME'),
        "PARTS": fields.String(description='PARTS'),
        "DSSIZE": fields.String(description='DSSIZE'),
        "PGSIZE": fields.String(description='PGSIZE'),
        # Columns L..T: corresponding F3 environment identifiers
        # (same fields with a "1" suffix).
        "DBNAME1": fields.String(description='DBNAME'),
        "TSNAME1": fields.String(description='TSNAME'),
        "DBID1": fields.String(description='DBID'),
        "PSID1": fields.String(description='PSID'),
        "OBID1": fields.String(description='OBID'),
        "TBNAME1": fields.String(description='TBNAME'),
        "PARTS1": fields.String(description='PARTS'),
        "DSSIZE1": fields.String(description='DSSIZE'),
        "PGSIZE1": fields.String(description='PGSIZE'),
    }
)

# Request model for the /p1tof3hour endpoint (workbook, sheet, target dir).
# NOTE(review): the registered name 'excel_config' looks swapped with the
# 'excel_config_hour' name used by Excel_config_model just below — confirm.
Excel_config_hour_model = ns_zos.model(
    'excel_config',
    {
        'file_name': fields.String(description='指定文件', required=True, default="hour.xlsx"),
        'sheet_name': fields.String(description='sheet名字', required=True, default="Sheet1"),
        'file_dir': fields.String(description='文件列表目录', required=True, default="p1tou1.dlm.jcl"),
    }
)

# Request model for the /p1tof3 endpoint (workbook and sheet to read).
# NOTE(review): the registered name 'excel_config_hour' looks swapped with
# the 'excel_config' name used by Excel_config_hour_model above — confirm.
Excel_config_model = ns_zos.model(
    'excel_config_hour',
    {
        'file_name': fields.String(description='指定文件', required=True, default="国内核心各环境最新OBID-2024年2412批次投产后20241223"
                                                                                  ".xlsx"),
        'sheet_name': fields.String(description='sheet名字', required=True, default="BSBASE(P1.F3)")
    }
)

# Response envelope for /p1tof3: result code, message text, row count and
# the merged mapping rows.
response_excel_config_model = ns_zos.model('Response_excel_config_fill', {
    'message': fields.Nested(ns_zos.model('Message_excel_config_fill', {
        "result": fields.Integer(description='查询结果返回值'),
        "result_text": fields.String(description='查询结果信息'),
        'count': fields.Integer(description='查询结果返回数据条数'),
        'data': fields.List(fields.Nested(Excel_p1_model))
    }))
})

# Single data row for the /p1tof3hour response: number of dataset names read.
Excel_hour_model = ns_zos.model(
    'excel_hour',
    {
        "datasetcount": fields.Integer(description='数据集条数'),
    }
)

# Response envelope for /p1tof3hour.
# FIX: this model previously re-registered the names
# 'Response_excel_config_fill' / 'Message_excel_config_fill' already used by
# response_excel_config_model above, silently overwriting those entries in
# the namespace's Swagger model registry. Unique names restore correct docs;
# callers reference the Python variable, so nothing else changes.
response_excel_hour_model = ns_zos.model('Response_excel_hour_fill', {
    'message': fields.Nested(ns_zos.model('Message_excel_hour_fill', {
        "result": fields.Integer(description='查询结果返回值'),
        "result_text": fields.String(description='查询结果信息'),
        'count': fields.Integer(description='查询结果返回数据条数'),
        'data': fields.List(fields.Nested(Excel_hour_model))
    }))
})


# p1 -> f3 conversion endpoint
@ns_zos.route('/p1tof3')
class AlarmP1tof3Resource(Resource):
    @ns_zos.expect(Excel_config_model)
    @ns_zos.marshal_with(response_excel_config_model)
    def post(self):
        """p1 -> f3: read the OBID mapping workbook and rewrite the JCL/proc files."""
        try:
            # Workbook and sheet names come from the client payload.
            file_name = api.payload.get('file_name')
            sheet_name = api.payload.get('sheet_name')

            # Server base directory: ./app1/zos
            dir_name = BASEPATH
            base_dir = os.path.abspath(dir_name)

            # Validate the base directory before touching any files.
            if not os.path.exists(base_dir) or not os.path.isdir(base_dir):
                raise ValueError(f"指定目录 {dir_name} 不存在或不是有效目录")

            # SECURITY FIX: file_name is untrusted request input; a plain
            # os.path.join would allow "../" traversal out of base_dir.
            # safe_join returns None (or raises) for unsafe names.
            file_path = safe_join(base_dir, file_name)
            if file_path is None or not os.path.exists(file_path):
                raise FileNotFoundError(f"文件 {file_name} 不存在于路径 {base_dir}")

            # Read the sheet, skipping the first (title) row.
            df = pd.read_excel(file_path, sheet_name=sheet_name, header=1)
            if df.empty:
                raise ValueError(f"文件 {file_name} 的 sheet {sheet_name} 是空的")
            # BUG FIX: was `print(df.info)`, which printed the bound-method
            # object instead of any useful information.
            print("df shape:", df.shape)

            # Actual column names (debug aid).
            print("实际列名:", df.columns)

            # Columns A..I hold the P1 identifiers, columns L..T the F3
            # ("*1") identifiers.  .copy() avoids pandas'
            # SettingWithCopyWarning when the labels are reassigned below.
            columns_a_to_i = df.iloc[:, 0:9].copy()  # A..I
            columns_l_to_t = df.iloc[:, 11:20].copy()  # L..T

            # Map the positional columns onto the expected field names.
            columns_a_to_i.columns = ["DBNAME", "TSNAME", "DBID", "PSID", "OBID", "TBNAME", "PARTS", "DSSIZE", "PGSIZE"]
            columns_l_to_t.columns = ["DBNAME1", "TSNAME1", "DBID1", "PSID1", "OBID1", "TBNAME1", "PARTS1", "DSSIZE1",
                                      "PGSIZE1"]

            # Merge both halves side by side and convert to row dicts.
            result_df = pd.concat([columns_a_to_i, columns_l_to_t], axis=1)
            records = result_df.to_dict(orient='records')

            # Rewrite the dlm.jcl files (SYSUT2 / DBID / PSID / OBID lines).
            process_files_with_excel(records)

            # Rewrite the fenqu.proc files (3x3 id tables).
            process_directory_list2(records)

            # Success response.
            response_data = {
                "message": {
                    "result": 1,  # success
                    "result_text": "文件处理成功",
                    "count": len(records),
                    "data": records
                }
            }
            return response_data, 200

        except Exception as e:
            # Any failure is reported uniformly with result = 0.
            response_data = {
                "message": {
                    "result": 0,  # failure
                    "result_text": f"文件处理失败: {str(e)}",
                    "count": 0,
                    "data": []
                }
            }
            return response_data, 500


def process_files_with_excel(records):
    """Rewrite every dlm.jcl file found under INPUTPATH into OUTPUT.

    Walks INPUTPATH/p1tou1.dlm.jcl and writes a converted copy of each
    regular file to the mirror directory under OUTPUT.

    Parameters:
    - records: list of dicts built from the Excel mapping sheet.
    """
    # Resolve the input/output roots.
    input_dir = os.path.abspath(INPUTPATH)
    output_dir = os.path.abspath(OUTPUT)

    # Directory holding the dlm.jcl files on each side.
    input_path = os.path.join(input_dir, LIST1.strip('/'))
    output_path = os.path.join(output_dir, LIST1.strip('/'))

    # The mirror directory must exist before files are written into it.
    os.makedirs(output_path, exist_ok=True)

    for filename in os.listdir(input_path):
        input_file = os.path.join(input_path, filename)
        output_file = os.path.join(output_path, filename)

        # ROBUSTNESS FIX: listdir also yields subdirectories; opening one
        # would raise IsADirectoryError and abort the whole request.
        if not os.path.isfile(input_file):
            continue

        # Process each regular file.
        process_single_file(input_file, output_file, records)


# def process_single_file(input_file, output_file, records):
#     """
#     Process a single file according to the specified rules
#
#     Parameters:
#     - input_file: path to source file
#     - output_file: path where modified file should be written
#     - records: list of dictionaries containing Excel data
#     """
#     with open(input_file, 'r') as f:
#         lines = f.readlines()
#
#     modified_lines = lines.copy()
#     i = 0
#
#     while i < len(lines):
#         line = lines[i].rstrip()
#         # print("debug1")
#         # print(len(line))
#
#         # More precise check for SYSUT2 DD line:
#         # 1. Starts with //SYSUT2 (exactly)
#         # 2. Has multiple spaces (at least 4) after SYSUT2
#         # 3. Followed by DD
#         if line.startswith("//SYSUT2") and "   DD" in line:
#             # Extract the full SYSUT2 line for comparison
#             sysut2_line = line.strip()
#
#             # For debugging (optional)
#             print(f"Found SYSUT2 line: {sysut2_line}")
#
#             # For each record in Excel data
#             for record in records:
#                 # Check if TSNAME exists in the current line
#                 tsname = record["TSNAME"].strip()
#                 if tsname in sysut2_line:
#                     print(f"Matched TSNAME: {tsname}")
#
#                     # Process the next lines if we have enough lines remaining
#                     if i + 5 < len(lines):
#                         # For debugging (optional)
#                         print("Original lines to modify:")
#                         print(f"Line 3 (DBID): {lines[i + 2].rstrip()}")
#                         print(f"Line 4 (PSID): {lines[i + 3].rstrip()}")
#                         print(f"Line 5 (OBID): {lines[i + 4].rstrip()}")
#
#                         # Replace values in lines 3, 4, and 5 after SYSUT2 DD
#                         # Line 3: DBID
#                         modified_lines[i + 2] = replace_line_value(
#                             lines[i + 2], record["DBID1"])
#
#                         # Line 4: PSID
#                         modified_lines[i + 3] = replace_line_value(
#                             lines[i + 3], record["PSID1"])
#
#                         # Line 5: OBID
#                         modified_lines[i + 4] = replace_line_value(
#                             lines[i + 4], record["OBID1"])
#
#                         # For debugging (optional)
#                         print("Modified lines:")
#                         print(f"Line 3 (DBID): {modified_lines[i + 2].rstrip()}")
#                         print(f"Line 4 (PSID): {modified_lines[i + 3].rstrip()}")
#                         print(f"Line 5 (OBID): {modified_lines[i + 4].rstrip()}")
#         i += 1
#
#     # Write modified content to output file
#     with open(output_file, 'w') as f:
#         f.writelines(modified_lines)

def process_single_file(input_file, output_file, records):
    """Rewrite one dlm.jcl file.

    For every ``//SYSUT2 ... DD`` line whose DSN contains a TSNAME from
    *records*, the lines at offsets +2, +3 and +4 after it (DBID, PSID,
    OBID) get their last value replaced by the matching DBID1/PSID1/OBID1
    columns.

    Parameters:
    - input_file: source file path
    - output_file: path the modified copy is written to
    - records: list of dicts built from the Excel mapping sheet
    """
    with open(input_file, 'r') as f:
        lines = f.readlines()

    modified_lines = lines.copy()
    i = 0

    while i < len(lines):
        line = lines[i].rstrip()

        if line.startswith("//SYSUT2") and "   DD" in line:
            sysut2_line = line.strip()
            print(f"找到SYSUT2行: {sysut2_line}")

            for record in records:
                # ROBUSTNESS FIX: Excel cells may come back as floats/NaN;
                # str() avoids AttributeError, and blank/NaN names must be
                # skipped because "" is a substring of every line and would
                # wrongly match every SYSUT2 block.
                tsname = str(record["TSNAME"]).strip()
                if not tsname or tsname.lower() == 'nan':
                    continue
                if tsname in sysut2_line:
                    print(f"匹配到TSNAME: {tsname}")

                    # Only patch when the full DBID/PSID/OBID group is present.
                    if i + 5 < len(lines):
                        print("要修改的原始行:")
                        print(f"第3行 (DBID): {lines[i + 2].rstrip()}")
                        print(f"第4行 (PSID): {lines[i + 3].rstrip()}")
                        print(f"第5行 (OBID): {lines[i + 4].rstrip()}")

                        modified_lines[i + 2] = replace_line_value(
                            lines[i + 2], record["DBID1"])
                        modified_lines[i + 3] = replace_line_value(
                            lines[i + 3], record["PSID1"])
                        modified_lines[i + 4] = replace_line_value(
                            lines[i + 4], record["OBID1"])

                        print("修改后的行:")
                        print(f"第3行 (DBID): {modified_lines[i + 2].rstrip()}")
                        print(f"第4行 (PSID): {modified_lines[i + 3].rstrip()}")
                        print(f"第5行 (OBID): {modified_lines[i + 4].rstrip()}")
        i += 1

    # Guarantee every output line is newline-terminated.
    final_lines = [line if line.endswith('\n') else line + '\n' for line in modified_lines]

    # Write the modified content to the output file.
    with open(output_file, 'w') as f:
        f.writelines(final_lines)


def replace_line_value(line, new_value):
    """Replace the last whitespace-separated token of *line* with *new_value*.

    The leading indentation of the original line is preserved, but the
    tokens themselves are re-joined with four spaces (the original inner
    column spacing is NOT reproduced exactly).  Lines with fewer than
    three tokens are returned unchanged.  The result always ends with
    exactly one newline.
    """
    # split() drops all whitespace runs, so no extra filtering is needed.
    parts = line.split()

    if len(parts) >= 3:
        parts[-1] = str(new_value)
        # Keep whatever indentation preceded the first token.
        original_spacing = line[:line.find(parts[0])]
        return original_spacing + '    '.join(parts) + '\n'

    # BUG FIX: input lines from readlines() usually already end in '\n';
    # blindly appending another produced spurious blank lines in the output.
    return line if line.endswith('\n') else line + '\n'


# =================================================================

def process_directory_list2(records):
    """Rewrite every fenqu.proc file found under INPUTPATH into OUTPUT.

    Walks INPUTPATH/p1tou1.fenqu.proc and writes a converted copy of each
    regular file to the mirror directory under OUTPUT.

    Parameters:
    - records: list of dicts built from the Excel mapping sheet.
    """
    # Resolve the input/output roots.
    input_dir = os.path.abspath(INPUTPATH)
    output_dir = os.path.abspath(OUTPUT)

    # Directory holding the fenqu.proc files on each side.
    input_path = os.path.join(input_dir, LIST2.strip('/'))
    output_path = os.path.join(output_dir, LIST2.strip('/'))

    # Create the output directory if it does not exist yet.
    os.makedirs(output_path, exist_ok=True)

    for filename in os.listdir(input_path):
        input_file = os.path.join(input_path, filename)
        output_file = os.path.join(output_path, filename)

        # ROBUSTNESS FIX: listdir also yields subdirectories; opening one
        # would raise IsADirectoryError and abort the whole request.
        if not os.path.isfile(input_file):
            continue

        # Process each regular file.
        process_single_file_list2(input_file, output_file, records)


# def process_single_file_list2(input_file, output_file, records):
#     """
#     处理单个文件 - 三行三列格式
#     """
#     # 读取文件内容
#     with open(input_file, 'r') as f:
#         lines = f.readlines()
#
#     # 确保文件有三行
#     if len(lines) != 3:
#         print(f"警告: 文件 {input_file} 不是三行格式")
#         return
#
#     # 创建新的行列表
#     modified_lines = []
#
#     # 定义列头映射关系
#     column_mappings = [
#         ("DBID", "DBID1"),
#         ("PSID", "PSID1"),
#         ("OBID", "OBID1")
#     ]
#
#     # 处理每一行
#     for i, (line, (old_col, new_col)) in enumerate(zip(lines, column_mappings)):
#         # 分割行内容（假设是空格分隔）
#         parts = [part.strip() for part in line.split() if part.strip()]
#
#         # 确保行有三列
#         if len(parts) != 3:
#             print(input_file)
#             print(f"警告: 行 {i + 1} 不是三列格式: {line}")
#             modified_lines.append(line)
#             continue
#
#         # 获取第二列的值
#         second_col_value = parts[1]
#
#         # 在records中查找匹配项
#         updated_third_col = None
#         for record in records:
#             if str(record[old_col]) == str(second_col_value):
#                 updated_third_col = record[new_col]
#                 break
#
#         # 如果找到匹配项，更新第三列
#         if updated_third_col is not None:
#             # 保持原始的第一列和第二列，只更新第三列
#             new_line = f"{parts[0]}    {parts[1]}    {updated_third_col}\n"
#             modified_lines.append(new_line)
#         else:
#             # 如果没有找到匹配项，保持原行不变
#             modified_lines.append(line)
#
#     # 写入修改后的内容到输出文件
#     with open(output_file, 'w') as f:
#         f.writelines(modified_lines)

def process_single_file_list2(input_file, output_file, records):
    """Rewrite a three-line, three-column fenqu.proc file.

    Each line is expected to look like ``<NAME> <old-id> <value>``.
    Row 1 is matched against the DBID column of *records*, row 2 against
    PSID and row 3 against OBID; when the second column equals the old
    id, the third column is replaced by the corresponding *1-suffixed
    value.  Files that are not exactly three lines are skipped.
    """
    with open(input_file, 'r') as src:
        content = src.readlines()

    if len(content) != 3:
        print(f"警告: 文件 {input_file} 不是三行格式")
        return

    # Row order fixes which record column each line is matched against.
    row_keys = (("DBID", "DBID1"), ("PSID", "PSID1"), ("OBID", "OBID1"))
    output_rows = []

    for row_no, (raw, (src_key, dst_key)) in enumerate(zip(content, row_keys)):
        tokens = raw.split()

        if len(tokens) != 3:
            print(input_file)
            print(f"警告: 行 {row_no + 1} 不是三列格式: {raw}")
            # Keep the malformed line, normalising its trailing newline.
            output_rows.append(raw if raw.endswith('\n') else raw + '\n')
            continue

        lookup = tokens[1]

        # First record whose old-id column matches the second token wins.
        replacement = next(
            (rec[dst_key] for rec in records if str(rec[src_key]) == str(lookup)),
            None,
        )

        if replacement is None:
            # No match: pass the original line through unchanged.
            output_rows.append(raw if raw.endswith('\n') else raw + '\n')
        else:
            # Rebuild the row with four-space separators and a newline.
            output_rows.append(f"{tokens[0]}    {tokens[1]}    {replacement}\n")

    with open(output_file, 'w') as dst:
        dst.writelines(output_rows)


# p1 -> f3 hour-batch correction endpoint
@ns_zos.route('/p1tof3hour')
class AlarmP1tof3hourResource(Resource):
    @ns_zos.expect(Excel_config_hour_model)
    @ns_zos.marshal_with(response_excel_hour_model)
    def post(self):
        """p1 -> f3 hour correction: fix dataset-name suffixes from a one-column sheet."""
        try:
            # Workbook, sheet and target sub-directory from the payload.
            file_name = api.payload.get('file_name')
            sheet_name = api.payload.get('sheet_name')
            file_dir = api.payload.get('file_dir')

            # Server base directory: ./app1/zos
            dir_name = BASEPATH
            base_dir = os.path.abspath(dir_name)

            # Validate the base directory before touching any files.
            if not os.path.exists(base_dir) or not os.path.isdir(base_dir):
                raise ValueError(f"指定目录 {dir_name} 不存在或不是有效目录")

            # SECURITY FIX: file_name is untrusted request input; a plain
            # os.path.join would allow "../" traversal out of base_dir.
            # safe_join returns None (or raises) for unsafe names.
            file_path = safe_join(base_dir, file_name)
            if file_path is None or not os.path.exists(file_path):
                raise FileNotFoundError(f"文件 {file_name} 不存在于路径 {base_dir}")

            # header=None: the sheet has no header row; every row is data.
            df = pd.read_excel(file_path, sheet_name=sheet_name, header=None)
            if df.empty:
                raise ValueError(f"文件 {file_name} 的 sheet {sheet_name} 是空的")

            # First column holds the corrected dataset names ("PREFIX.SUFFIX").
            records = df.iloc[:, 0].tolist()
            row_count = len(records)

            # Apply the suffix corrections to every file in file_dir.
            process_files_hour_excel(records, file_dir)

            # Success response.
            response_data = {
                "message": {
                    "result": 1,  # success
                    "result_text": "文件处理成功",
                    "count": 1,
                    "data": [
                        {
                            "datasetcount": row_count
                        }
                    ]
                }
            }
            return response_data, 200

        except Exception as e:
            # Any failure is reported uniformly with result = 0.
            response_data = {
                "message": {
                    "result": 0,  # failure
                    "result_text": f"文件处理失败: {str(e)}",
                    "count": 0,
                    "data": []
                }
            }
            return response_data, 500


def process_files_hour_excel(records, file_dir):
    """Copy every file under INPUTPATH/<file_dir> into OUTPUT/<file_dir>,
    applying the dataset-suffix corrections from *records*.

    Parameters:
    - records: list of corrected dataset names ("PREFIX.SUFFIX")
    - file_dir: sub-directory name supplied by the client request
    """
    input_dir = os.path.abspath(INPUTPATH)
    output_dir = os.path.abspath(OUTPUT)

    # SECURITY FIX: file_dir comes from the request payload; safe_join
    # rejects values that would escape the base directories ("../..").
    input_path = safe_join(input_dir, file_dir)
    output_path = safe_join(output_dir, file_dir)
    if input_path is None or output_path is None:
        raise ValueError(f"非法目录名: {file_dir}")

    # Create the output directory.
    os.makedirs(output_path, exist_ok=True)

    # Process each regular file in the input directory.
    for filename in os.listdir(input_path):
        input_file = os.path.join(input_path, filename)
        output_file = os.path.join(output_path, filename)

        # ROBUSTNESS FIX: listdir also yields subdirectories; opening one
        # would raise IsADirectoryError and abort the whole request.
        if not os.path.isfile(input_file):
            continue

        process_hour_file(input_file, output_file, records)


def process_hour_file(input_file, output_file, records):
    """Apply dataset-suffix corrections from *records* to a single file.

    Each record is expected to look like ``PREFIX...NAME.SUFFIX``: the text
    before the last dot is the lookup key, the text after it the corrected
    suffix.  Any line of the form ``...=<key>.<old-suffix>`` whose key is
    known gets its final qualifier replaced by the recorded suffix; all
    other lines are copied through unchanged.
    """
    # Build key -> new-suffix map, splitting each record at its last dot.
    suffix_by_prefix = {}
    for entry in records:
        entry = str(entry).strip()
        if '.' not in entry:
            continue
        prefix, _, suffix = entry.rpartition('.')
        suffix_by_prefix[prefix] = suffix

    with open(input_file, 'r') as src:
        source_lines = src.readlines()

    result = []
    for raw in source_lines:
        text = raw.rstrip('\n')
        replaced = text
        # Only lines containing both a dot and an '=' are candidates,
        # and the '=' must come before the last dot.
        if '.' in text and '=' in text:
            dot_pos = text.rindex('.')
            eq_pos = text.index('=')
            if eq_pos < dot_pos:
                key = text[eq_pos + 1:dot_pos]
                if key in suffix_by_prefix:
                    # Swap the final qualifier for the corrected suffix.
                    replaced = text[:dot_pos] + '.' + suffix_by_prefix[key]
        result.append(replaced + '\n')

    with open(output_file, 'w') as dst:
        dst.writelines(result)
