import time
import pandas as pd

import xlrd
import os
import os.path
from itertools import groupby
import Logger
import openpyxl
from operator import itemgetter


# List every entry name inside the given directory.
def get_all_files_name(target_dir):
    """Return the names (not full paths) of all entries under *target_dir*."""
    return os.listdir(target_dir)


# Normalize the data in an .xls sheet and print per-date status counts.
def ten_state_template(excel_file):
    """Parse an .xls tracking sheet and print status counts grouped by date.

    Expects a sheet named 'Sheet1' whose first row is the header
    (e.g. ['服务商单号', '到货时间', '收件人州', '提单号', '最后一条轨迹']);
    column 1 is the tracking number, column 2 the arrival date.
    """
    data = xlrd.open_workbook(excel_file)
    table = data.sheet_by_name(sheet_name='Sheet1')

    # Header row.
    header = table.row_values(0)

    # Data rows via the public API (the original read the private
    # `table._cell_values` attribute, which is not part of xlrd's contract).
    raw_data = [table.row_values(i) for i in range(1, table.nrows)]

    # De-duplicate on the tracking number (first column), keeping only the
    # first occurrence.  The original used a lambda with a mutable default
    # argument as the "seen" list, which silently persists across calls and
    # would drop rows on a second invocation.
    seen = set()
    unique_data = []
    for row in raw_data:
        if row[0] not in seen:
            seen.add(row[0])
            unique_data.append(row)

    # Pair each row with the header to get one dict per shipment.
    dict_list = [dict(zip(header, row)) for row in unique_data]

    # Second header column is the arrival date.
    arrived_time = header[1]

    # itertools.groupby only groups adjacent items, so sort on the same key first.
    trail_sort = sorted(dict_list, key=lambda x: x[arrived_time])
    trail_group = groupby(trail_sort, key=lambda x: x[arrived_time])

    for key, group in trail_group:
        print(f"{key}:")
        count_number(key, list(group))
        print()


# Normalize the data in an .xlsx sheet and print per-date status counts.
def ten_state_template_xlsx(excel_file):
    """Parse an .xlsx tracking sheet and print status counts grouped by date.

    Expects a sheet named 'Sheet1' whose first row is the header;
    column 1 is the tracking number, column 2 the arrival date.
    """
    wb = openpyxl.load_workbook(excel_file)
    sheet = wb['Sheet1']

    # Data rows (row 2 onward) as plain lists of cell values.
    raw_data = [[cell.value for cell in row] for row in sheet.iter_rows(min_row=2)]

    # De-duplicate on the tracking number (first column), keeping only the
    # first occurrence.  The original lambda-with-mutable-default trick
    # keeps its "seen" list alive across calls, dropping rows on reuse.
    seen = set()
    unique_data = []
    for row in raw_data:
        if row[0] not in seen:
            seen.add(row[0])
            unique_data.append(row)

    # Header row, e.g. ['服务商单号', '到货时间', '收件人州', '提单号', '最后一条轨迹'].
    header = [cell.value for cell in sheet[1]]

    # Pair each row with the header to get one dict per shipment.
    dict_list = [dict(zip(header, row)) for row in unique_data]

    # Second header column is the arrival date.
    arrived_time = header[1]

    # itertools.groupby only groups adjacent items, so sort on the same key first.
    trail_sort = sorted(dict_list, key=lambda x: x[arrived_time])
    trail_group = groupby(trail_sort, key=lambda x: x[arrived_time])

    for key, group in trail_group:
        print(f"{key}:")
        count_number(key, list(group))
        print()


def ten_state_template_csv(csv_file):
    """Parse a CSV tracking file and print status counts grouped by date.

    Kept for reference; `ten_state_template_csv_improved` does the same
    work with pandas built-ins.
    """
    # Read the CSV file.
    df = pd.read_csv(csv_file)
    raw_data = df.values.tolist()

    # De-duplicate on the tracking number (first column).  The original
    # lambda with a mutable default argument kept its "seen" list alive
    # across calls, so a second call would wrongly skip rows.
    seen = set()
    unique_data = []
    for row in raw_data:
        if row[0] not in seen:
            seen.add(row[0])
            unique_data.append(row)

    # Pair each row with the header to get one dict per shipment.
    header = list(df.columns)
    dict_list = [dict(zip(header, row)) for row in unique_data]

    # Second column is the arrival date.
    arrived_time = header[1]

    # itertools.groupby only groups adjacent items, so sort on the same key first.
    trail_sort = sorted(dict_list, key=lambda x: x[arrived_time])
    trail_group = groupby(trail_sort, key=lambda x: x[arrived_time])

    for key, group in trail_group:
        print(f"{key}:")
        count_number(key, list(group))
        print()


def ten_state_template_csv_improved(csv_file):
    """Read a CSV tracking file and print status counts grouped by arrival date.

    Duplicate tracking numbers (first column) keep only their first row.
    """
    frame = pd.read_csv(csv_file)

    # Keep the first row seen for each tracking number.
    deduped = frame.drop_duplicates(subset=frame.columns[0])

    # The second column holds the arrival date; sort on it so that
    # itertools.groupby sees one contiguous run per date.
    date_col = deduped.columns[1]
    records = deduped.sort_values(date_col).to_dict(orient='records')

    for date_key, rows in groupby(records, key=itemgetter(date_col)):
        print(f"{date_key}:")
        count_number(date_key, list(rows))
        print()


# Count how many shipments in a date group sit at each tracking stage.
def count_number(date, table_col_values):
    """Tally the last tracking event ('最后一条轨迹') of each row and print a summary.

    Args:
        date: the group's arrival-date key (used only in the heading line).
        table_col_values: list of row dicts, each carrying a '最后一条轨迹' value.
    """
    warehousing_number = 0          # arrived at the operations center
    ex_warehouse_number = 0         # left center / handed to airline / picked up
    departure_number = 0            # flight departed for destination country
    arrival_number = 0              # arrived in destination country
    customs_clearance_number = 0    # customs released / reached transfer hub
    tail_number = 0                 # last-mile handover (fallback bucket)
    successful_delivery_number = 0  # delivered or held for pickup
    abnormal_delivery_number = 0    # returned to sender / addressee unknown
    undefined_count = 0             # pre-shipment ("Pre...") — needs review

    print(f"{date}的数据有：")
    for row in table_col_values:
        # Empty .xlsx cells come back as None from openpyxl; treat them as
        # an empty string so the `in` tests below don't raise TypeError.
        item = row['最后一条轨迹'] or ''
        if "快件到达作业中心" in item:
            warehousing_number += 1
        elif any(substring in item for substring in ["快件离开作业中心", "货物交给航空公司", "快件已从发件人处提取"]):
            ex_warehouse_number += 1
        elif "货物转运-从机场发往目的国" in item:
            departure_number += 1
        elif "快件到达目的地国家" in item:
            arrival_number += 1
        elif any(substring in item for substring in ["海关已经放行(进口)", "已到达中转中心"]):
            customs_clearance_number += 1
        elif "Delivered" in item or "Held" in item:
            successful_delivery_number += 1
        elif "Return to Sender" in item or "Addressee Unknown" in item:
            abnormal_delivery_number += 1
        elif "Pre" in item:
            undefined_count += 1
        elif "最后一条轨迹" in item:
            # Defensive guard against a stray header row slipping in.
            print("我是标题:最后一条轨迹，别统计我")
        else:
            tail_number += 1

    print_format(warehousing_number, ex_warehouse_number, departure_number, arrival_number, customs_clearance_number,
                 tail_number, successful_delivery_number, abnormal_delivery_number, undefined_count)


# Print one formatted summary section for a date group.
def print_format(warehousing_number, ex_warehouse_number, departure_number, arrival_number, customs_clearance_number,
                 tail_number, successful_delivery_number, abnormal_delivery_number, undefined_count):
    """Print each stage's count, the grand total, and a separator line."""
    counts = (warehousing_number, ex_warehouse_number, departure_number, arrival_number, customs_clearance_number,
              tail_number, successful_delivery_number, abnormal_delivery_number, undefined_count)
    stage_labels = ("入库-出库", "出库-航班起飞", "航班起飞-航班落地", "航班落地-清关",
                    "清关-尾程交接", "尾程交接-派送", "派送成功", "派送异常", "待核查")

    # One line per stage, in the same order as the parameters.
    for label, value in zip(stage_labels, counts):
        print(f"{label}的数量是：{value}")

    # Grand total across every bucket.
    print(f"包裹总量:{sum(counts)}")
    print("________________________________________")


def both_file(file):
    """Dispatch a tracking file to the parser matching its extension.

    Supports .xls, .xlsx and .csv; anything else prints an
    "Unsupported" notice instead of raising.
    """
    _, file_extension = os.path.splitext(file)
    # Normalize case so 'DATA.XLS' is handled like 'data.xls' instead of
    # falling through to the unsupported branch.
    ext = file_extension.lower()

    if ext == '.xls':
        ten_state_template(file)
    elif ext == '.xlsx':
        ten_state_template_xlsx(file)
    elif ext == '.csv':
        ten_state_template_csv_improved(file)
    else:
        print(f"Unsupported file format: {file_extension}")


if __name__ == '__main__':

    # Collect input file names from the per-format directories.
    # The .xls pipeline is currently disabled:
    # xls_file_names = get_all_files_name(r"xls")
    xlsx_file_names = get_all_files_name(r"xlsx")
    csv_file_names = get_all_files_name(r'csv')

    # Previously all three directories were combined: all_files = ["xls/" + file for file in xls_file_names] + [
    # "xlsx/" + file for file in xlsx_file_names] + ['csv/'+file for file in csv_file_names]

    # Prefix each file name with its directory so both_file() can open it.
    all_files = ['csv/' + file for file in csv_file_names] + ["xlsx/" + file for file in xlsx_file_names]

    # Process every file, switching to a fresh timestamped log each time.
    for file in all_files:
        t = time.strftime("-%Y%m%d-%H%M%S", time.localtime())
        # NOTE(review): reassigns sys.stdout through the project Logger
        # module — presumably it tees all prints into 'log<timestamp>.txt';
        # confirm against Logger's implementation.
        Logger.sys.stdout = Logger.Logger('log' + t + '.txt')

        both_file(file)
        print("————执行结束———————")
