# views.py
import time
from venv import logger
from datetime import datetime
import numpy as np
import pandas as pd
from django.shortcuts import render, redirect
import os
from django.conf import settings
from io import BytesIO
import matplotlib.pyplot as plt
import base64
from django.http import JsonResponse
from django.http import HttpResponse
import re
import json
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from datetime import datetime, timedelta
from django.core.cache import cache
# 首页
def home_page(request):
    """Render the site home page."""
    return render(request, 'home.html')

def process_files(wms_path, erp_path):
    """Compare a WMS inventory export against an ERP export.

    Reads both Excel files, aggregates quantities per item name, and
    returns a DataFrame with columns 物品名称 / WMS数量 / ERP数量 /
    差异量 / 对比结果.

    Args:
        wms_path: path to the WMS .xlsx export (data starts on row 3).
        erp_path: path to the ERP .xlsx export (data starts on row 8).

    Raises:
        RuntimeError: wrapping any failure while reading or processing.
    """
    try:
        # ================= WMS file =================
        wms_raw = pd.read_excel(wms_path, header=None, skiprows=2)

        # Data ends at the first row whose column A contains「合计」(grand total).
        total_row_index = None
        for idx, row in wms_raw.iterrows():
            if '合计' in str(row[0]):
                total_row_index = idx
                break
        if total_row_index is not None:
            wms_data = wms_raw.iloc[:total_row_index]
        else:
            wms_data = wms_raw

        # Column 3 = item name, column 13 = available quantity.
        # .copy() so the cleaning below edits an independent frame instead
        # of a slice (avoids chained-assignment warnings / silent no-ops
        # under pandas copy-on-write).
        wms_df = wms_data.iloc[:, [3, 13]].copy()
        wms_df.columns = ['物品名称', '可用量']
        wms_df = wms_df.dropna(subset=['物品名称'])  # drop rows without an item name
        # Strip non-numeric characters (units, separators) then coerce.
        wms_df['可用量'] = (
            wms_df['可用量']
                .astype(str)
                .str.replace(r'[^\d.]', '', regex=True)
                .pipe(pd.to_numeric, errors='coerce')
                .fillna(0)
        )

        # Aggregate per item name.
        wms_group = wms_df.groupby('物品名称')['可用量'].sum().reset_index()
        wms_group.columns = ['物品名称', 'WMS数量']

        # ================= ERP file =================
        erp_raw = pd.read_excel(erp_path, header=None, skiprows=7)
        # Data ends at the first row whose column B contains「总计」.
        total_row_index = None
        for idx, row in erp_raw.iterrows():
            if '总计' in str(row[1]):
                total_row_index = idx
                break
        if total_row_index is not None:
            erp_data = erp_raw.iloc[:total_row_index]
        else:
            erp_data = erp_raw

        # Column 1 (B) = item name, column 23 (X) = closing balance.
        erp_df = erp_data.iloc[:, [1, 23]].copy()
        erp_df.columns = ['物品名称', '期末结存']
        erp_df = erp_df.dropna(subset=['物品名称'])  # drop rows without an item name
        erp_df['期末结存'] = (
            erp_df['期末结存']
                .astype(str)
                .str.replace(r'[^\d.]', '', regex=True)
                .pipe(pd.to_numeric, errors='coerce')
                .fillna(0)
        )

        # ================= merge & compare =================
        # Outer join keeps items present on only one side; missing side -> 0.
        merged_df = pd.merge(
            wms_group,
            erp_df,
            on='物品名称',
            how='outer'
        ).fillna(0)

        merged_df['差异量'] = merged_df['WMS数量'] - merged_df['期末结存']
        merged_df['对比结果'] = np.where(
            merged_df['差异量'] == 0,
            '一致',
            '不一致'
        )

        # Final column order; rename the ERP balance for the report.
        final_df = merged_df[[
            '物品名称',
            'WMS数量',
            '期末结存',
            '差异量',
            '对比结果'
        ]].rename(columns={'期末结存': 'ERP数量'})

        return final_df

    except Exception as e:
        raise RuntimeError(f"文件处理失败: {str(e)}")


def upload_file(request):
    """Handle the WMS-vs-ERP comparison upload.

    POST: validates the two uploaded .xlsx files, saves them under
    MEDIA_ROOT/temp, runs process_files() and returns JSON containing the
    comparison records plus a base64-encoded Excel report. Temp files are
    always deleted in the finally block.
    GET: renders the upload page.

    NOTE(review): `logger` comes from `from venv import logger` at the top
    of this module — presumably meant to be logging.getLogger(__name__);
    verify and fix that import.
    """
    if request.method == 'POST':
        # Pre-declare so the finally-cleanup runs even if saving fails early.
        wms_path, erp_path = None, None

        try:
            # Both files are required.
            if 'wms_file' not in request.FILES or 'erp_file' not in request.FILES:
                return JsonResponse({
                    'status': 'error',
                    'message': '请上传WMS和ERP两个文件'
                }, status=400)

            wms_file = request.FILES['wms_file']
            erp_file = request.FILES['erp_file']

            # Validate extension: the pipeline only reads .xlsx.
            if not (wms_file.name.endswith('.xlsx') and erp_file.name.endswith('.xlsx')):
                return JsonResponse({
                    'status': 'error',
                    'message': '只支持.xlsx格式文件'
                }, status=400)

            if wms_file.size > 10 * 1024 * 1024 or erp_file.size > 10 * 1024 * 1024:  # 10MB cap per file
                return JsonResponse({
                    'status': 'error',
                    'message': '单个文件大小不能超过10MB'
                }, status=400)

            # Sanitize the client filename (strip directories, spaces) and
            # prefix a timestamp to make each upload unique.
            safe_filename = lambda f: os.path.basename(f.name).replace(' ', '_')
            wms_filename = f"wms_{int(time.time())}_{safe_filename(wms_file)}"
            erp_filename = f"erp_{int(time.time())}_{safe_filename(erp_file)}"

            # Save both uploads into the temp directory.
            temp_dir = os.path.join(settings.MEDIA_ROOT, 'temp')
            os.makedirs(temp_dir, exist_ok=True)
            wms_path = os.path.join(temp_dir, wms_filename)
            erp_path = os.path.join(temp_dir, erp_filename)

            with open(wms_path, 'wb+') as f:
                for chunk in wms_file.chunks():
                    f.write(chunk)
            with open(erp_path, 'wb+') as f:
                for chunk in erp_file.chunks():
                    f.write(chunk)

            # Run the comparison and add the column used for chart ranking.
            result_df = process_files(wms_path, erp_path)
            result_df['差异量绝对值'] = result_df['差异量'].abs()

            # Serialize the result to an in-memory Excel workbook, base64 it
            # for the JSON response; buffer is closed either way.
            output = BytesIO()
            try:
                with pd.ExcelWriter(output, engine='openpyxl') as writer:
                    result_df.to_excel(writer, index=False)
                excel_data = output.getvalue()
                excel_b64 = base64.b64encode(excel_data).decode('utf-8')
            finally:
                output.close()

            return JsonResponse({
                'status': 'success',
                'raw_data': result_df.to_json(orient='records', force_ascii=False),
                'excel': {
                    'data': excel_b64,
                    'filename': '库存对比报告.xlsx'
                }
            })

        except Exception as e:
            logger.error(f"文件处理失败: {str(e)}", exc_info=True)
            return JsonResponse({
                'status': 'error',
                'message': f'处理失败: {str(e)}'
            }, status=500)

        finally:
            # Always remove the temp files, on success or failure.
            for file_path in [wms_path, erp_path]:
                if file_path and os.path.exists(file_path):
                    try:
                        os.remove(file_path)
                    except Exception as e:
                        logger.error(f"删除临时文件失败: {file_path}, 错误: {str(e)}")

    return render(request, 'upload.html')


def generate_charts(result_df):
    """Build base64-encoded PNG charts for the comparison result.

    Returns a dict with keys 'comparison_chart' (bar chart of the 10
    largest absolute differences) and 'pie_chart' (consistency split);
    each value is a base64 string, or None when that chart failed.
    """
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False

    charts = {}

    try:
        # Bar chart: top-10 items by absolute difference.
        # Create the axes explicitly and hand it to pandas — without ax=,
        # DataFrame.plot opens a second figure and the figsize one above
        # would be orphaned and never closed (memory leak).
        fig, ax = plt.subplots(figsize=(12, 7))
        top10 = result_df.nlargest(10, '差异量绝对值')
        top10.plot(x='物品名称', y=['WMS数量', 'ERP数量'], kind='bar', ax=ax)
        ax.set_title('库存数量对比（TOP10差异商品）', fontsize=14)
        plt.xticks(rotation=35, ha='right')
        fig.tight_layout()

        buf = BytesIO()
        fig.savefig(buf, format='png', dpi=120)
        charts['comparison_chart'] = base64.b64encode(buf.getvalue()).decode()
        buf.close()
    except Exception:
        charts['comparison_chart'] = None
    finally:
        # Close every open figure so repeated requests cannot accumulate.
        plt.close('all')

    try:
        # Pie chart: share of 一致 vs 不一致 rows.
        fig, ax = plt.subplots(figsize=(8, 8))
        status_counts = result_df['对比结果'].value_counts()
        status_counts.plot.pie(
            ax=ax,
            autopct='%1.1f%%',
            startangle=90,
            wedgeprops={'edgecolor': 'white', 'linewidth': 1}
        )
        ax.set_title('数据一致性分布', fontsize=14)
        ax.set_ylabel('')

        buf = BytesIO()
        fig.savefig(buf, format='png', dpi=120)
        charts['pie_chart'] = base64.b64encode(buf.getvalue()).decode()
        buf.close()
    except Exception:
        charts['pie_chart'] = None
    finally:
        plt.close('all')

    return charts

# ================= 处理 移动日志Excel 的核心逻辑 =================
def process_wms_data(file):
    """Aggregate a WMS move-log Excel export.

    Groups rows by the GROUP_COLS key, sums 数量, classifies each group as
    有码上架/无码上架 depending on whether any 原容器编码 is present, and
    formats the total as "X托Y箱" based on the per-pallet count parsed
    from the 包装 column.

    Args:
        file: path or file-like object readable by pandas.read_excel.

    Returns:
        The aggregated DataFrame (helper column 每托数量 removed).
    """
    df = pd.read_excel(file)

    GROUP_COLS = ["物品名称", "包装", "产线编码", "原货位", "目的货位",
                  "移动类型", "发运地", "托盘类型", "操作人"]
    # Normalize item names: strip and collapse internal whitespace.
    df['物品名称'] = (
        df['物品名称']
            .astype(str)
            .str.strip()
            .str.replace(r'\s+', ' ', regex=True)
    )
    # Aggregate per group key.
    result = (
        df.groupby(GROUP_COLS, as_index=False)
            .agg(
            总数量=('数量', 'sum'),
            是否有码=('原容器编码', lambda x: x.notna().any())
        )
            .assign(上架类型=lambda x: np.where(x['是否有码'], "有码上架", "无码上架"))
            .drop(columns=['是否有码'])
    )

    def calculate_pallet(package_str):
        """Parse "(x*y)" from the package text; NaN when absent."""
        match = re.search(r"\((\d+)\*(\d+)\)", str(package_str))
        if not match:
            return np.nan
        # Multiply the captured factors directly instead of eval()-ing
        # spreadsheet content — eval on untrusted input is unsafe.
        return int(match.group(1)) * int(match.group(2))

    result['每托数量'] = result['包装'].apply(calculate_pallet)

    def format_pallets(row):
        """Render 总数量 as "X托Y箱" given 每托数量."""
        total = row['总数量']
        per_pallet = row['每托数量']

        if pd.isna(per_pallet) or per_pallet == 0:
            return "无效包装"

        try:
            pallets, remainder = divmod(total, per_pallet)
        except Exception:
            # Non-numeric totals keep the original sentinel text.
            return "计算错误"

        parts = []
        if pallets > 0:
            parts.append(f"{int(pallets)}托")
        if remainder > 0:
            parts.append(f"{int(remainder)}箱")

        return "".join(parts) or "0托"

    result['托数'] = result.apply(format_pallets, axis=1)
    return result.drop(columns=['每托数量'])

# ================= 处理 移动日志Excel 数据展示在前端 =================
def movelog_file(request):
    """Upload a move-log Excel file and stage the processed result.

    POST: runs process_wms_data() on the upload, stores a base64 Excel
    download plus the table data in the session, then redirects to the
    display view. GET: renders the upload page.
    """
    if request.method == 'POST':
        try:
            # The uploaded move-log workbook.
            movelog_file = request.FILES['file']

            # Aggregate the log into the result table.
            result_df = process_wms_data(movelog_file)

            # Build an in-memory Excel download and stash it in the session
            # so the display page can trigger it once.
            output = BytesIO()
            result_df.to_excel(output, index=False)
            excel_b64 = base64.b64encode(output.getvalue()).decode('utf-8')

            request.session['wms_download'] = {
                'data': excel_b64,
                'filename': '移动日志处理结果.xlsx'
            }

            # Raw table data for rendering/filtering on the display page.
            request.session['wms_data'] = {
                'columns': result_df.columns.tolist(),
                'data': result_df.to_dict('records')
            }
            return redirect('display')

        except Exception as e:
            return render(request, 'MoveLog.html', {'error': str(e)})

    # GET: show the upload form.
    return render(request, 'MoveLog.html')


def display_data(request):
    """Display the processed move-log with search filters and pagination.

    Reads the table staged by movelog_file() from the session, applies
    the query-string filters, computes quantity/pallet totals over the
    filtered rows and renders one page as an HTML table.
    """
    context = {}

    # One-shot download payload stored by movelog_file (popped so the
    # browser only auto-downloads once).
    download_data = request.session.pop('wms_download', None)
    if download_data:
        context['download_trigger'] = json.dumps({
            'data': download_data['data'],
            'filename': download_data['filename']
        }, ensure_ascii=False)

    session_data = request.session.get('wms_data')
    if not session_data:
        return redirect('movelog')

    df = pd.DataFrame(session_data['data'], columns=session_data['columns'])

    # Query parameters.
    operator = request.GET.get('operator', '').strip()
    item_name = request.GET.get('item_name', '').strip()
    original_location = request.GET.get('original_location', '').strip()
    destination_location = request.GET.get('destination_location', '').strip()
    shipment_type = request.GET.get('shipment_type', '').strip()

    # Apply filters; na=False keeps NaN cells from matching or raising.
    if operator:
        df = df[df['操作人'].str.strip() == operator]
    if item_name:
        # Escape the user text: it is used inside a regex by .str.contains.
        clean_item = re.escape(item_name.replace('\t', ' ').strip())
        df = df[df['物品名称'].str.contains(clean_item, case=False, na=False)]
    if original_location:
        df = df[df['原货位'].str.contains(original_location, case=False, na=False)]
    if destination_location:
        df = df[df['目的货位'].str.contains(destination_location, case=False, na=False)]
    if shipment_type:
        df = df[df['上架类型'] == shipment_type]

    # Totals over the filtered rows.
    total_quantity = df['总数量'].sum()

    def extract_pallet(tuo_str):
        """Pull the pallet count out of a "X托Y箱" string (0 when absent)."""
        match = re.search(r'(\d+)托', str(tuo_str))
        return int(match.group(1)) if match else 0
    df['托数部分'] = df['托数'].apply(extract_pallet)
    total_pallets = df['托数部分'].sum()

    def _to_int(value, default):
        """Parse a query-string integer; fall back instead of raising 500."""
        try:
            return int(value)
        except (TypeError, ValueError):
            return default

    # Pagination; per_page is clamped to >= 1 so a crafted per_page=0
    # cannot cause a ZeroDivisionError below.
    per_page = max(1, _to_int(request.GET.get('per_page', 10), 10))
    page = _to_int(request.GET.get('page', 1), 1)
    total = len(df)
    total_pages = (total + per_page - 1) // per_page if total else 1
    page = max(1, min(page, total_pages))
    start = (page - 1) * per_page
    end = start + per_page
    paginated_df = df.iloc[start:end]

    # Render the current page as an HTML table fragment.
    html_table = paginated_df.to_html(
        classes='table table-hover table-bordered',
        index=False,
        na_rep='',
        border=0
    )

    context.update({
        'html_table': html_table,
        'operator': operator,
        'item_name': item_name,
        'original_location': original_location,
        'destination_location': destination_location,
        'shipment_type': shipment_type,
        'page': page,
        'per_page': per_page,
        'total_pages': total_pages,
        'total_quantity': total_quantity,
        'total_pallets': total_pallets,
    })
    return render(request, 'display.html', context)



def download_data(request):
    """Export the (optionally filtered) move-log result as an Excel file.

    Applies the same query parameters, with the same semantics, as
    display_data() so the downloaded file matches what the user sees:
    inputs are stripped, the item-name keyword is regex-escaped, na=False
    stops NaN cells from matching or raising, and the 上架类型 filter is
    honored too.
    """
    session_data = request.session.get('wms_data')
    if not session_data:
        return HttpResponse("请先上传文件")

    df = pd.DataFrame(session_data['data'], columns=session_data['columns'])

    # Same parameter handling as display_data.
    operator = request.GET.get('operator', '').strip()
    item_name = request.GET.get('item_name', '').strip()
    original_location = request.GET.get('original_location', '').strip()
    destination_location = request.GET.get('destination_location', '').strip()
    shipment_type = request.GET.get('shipment_type', '').strip()

    if operator:
        df = df[df['操作人'].str.strip() == operator]
    if item_name:
        # Escape the user text: it is used inside a regex by .str.contains.
        clean_item = re.escape(item_name.replace('\t', ' ').strip())
        df = df[df['物品名称'].str.contains(clean_item, case=False, na=False)]
    if original_location:
        df = df[df['原货位'].str.contains(original_location, case=False, na=False)]
    if destination_location:
        df = df[df['目的货位'].str.contains(destination_location, case=False, na=False)]
    if shipment_type:
        df = df[df['上架类型'] == shipment_type]

    # Serialize the filtered frame to an in-memory workbook.
    output = BytesIO()
    df.to_excel(output, index=False)
    output.seek(0)

    response = HttpResponse(
        output.getvalue(),
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )
    response['Content-Disposition'] = 'attachment; filename="filtered_data.xlsx"'
    return response


def process_shipping_data(file_path):
    """Group waybill rows by plate number (车牌号).

    Returns {plate: [row dicts]} for rows whose 单据状态 is '默认'.

    Raises:
        ValueError: wrapping any failure while reading or processing.
    """
    try:
        df = pd.read_excel(file_path)

        # Only rows still in the default document state are live waybills.
        valid_rows = df[df['单据状态'] == '默认']

        # One record list per plate, keeping only the detail columns.
        detail_cols = ['物品编号', '物品名称', '计划发运量', '发货数量', '货主代码']
        return {
            plate: group[detail_cols].to_dict('records')
            for plate, group in valid_rows.groupby('车牌号')
        }

    except Exception as e:
        raise ValueError(f"数据处理失败: {str(e)}")


def shipping_management(request):
    """Upload a shipping Excel file, parse it and redirect to the display view.

    POST: saves the upload to a temp file, runs process_shipping_data(),
    stores the result in the session. The temp file is always removed.
    GET: renders the upload page.
    """
    if request.method == 'POST':
        file_path = None
        try:
            uploaded_file = request.FILES['file']

            # Save to a temp location; basename() strips any directory
            # components so a crafted filename cannot escape temp_dir.
            temp_dir = os.path.join(settings.MEDIA_ROOT, 'shipping')
            os.makedirs(temp_dir, exist_ok=True)
            file_path = os.path.join(temp_dir, os.path.basename(uploaded_file.name))

            with open(file_path, 'wb+') as f:
                for chunk in uploaded_file.chunks():
                    f.write(chunk)

            # Parse and stash for display_shipping.
            grouped_data = process_shipping_data(file_path)
            request.session['shipping_data'] = grouped_data

            return redirect('display_shipping')

        except Exception as e:
            return render(request, 'shipping_upload.html', {'error': str(e)})

        finally:
            # Remove the temp file whether parsing succeeded or failed
            # (the original only deleted it on success, leaking on error).
            if file_path and os.path.exists(file_path):
                try:
                    os.remove(file_path)
                except OSError:
                    pass

    return render(request, 'shipping_upload.html')


def display_shipping(request):
    """Render the shipping data stored in the session, grouped by plate."""
    shipping_data = request.session.get('shipping_data')

    if not shipping_data:
        # Nothing uploaded yet in this session — back to the upload form.
        return redirect('shipping_management')

    # Flatten the {plate: items} mapping into a template-friendly list.
    grouped = [
        {'plate': plate, 'items': items}
        for plate, items in shipping_data.items()
    ]

    context = {'grouped_data': grouped}
    return render(request, 'display_shipping.html', context)


# views.py 新增部分

def process_picking_data(file):
    """Aggregate a picking-log Excel export into {warehouse: DataFrame}.

    Groups by customer/item/package/warehouse/operator, sums 拣货量,
    classifies groups as 有码发货/无码发货 depending on whether any
    容器编码 is present, and formats totals as "X托Y箱".

    Raises:
        RuntimeError: wrapping any failure while reading or processing.
    """
    try:
        df = pd.read_excel(file)

        # Fail fast with a clear message when the sheet layout is wrong.
        required_columns = ['客户', '物品名称', '包装名称', '库房名称', '操作人']
        for col in required_columns:
            if col not in df.columns:
                raise ValueError(f"Excel文件中缺少必要列: {col}")

        # Normalize item names: strip and collapse internal whitespace.
        df['物品名称'] = (
            df['物品名称'].astype(str).str.strip()
            .str.replace(r'\s+', ' ', regex=True)
        )
        # Aggregate per group key.
        result = (
            df.groupby(required_columns, as_index=False)
            .agg(
                总数量=('拣货量', 'sum'),
                是否有码=('容器编码', lambda x: x.notna().any())
            )
            .assign(发货类型=lambda x: np.where(x['是否有码'], "有码发货", "无码发货"))
            .drop(columns=['是否有码'])
        )

        def calculate_pallet(package_str):
            """Parse "(x*y)" from the package text; NaN when absent."""
            match = re.search(r"\((\d+)\*(\d+)\)", str(package_str))
            if not match:
                return np.nan
            # Multiply the captured factors directly instead of eval()-ing
            # spreadsheet content — eval on untrusted input is unsafe.
            return int(match.group(1)) * int(match.group(2))

        result['每托数量'] = result['包装名称'].apply(calculate_pallet)

        def format_pallets(row):
            """Render 总数量 as "X托Y箱" given 每托数量."""
            total = row['总数量']
            per_pallet = row['每托数量']

            if pd.isna(per_pallet) or per_pallet == 0:
                return "无效包装"

            try:
                pallets, remainder = divmod(total, per_pallet)
            except Exception:
                # Non-numeric totals keep the original sentinel text.
                return "计算错误"

            parts = []
            if pallets > 0:
                parts.append(f"{int(pallets)}托")
            if remainder > 0:
                parts.append(f"{int(remainder)}箱")

            return "".join(parts) or "0托"

        result['托数'] = result.apply(format_pallets, axis=1)

        # Split into one frame per warehouse, dropping the key column.
        grouped_result = {}
        for group_name, group_df in result.groupby('库房名称'):
            grouped_result[group_name] = group_df.drop(columns=['库房名称'])

        return grouped_result

    except Exception as e:
        raise RuntimeError(f"处理拣货日志失败: {str(e)}")


def picking_log(request):
    """Upload a picking-log Excel file, process it and redirect to display.

    POST: saves the upload to a temp file, runs process_picking_data(),
    converts the result to a serializable structure and stores it in the
    session. The temp file is always removed.
    GET: renders the upload page.
    """
    if request.method == 'POST':
        file_path = None
        try:
            picking_file = request.FILES['file']

            # Save to a temp location; basename() strips any directory
            # components so a crafted filename cannot escape temp_dir.
            temp_dir = os.path.join(settings.MEDIA_ROOT, 'picking')
            os.makedirs(temp_dir, exist_ok=True)
            file_path = os.path.join(temp_dir, os.path.basename(picking_file.name))

            with open(file_path, 'wb+') as f:
                for chunk in picking_file.chunks():
                    f.write(chunk)

            # Parse into {warehouse: DataFrame}, then make it session-safe.
            grouped_data = process_picking_data(file_path)
            serializable_data = convert_df_and_series(grouped_data)
            request.session['pick_data'] = serializable_data

            return redirect('display_picking')

        except Exception as e:
            return render(request, 'picking_upload.html', {'error': str(e)})

        finally:
            # Remove the temp file whether parsing succeeded or failed
            # (the original only deleted it on success, leaking on error).
            if file_path and os.path.exists(file_path):
                try:
                    os.remove(file_path)
                except OSError:
                    pass

    return render(request, 'picking_upload.html')


def display_picking(request):
    """Show picking-log results grouped by warehouse.

    Applies the query-string filters, paginates rows inside each warehouse
    (10 per page, one page parameter per warehouse) and then paginates the
    warehouses themselves (5 per page).
    """
    context = {}
    picking_data = request.session.get('pick_data')
    if not isinstance(picking_data, dict):
        return redirect('picking_log')

    # Search parameters from the query string.
    filters = {
        'customer': request.GET.get('customer', '').strip(),
        'item_name': request.GET.get('item_name', '').strip(),
        'operator': request.GET.get('operator', '').strip(),
        'warehouse': request.GET.get('warehouse', '').strip(),
        'shipment_type': request.GET.get('shipment_type', '').strip()
    }

    def _keyword_filter(items, field, keyword):
        """Case-insensitive substring match on one record field."""
        keyword = keyword.lower()
        return [item for item in items if keyword in str(item.get(field, '')).lower()]

    def _pallets_of(item):
        """Extract the pallet count from a "X托Y箱" string (0 when absent)."""
        match = re.search(r'(\d+)托', str(item['托数']))
        return int(match.group(1)) if match else 0

    processed_data = []
    for plate, items in picking_data.items():
        # The warehouse name is the group key itself — the 库房名称 column
        # was dropped from the per-warehouse records by process_picking_data,
        # so the old per-item lookup always failed; match on the key instead.
        if filters['warehouse'] and filters['warehouse'].lower() not in str(plate).lower():
            continue

        filtered_items = items
        if filters['customer']:
            filtered_items = _keyword_filter(filtered_items, '客户', filters['customer'])
        if filters['item_name']:
            filtered_items = _keyword_filter(filtered_items, '物品名称', filters['item_name'])
        if filters['operator']:
            filtered_items = _keyword_filter(filtered_items, '操作人', filters['operator'])
        if filters['shipment_type']:
            filtered_items = _keyword_filter(filtered_items, '发货类型', filters['shipment_type'])
        if not filtered_items:
            continue

        # Per-warehouse detail pagination (10 rows per page); a distinct
        # page parameter per warehouse keeps the groups independent.
        per_page = 10
        page_key = f'page_{plate}'
        paginator = Paginator(filtered_items, per_page)
        try:
            page_obj = paginator.page(request.GET.get(page_key) or 1)
        except (EmptyPage, PageNotAnInteger):
            # Bad or out-of-range page numbers fall back to page 1.
            page_obj = paginator.page(1)

        processed_data.append({
            'plate': plate,
            'items': list(page_obj),        # rows on the current page
            'page_obj': page_obj,           # for template pagination controls
            'paginator': paginator,
            'page_key': page_key,           # name of this group's page param
            # Totals over the current page only.
            'total_quantity': sum(item['总数量'] for item in page_obj),
            'total_pallets': sum(_pallets_of(item) for item in page_obj),
        })

    # Outer pagination: 5 warehouses per page.
    group_paginator = Paginator(processed_data, 5)
    try:
        group_page_obj = group_paginator.page(request.GET.get('group_page') or 1)
    except (EmptyPage, PageNotAnInteger):
        group_page_obj = group_paginator.page(1)

    context.update(filters)
    context['grouped_data'] = group_page_obj
    context['is_paginated'] = group_page_obj.has_other_pages()

    return render(request, 'display_picking.html', context)


def factory_shipping_management(request):
    """Upload a factory waybill Excel file and redirect to its display view.

    POST: saves the upload to a temp file, runs factory_shipping_data()
    and stores the result in the session. The temp file is always removed.
    GET: renders the upload page.
    """
    if request.method == 'POST':
        file_path = None
        try:
            factory_file = request.FILES['file']

            # Save to a temp location; basename() strips any directory
            # components so a crafted filename cannot escape temp_dir.
            temp_dir = os.path.join(settings.MEDIA_ROOT, 'factory-shipping')
            os.makedirs(temp_dir, exist_ok=True)
            file_path = os.path.join(temp_dir, os.path.basename(factory_file.name))

            with open(file_path, 'wb+') as f:
                for chunk in factory_file.chunks():
                    f.write(chunk)

            # Parse and stash for factory_display_shipping.
            grouped_data = factory_shipping_data(file_path)
            request.session['factory_shipping_data'] = grouped_data

            return redirect('factory_display_shipping')

        except Exception as e:
            return render(request, 'factory_shipping_upload.html', {'error': str(e)})

        finally:
            # Remove the temp file whether parsing succeeded or failed
            # (the original only deleted it on success, leaking on error).
            if file_path and os.path.exists(file_path):
                try:
                    os.remove(file_path)
                except OSError:
                    pass

    return render(request, 'factory_shipping_upload.html')


def factory_display_shipping(request):
    """Render factory shipping data from the session, grouped by factory."""
    session_groups = request.session.get('factory_shipping_data')
    if not session_groups:
        # Nothing uploaded yet — back to the upload form.
        return redirect('factory_shipping_management')

    processed_data = []
    for plate, payload in session_groups.items():
        # Defensive: session data is normally plain lists, but convert any
        # DataFrame that slipped through into template-friendly records.
        if isinstance(payload, pd.DataFrame):
            items = payload.to_dict(orient='records')
        else:
            items = payload

        processed_data.append({'plate': plate, 'items': items})

    context = {'grouped_data': processed_data}
    return render(request, 'factory_display_shipping.html', context)


# views.py 新增部分

def factory_shipping_data(file_path):
    """Aggregate waybill rows into {factory: [item summaries]}.

    Keeps rows whose 单据状态 is '默认', sums planned vs actual shipment
    quantities per (factory, item) and nests the result per factory.

    Raises:
        ValueError: wrapping any failure while reading or processing.
    """
    try:
        df = pd.read_excel(file_path)

        # Only rows in the default document state count.
        active = df[df['单据状态'] == '默认']

        # Sum planned and actual shipment quantities per factory + item.
        totals = (
            active.groupby(['工厂名称', '物品名称'], as_index=False)
            .agg(
                total_plan=('计划发运量', 'sum'),
                total_ship=('发货数量', 'sum')
            )
        )

        # Remaining quantity still to be shipped.
        totals['未发货数量'] = totals['total_plan'] - totals['total_ship']

        # Nest: factory -> list of per-item summaries.
        result = {}
        for _, row in totals.iterrows():
            result.setdefault(row['工厂名称'], []).append({
                '物品名称': row['物品名称'],
                '计划发运量': row['total_plan'],
                '发货数量': row['total_ship'],
                '未发货数量': row['未发货数量']
            })

        return result

    except Exception as e:
        raise ValueError(f"数据处理失败: {str(e)}")


def stock_management(request):
    """Upload a WMS stock Excel file and redirect to the stock display.

    POST: saves the upload to a temp file, runs wms_stock_data(), converts
    the result to a serializable structure and stores it in the session.
    The temp file is always removed.
    GET: renders the upload page.
    """
    if request.method == 'POST':
        file_path = None
        try:
            wms_file = request.FILES['file']

            # Save to a temp location under MEDIA_ROOT/stock; basename()
            # strips any directory components from the client-supplied
            # filename so it cannot escape the temp directory.
            temp_dir = os.path.join(settings.MEDIA_ROOT, 'stock')
            os.makedirs(temp_dir, exist_ok=True)
            file_path = os.path.join(temp_dir, os.path.basename(wms_file.name))

            with open(file_path, 'wb+') as f:
                for chunk in wms_file.chunks():
                    f.write(chunk)

            # Parse, then convert DataFrames/Series to plain structures so
            # the result can live in the session.
            grouped_data = wms_stock_data(file_path)
            serializable_data = convert_df_and_series(grouped_data)
            request.session['wms_stock_data'] = serializable_data

            return redirect('display_stock')

        except Exception as e:
            return render(request, 'wms_upload.html', {'error': str(e)})

        finally:
            # Remove the temp file whether parsing succeeded or failed
            # (the original only deleted it on success, leaking on error).
            if file_path and os.path.exists(file_path):
                try:
                    os.remove(file_path)
                except OSError:
                    pass

    return render(request, 'wms_upload.html')


def wms_stock_data(file_path):
    """Aggregate a WMS stock Excel export into {warehouse: [records]}.

    Rows on abnormal locations are dropped, quantities are summed per
    (所属库房, 物品编码, 物品名称, 包装) and rendered as "X托Y箱" using
    the per-pallet count parsed from the 包装 text.

    Raises:
        ValueError: wrapping any failure while reading or processing.
    """
    try:
        df = pd.read_excel(file_path)

        # Validate the schema first (including 库位状态, which the filter
        # below needs) so a missing column yields a clear message instead
        # of a raw KeyError.
        required_columns = ['库位状态', '所属库房', '物品编码', '物品名称', '包装', '可用数量']
        for col in required_columns:
            if col not in df.columns:
                raise ValueError(f"Excel文件中缺少必要列: {col}")

        # Keep only locations in the normal state; copy so the edits below
        # never touch a view of the original frame.
        filtered = df[df['库位状态'] == '正常'].copy()

        # Normalize item names: strip and collapse internal whitespace.
        filtered['物品名称'] = (
            filtered['物品名称'].astype(str).str.strip()
            .str.replace(r'\s+', ' ', regex=True)
        )

        def extract_numeric_value(value):
            """Extract an integer from a possibly unit-suffixed value."""
            if pd.isna(value):
                return 0
            value_str = str(value).strip()
            try:
                # Plain numeric text (possibly a decimal) converts directly.
                return int(float(value_str))
            except ValueError:
                # Otherwise grab the first run of digits, e.g. "12箱" -> 12.
                match = re.search(r'(\d+)', value_str)
                return int(match.group(1)) if match else 0

        filtered['总数量_数值'] = filtered['可用数量'].apply(extract_numeric_value)

        # Sum quantities per warehouse/item/package combination.
        result = (
            filtered.groupby(['所属库房', '物品编码', '物品名称', '包装'], as_index=False)
            .agg({'总数量_数值': 'sum'})
            .rename(columns={'总数量_数值': '总数量'})
        )

        def calculate_pallet(package_str):
            """Parse "x*y" from the package text into a per-pallet count."""
            if pd.isna(package_str):
                return np.nan
            # Only match bare "number * number" with word boundaries so
            # attached letters/symbols don't produce false positives.
            match = re.search(r'\b(\d+)\s*\*\s*(\d+)\b', str(package_str))
            if match:
                x, y = int(match.group(1)), int(match.group(2))
                if x * y <= 100000:  # plausibility cap
                    return x * y
                return np.nan  # implausibly large product -> treat as invalid
            return np.nan

        result['每托数量'] = result['包装'].apply(calculate_pallet)

        def format_pallets(row):
            """Render 总数量 as "X托Y箱" given 每托数量."""
            per_pallet = row['每托数量']
            # Check NaN before int(): int(float('nan')) raises, which made
            # the original NaN check unreachable and mislabeled a missing
            # package spec as "无效数量" instead of "无效包装".
            if pd.isna(per_pallet):
                return "无效包装"

            try:
                total = int(float(row['总数量']))
                per_pallet = int(float(per_pallet))
            except (ValueError, TypeError):
                return "无效数量"

            if per_pallet <= 0:
                return "无效包装"

            # Business sanity limit on per-pallet counts.
            if per_pallet > 1000:
                return f"每托{per_pallet}件，数量异常"

            pallets, remainder = divmod(total, per_pallet)
            parts = []
            if pallets > 0:
                parts.append(f"{int(pallets)}托")
            if remainder > 0:
                parts.append(f"{int(remainder)}箱")

            return "".join(parts) or "0托"

        result['托数'] = result.apply(format_pallets, axis=1)

        # Group per warehouse; the warehouse name becomes the dict key and
        # is dropped from the per-warehouse records.
        grouped_result = {}
        for group_name, group_df in result.dropna(subset=['所属库房']).groupby('所属库房'):
            grouped_result[group_name] = group_df.drop(columns=['所属库房']).to_dict(orient='records')

        return grouped_result

    except Exception as e:
        raise ValueError(f"数据处理失败: {str(e)}")


def display_stock(request):
    """Show WMS stock grouped by warehouse with search filters and paging."""
    context = {}
    wms_stock_data = request.session.get('wms_stock_data')
    # Session data must be the {warehouse: [records]} mapping; anything
    # else means nothing valid has been uploaded yet.
    if not isinstance(wms_stock_data, dict):
        return redirect('stock_management')

    # Search parameters from the query string.
    filters = {
        'item_hose': request.GET.get('item_hose', '').strip(),
        'item_name': request.GET.get('item_name', '').strip(),
        'item_code': request.GET.get('item_code', '').strip(),
    }

    processed_data = []
    for plate, items in wms_stock_data.items():
        # The warehouse name is the group key itself — wms_stock_data drops
        # the 所属库房 column from the records, so the old per-item lookup
        # matched nothing; filter on the key instead.
        if filters['item_hose'] and filters['item_hose'].lower() not in str(plate).lower():
            continue

        filtered_items = items
        if filters['item_name']:
            keyword = filters['item_name'].lower()
            filtered_items = [item for item in filtered_items
                              if keyword in str(item.get('物品名称', '')).lower()]
        if filters['item_code']:
            keyword = filters['item_code'].lower()
            filtered_items = [item for item in filtered_items
                              if keyword in str(item.get('物品编码', '')).lower()]
        # Skip warehouses with no matches at all.
        if not filtered_items:
            continue

        processed_data.append({
            'plate': plate,
            'items': filtered_items
        })

    # 10 warehouses per page; get_page() tolerates bad page numbers.
    paginator = Paginator(processed_data, 10)
    page_number = request.GET.get('page')
    page_obj = paginator.get_page(page_number)

    context.update(filters)
    context['grouped_data'] = page_obj
    context['is_paginated'] = page_obj.has_other_pages()
    return render(request, 'wms_display.html', context)

def convert_df_and_series(obj):
    """Recursively convert pandas DataFrames/Series into plain dicts/lists.

    Leaves every other value untouched, so the result is safe to store
    in a Django session (JSON-serializable containers all the way down).
    """
    if isinstance(obj, pd.DataFrame):
        # One dict per row, keyed by column name.
        return obj.to_dict(orient='records')
    if isinstance(obj, pd.Series):
        return obj.tolist()
    if isinstance(obj, dict):
        return {key: convert_df_and_series(value) for key, value in obj.items()}
    if isinstance(obj, list):
        return [convert_df_and_series(element) for element in obj]
    # Scalars and anything else pass through unchanged.
    return obj

def process_factory_picking_data(file):
    """Process a factory picking-log Excel file.

    Reads the workbook, validates the required columns, tags each row as
    coded ("有码发货") or uncoded ("无码发货") shipment based on its
    container code, aggregates pick quantities, and groups the result by
    warehouse.

    Args:
        file: path or file-like object accepted by ``pd.read_excel``.

    Returns:
        dict: warehouse name -> DataFrame with columns
        物品名称 / 发货类型 / 总数量.

    Raises:
        RuntimeError: if the file cannot be read or a required column is
            missing (original exception chained as the cause).
    """
    try:
        df = pd.read_excel(file)

        # Fail fast when a required column is absent.
        required_columns = ['物品名称', '库房名称', '拣货量', '容器编码']
        missing = [col for col in required_columns if col not in df.columns]
        if missing:
            raise ValueError(f"Excel文件中缺少必要列: {missing[0]}")

        # Normalize item names: strip and collapse internal whitespace.
        df['物品名称'] = (
            df['物品名称'].astype(str).str.strip()
            .str.replace(r'\s+', ' ', regex=True)
        )

        # Shipment type: non-empty container code => coded, else uncoded.
        df['发货类型'] = df['容器编码'].apply(
            lambda x: "有码发货" if pd.notna(x) and str(x).strip() != "" else "无码发货"
        )

        # Total pick quantity per (warehouse, item, shipment type).
        grouped = df.groupby(['库房名称', '物品名称', '发货类型'], as_index=False).agg(
            总数量=('拣货量', 'sum')
        )

        # Split into one DataFrame per warehouse (warehouse column dropped).
        grouped_result = {}
        for group_name, group_df in grouped.groupby('库房名称'):
            grouped_result[group_name] = group_df.drop(columns=['库房名称'])

        return grouped_result
    except Exception as e:
        # Chain the cause so the original traceback is preserved.
        raise RuntimeError(f"处理工厂拣货日志失败: {str(e)}") from e


def factory_picking_log(request):
    """Upload endpoint for factory picking logs.

    POST: save the uploaded workbook, aggregate it, stash a serializable
    copy in the session, and redirect to the display view.
    GET: render the upload form.
    """
    if request.method != 'POST':
        return render(request, 'factory_picking_upload.html')
    try:
        uploaded = request.FILES['file']
        # Persist the upload to a temp location so pandas can read it.
        upload_dir = os.path.join(settings.MEDIA_ROOT, 'factory_picking')
        os.makedirs(upload_dir, exist_ok=True)
        saved_path = os.path.join(upload_dir, uploaded.name)
        with open(saved_path, 'wb+') as out:
            for chunk in uploaded.chunks():
                out.write(chunk)
        # Parse/aggregate, then keep a session-safe copy of the result.
        request.session['factory_pick_data'] = convert_df_and_series(
            process_factory_picking_data(saved_path)
        )
        os.remove(saved_path)
        return redirect('display_factory_picking')
    except Exception as e:
        return render(request, 'factory_picking_upload.html', {'error': str(e)})


def display_factory_picking(request):
    """Render per-warehouse pie charts of shipment-type totals.

    Reads the aggregated picking data stored in the session by
    factory_picking_log; redirects to the upload view when absent.
    """
    context = {}

    # Pull the aggregated picking data previously stored in the session.
    picking_data = request.session.get('factory_pick_data')
    if not isinstance(picking_data, dict):
        return redirect('factory_picking_log')

    # Configure Matplotlib for CJK labels and unified font sizing.
    # NOTE(review): rcParams are process-global — concurrent requests share them.
    plt.rcParams['font.sans-serif'] = ['SimHei']  # font with CJK glyph coverage
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
    plt.rcParams['font.size'] = 15  # global font size
    plt.rcParams['axes.titlesize'] = 18  # title font size
    plt.rcParams['legend.fontsize'] = 14  # legend font size
    plt.rcParams['xtick.labelsize'] = 12  # x-axis tick font size
    plt.rcParams['ytick.labelsize'] = 12  # y-axis tick font size

    charts = []

    for plate, items in picking_data.items():
        # Sum quantities per shipment type for this warehouse.
        total_by_type = {}
        for item in items:
            shipment_type = item.get('发货类型', '未知类型')
            if shipment_type == "":
                shipment_type = "无码发货"  # treat an empty string as uncoded shipment
            qty = item.get('总数量', 0)
            total_by_type[shipment_type] = total_by_type.get(shipment_type, 0) + qty
        # Always show the uncoded slice, even when its total is zero.
        if "无码发货" not in total_by_type:
            total_by_type["无码发货"] = 0
        labels = list(total_by_type.keys())
        sizes = list(total_by_type.values())

        if not labels:
            # No data for this warehouse — emit a placeholder card.
            charts.append({
                'plate': plate,
                'pie_chart': None,
                'has_data': False
            })
            continue

        # Draw the pie chart for this warehouse.
        fig, ax = plt.subplots(figsize=(8, 8))
        wedges, texts, autotexts = ax.pie(
            sizes,
            labels=labels,
            autopct='%1.1f%%',
            startangle=90,
            textprops={'fontsize': 40}
        )

        ax.axis('equal')  # keep the pie circular


        # Place the legend to the right of the chart.
        ax.legend(wedges, labels,
                  title="发货类型",
                  loc="center left",
                  bbox_to_anchor=(1, 1, 0.5, 0),
                  prop={'size': 14})

        # Serialize the figure to a base64 PNG for inline <img> embedding.
        buf = BytesIO()
        plt.savefig(buf, format='png', dpi=150, bbox_inches='tight')
        plt.close()

        chart_b64 = base64.b64encode(buf.getvalue()).decode('utf-8')

        charts.append({
            'plate': plate,
            'pie_chart': chart_b64,
            'has_data': True
        })

    context['charts'] = charts
    return render(request, 'display_factory_picking.html', context)

def productionOrderGatherQuery_log(request):
    """Upload endpoint for production-order collection reports.

    POST: save the file, parse it, store a serializable copy in the
    session, and redirect to the display view. GET: render the form.
    """
    if request.method != 'POST':
        return render(request, 'productionOrderGatherQuery_picking_upload.html')
    try:
        uploaded = request.FILES['file']
        # Write the upload to a temp path so pandas can open it.
        upload_dir = os.path.join(settings.MEDIA_ROOT, 'productionOrderGatherQuery_picking')
        os.makedirs(upload_dir, exist_ok=True)
        saved_path = os.path.join(upload_dir, uploaded.name)
        with open(saved_path, 'wb+') as out:
            for chunk in uploaded.chunks():
                out.write(chunk)
        # Parse and stash a session-safe copy of the result.
        request.session['productionOrderGatherQuery_pick_data'] = convert_df_and_series(
            productionOrderGatherQuery_data(saved_path)
        )
        os.remove(saved_path)
        return redirect('display_productionOrderGatherQuery')
    except Exception as e:
        return render(request, 'productionOrderGatherQuery_picking_upload.html', {'error': str(e)})


def productionOrderGatherQuery_data(file):
    """Parse a production-order collection report, keeping incomplete orders.

    Validates the required columns, normalizes text/time/numeric fields,
    and returns only the rows whose 采集率 (collection rate) is not 100.

    Args:
        file: path or file-like object readable by ``pd.read_excel``.

    Returns:
        pd.DataFrame with the required columns, filtered to 采集率 != 100.

    Raises:
        ValueError: on read or validation failure (original cause chained).
    """
    try:
        df = pd.read_excel(file)
        # Fail fast when any required column is absent.
        required_columns = ['采集时间', '线号', '物品名称', '任务单号',
                            '垛号', '包装', '生产总数', '采集总数', '采集率', '库存组织']
        for col in required_columns:
            if col not in df.columns:
                raise ValueError(f"Excel文件中缺少必要列: {col}")

        # Normalize free-text fields.
        df['物品名称'] = df['物品名称'].astype(str).str.strip()
        df['垛号'] = df['垛号'].astype(str).str.strip()
        df['包装'] = df['包装'].astype(str).str.strip()

        # Coerce 采集时间 to datetime (invalid values become NaT).
        if not pd.api.types.is_datetime64_dtype(df['采集时间']):
            df['采集时间'] = pd.to_datetime(df['采集时间'], errors='coerce')

        # Coerce count/rate columns to numeric (invalid values become NaN).
        numeric_cols = ['生产总数', '采集总数', '采集率']
        for col in numeric_cols:
            df[col] = pd.to_numeric(df[col], errors='coerce')

        # Keep only orders whose collection is incomplete (rate != 100).
        filtered_df = df[df['采集率'] != 100]

        # Return just the reporting columns, in a fixed order.
        result_df = filtered_df[[
            '采集时间', '线号', '物品名称', '任务单号',
            '垛号', '包装', '生产总数', '采集总数', '采集率', '库存组织'
        ]]
        return result_df

    except Exception as e:
        # Chain the cause so the original traceback is preserved.
        raise ValueError(f"生产工单采集数据处理失败: {str(e)}") from e

def display_productionOrderGatherQuery(request):
    """Display production-order collection records, filtered and paginated.

    Expects session data stored by the upload views; redirects to the
    upload page when no usable data is present.
    """
    context = {}

    # Fetch the processed records from the session.
    production_data = request.session.get('productionOrderGatherQuery_pick_data')

    # The processor stores a flat record list; regroup it by item name so
    # the template can render one section per item.
    if not isinstance(production_data, dict):
        if isinstance(production_data, list) and len(production_data) > 0 and '物品名称' in production_data[0]:
            production_data = convert_to_dict_by_item_name(production_data)
        else:
            return redirect('productionOrderGatherQuery_upload')

    # Read filter parameters from the query string.
    filters = {
        'item_name': request.GET.get('item_name', '').strip(),
        'duanumber': request.GET.get('duanumber', '').strip(),
        'start_time': request.GET.get('start_time', None),
        'end_time': request.GET.get('end_time', None),
        'packaging': request.GET.get('packaging', '').strip()
    }

    # Parse the time bounds in place.
    # NOTE(review): a malformed timestamp raises ValueError here uncaught —
    # the request would 500; consider a try/except like the sibling view.
    if filters['start_time']:
        filters['start_time'] = datetime.strptime(filters['start_time'], '%Y-%m-%d %H:%M:%S')
    if filters['end_time']:
        filters['end_time'] = datetime.strptime(filters['end_time'], '%Y-%m-%d %H:%M:%S')

    # Apply the filters group by group.
    processed_data = []
    for plate, items in production_data.items():
        if not isinstance(items, list):
            continue

        filtered_items = items

        # Item-name substring filter (case-insensitive).
        if filters['item_name']:
            keyword = filters['item_name'].lower()
            filtered_items = [item for item in filtered_items if keyword in str(item.get('物品名称', '')).lower()]

        # Stack-number substring filter.
        if filters['duanumber']:
            keyword = filters['duanumber'].lower()
            filtered_items = [item for item in filtered_items if keyword in str(item.get('垛号', '')).lower()]

        # Time-range filter (only applied when both bounds were given).
        if filters['start_time'] and filters['end_time']:
            start = filters['start_time']
            end = filters['end_time']
            try:
                filtered_items = [item for item in filtered_items if start <= item.get('采集时间') <= end]
            except:
                pass  # comparison failed (e.g. 采集时间 stored as string) — items left time-unfiltered

        # Packaging substring filter.
        if filters['packaging']:
            keyword = filters['packaging'].lower()
            filtered_items = [item for item in filtered_items if keyword in str(item.get('包装', '')).lower()]

        if filtered_items:
            processed_data.append({
                'plate': plate,
                'items': filtered_items
            })

    # Paginate the groups, 5 per page.
    paginator = Paginator(processed_data, 5)
    page_number = request.GET.get('page') or 1
    try:
        page_obj = paginator.page(page_number)
    except EmptyPage:
        page_obj = paginator.page(1)

    # Summary statistics for the page header.
    # NOTE(review): completion_rate divides the filtered record count by the
    # number of item groups, not by a record count — looks intentional but
    # the semantics are unclear; confirm against the template.
    total_records = sum(len(group['items']) for group in processed_data)
    completed_count = total_records
    total_original = len(production_data.values()) or 1
    completion_rate = round(completed_count / total_original * 100, 2) if total_original > 0 else 100

    context.update({
        'grouped_data': page_obj,
        'is_paginated': page_obj.has_other_pages(),
        'total_records': total_records,
        'completion_rate': completion_rate,
        'total_original': total_original,
        'filters': filters
    })

    return render(request, 'display_productionOrderGatherQuery.html', context)

def productionOrderGatherQuery_upload(request):
    """Upload and process a production-order collection report."""
    if request.method == 'POST':
        try:
            # Save the uploaded workbook into a staging directory.
            upload = request.FILES['file']
            target_dir = os.path.join(settings.MEDIA_ROOT, 'productionOrderGatherQuery_picking')
            os.makedirs(target_dir, exist_ok=True)
            target_path = os.path.join(target_dir, upload.name)
            with open(target_path, 'wb+') as handle:
                handle.writelines(upload.chunks())

            # Parse the report and keep a session-safe copy of the result.
            parsed = productionOrderGatherQuery_data(target_path)
            request.session['productionOrderGatherQuery_pick_data'] = convert_df_and_series(parsed)

            # Remove the staged file, then show the results page.
            os.remove(target_path)
            return redirect('display_productionOrderGatherQuery')
        except Exception as e:
            return render(request, 'productionOrderGatherQuery_picking_upload.html', {'error': str(e)})

    # GET: show the upload form.
    return render(request, 'productionOrderGatherQuery_picking_upload.html')

def convert_to_dict_by_item_name(data):
    """Group a flat list of record dicts by their 物品名称 field.

    Records without the key fall into the '未知' bucket; records with an
    empty name fall into '未命名物品'.
    """
    grouped = {}
    for record in data:
        name = record.get('物品名称', '未知')
        # Blank names get an explicit placeholder bucket.
        if name == '':
            name = '未命名物品'
        grouped.setdefault(name, []).append(record)
    return grouped


# views.py 新增部分

def production_order_analysis_upload(request):
    """Upload a production-order collection report for analysis."""
    if request.method != 'POST':
        return render(request, 'production_order_analysis_upload.html')
    try:
        upload = request.FILES['file']
        # Stage the file on disk so pandas can read it.
        staging_dir = os.path.join(settings.MEDIA_ROOT, 'production_order_analysis')
        os.makedirs(staging_dir, exist_ok=True)
        staged_path = os.path.join(staging_dir, upload.name)
        with open(staged_path, 'wb+') as handle:
            for piece in upload.chunks():
                handle.write(piece)
        # Analyze, serialize, and keep the result in the session.
        analyzed = process_production_order_analysis(staged_path)
        request.session['production_order_analysis_data'] = convert_df_and_series(analyzed)
        # Clean up and show the results page.
        os.remove(staged_path)
        return redirect('display_production_order_analysis')
    except Exception as e:
        return render(request, 'production_order_analysis_upload.html', {'error': str(e)})


def process_production_order_analysis(file_path):
    """Filter a collection report to under-collected rows, grouped by inventory org.

    Keeps rows with 采集率 < 100, derives a 垛号生成时间 column from the
    stack number via extract_duano_time, and groups records by 库存组织.

    Args:
        file_path: path to the Excel report.

    Returns:
        dict: 库存组织 -> list of record dicts.

    Raises:
        RuntimeError: on read/validation failure (original cause chained).
    """
    try:
        # Read the Excel workbook.
        df = pd.read_excel(file_path)

        # Fail fast when a required column is missing.
        required_columns = ['采集时间', '线号', '物品名称', '任务单号', '垛号', '包装',
                            '生产总数', '采集总数', '采集率', '库存组织']
        for col in required_columns:
            if col not in df.columns:
                raise ValueError(f"Excel文件中缺少必要列: {col}")

        # Coerce the collection rate to numeric (invalid -> NaN, excluded below).
        df['采集率'] = pd.to_numeric(df['采集率'], errors='coerce')

        # Keep only under-collected rows; copy so the new column below
        # is added to a real frame, not a view.
        filtered_df = df[df['采集率'] < 100].copy()

        # Derive the stack-number generation time (None when unparseable).
        filtered_df['垛号生成时间'] = filtered_df['垛号'].apply(extract_duano_time)

        # Surface parse failures for operators without aborting the run.
        failed_count = filtered_df['垛号生成时间'].isna().sum()
        if failed_count > 0:
            failed_duanhao = filtered_df[filtered_df['垛号生成时间'].isna()]['垛号'].unique()
            print(f"警告: {failed_count} 条记录的垛号时间解析失败，示例垛号: {failed_duanhao[:5]}")

        # Group by inventory org as lists of plain dicts (session-serializable).
        grouped = {}
        for org, group in filtered_df.groupby('库存组织'):
            grouped[org] = group.to_dict('records')

        return grouped
    except Exception as e:
        # Chain the cause so the original traceback survives.
        raise RuntimeError(f"处理生产工单采集报表失败: {str(e)}") from e


def display_production_order_analysis(request):
    """Display production-order analysis data with per-org and global pagination."""
    context = {}
    data = request.session.get('production_order_analysis_data')
    if not data:
        return redirect('production_order_analysis_upload')

    # Read filter parameters from the query string.
    item_name = request.GET.get('item_name', '').strip()
    duanumber = request.GET.get('duanumber', '').strip()
    line_number = request.GET.get('line_number', '').strip()
    start_time = request.GET.get('start_time', '')
    end_time = request.GET.get('end_time', '')

    # Malformed dates silently disable the time filter entirely.
    try:
        start_time_dt = datetime.strptime(start_time, '%Y-%m-%d') if start_time else None
        end_time_dt = datetime.strptime(end_time, '%Y-%m-%d') if end_time else None
    except ValueError:
        start_time_dt = None
        end_time_dt = None

    # Apply filters per inventory org; each org gets its own paginator.
    grouped_list = []
    for org, records in data.items():
        filtered_records = []

        for record in records:
            # Item-name substring filter (case-insensitive).
            if item_name and item_name.lower() not in str(record.get('物品名称', '')).lower():
                continue
            # Stack-number substring filter.
            if duanumber and duanumber.lower() not in str(record.get('垛号', '')).lower():
                continue
            # Line-number exact match.
            if line_number and line_number != str(record.get('线号', '')):
                continue

            # Time-range filter. Records with an empty 采集时间 skip this
            # block entirely, so they pass even when a range was given.
            record_time_str = record.get('采集时间', '')
            if record_time_str:
                try:
                    if isinstance(record_time_str, str):
                        record_time = datetime.strptime(record_time_str, '%Y-%m-%d %H:%M:%S')
                    else:
                        record_time = record_time_str
                except:
                    record_time = None
                if start_time_dt or end_time_dt:
                    if record_time is None:
                        continue
                    # NOTE(review): the inline conditionals below are redundant —
                    # each branch is already guarded by the same truthiness test.
                    if start_time_dt and record_time.date() < (start_time_dt.date() if start_time_dt else None):
                        continue
                    if end_time_dt and record_time.date() > (end_time_dt.date() if end_time_dt else None):
                        continue

            filtered_records.append(record)

        if filtered_records:
            paginator = Paginator(filtered_records, 10)  # 10 records per page
            page_number = request.GET.get(f'page_{org}') or 1  # per-org page parameter
            try:
                page_obj = paginator.page(page_number)
            except EmptyPage:
                page_obj = paginator.page(1)
            grouped_list.append({
                'org': org,
                'page_obj': page_obj,
                'has_pagination': paginator.num_pages > 1
            })

    # Global pagination: 5 inventory orgs per page.
    group_paginator = Paginator(grouped_list, 5)
    group_page_number = request.GET.get('group_page') or 1
    try:
        group_page_obj = group_paginator.page(group_page_number)
    except EmptyPage:
        group_page_obj = group_paginator.page(1)

    # Echo the raw filter strings back so the form keeps its values.
    context.update({
        'grouped_data': group_page_obj,
        'is_paginated': group_page_obj.has_other_pages(),
        'filters': {
            'item_name': item_name,
            'duanumber': duanumber,
            'line_number': line_number,
            'start_time': start_time,
            'end_time': end_time,
        }
    })
    return render(request, 'display_production_order_analysis.html', context)


def extract_duano_time(duanumber):
    """Extract the embedded creation timestamp from a 垛号 (stack number).

    After stripping non-digit characters, the digits at positions 3-14
    encode YYYYMMDDHHMM (presumably a site-specific layout — confirm with
    the label spec). Returns "YYYY-MM-DD HH:MM" or None when the value
    cannot be parsed.
    """
    try:
        # Reject NaN / None up front.
        if pd.isna(duanumber):
            print("[DEBUG] 输入为 NaN 或 None，跳过")
            return None

        # Coerce to a plain digit string (handles int/float cell values).
        try:
            duanumber = str(int(float(duanumber)))
        except (TypeError, ValueError, OverflowError):
            duanumber = str(duanumber).strip()

        # Drop any non-digit characters.
        duanumber = re.sub(r'[^\d]', '', duanumber)

        # The minute field spans indices 13:15, so at least 15 digits are
        # required. (The previous `< 14` check let 14-digit values through
        # with a truncated one-character minute.)
        if len(duanumber) < 15:
            print(f"[DEBUG] 垛号长度不足: {len(duanumber)}, 垛号: {duanumber}")
            return None

        # Slice out the window that holds the timestamp digits.
        time_str = duanumber[2:16]

        # Field offsets within the window.
        year = time_str[1:5]
        month = time_str[5:7]
        day = time_str[7:9]
        hour = time_str[9:11]
        minute = time_str[11:13]

        # Every field must be purely numeric.
        if not (year.isdigit() and month.isdigit() and day.isdigit() and hour.isdigit() and minute.isdigit()):
            return None

        # Build the normalized timestamp string (strip guards against
        # hidden whitespace sneaking into the result).
        datetime_str = f"{year}-{month}-{day} {hour}:{minute}"
        datetime_str = datetime_str.strip()

        return datetime_str
    except Exception as e:
        print(f"[FATAL ERROR] 未知错误发生在垛号解析中: {repr(duanumber)}, 错误: {str(e)}")
        import traceback
        traceback.print_exc()
        return None

def aging_analysis_upload(request):
    """Upload a stock Excel file and run the aging (酒龄) analysis."""
    if request.method != 'POST':
        return render(request, 'aging_analysis_upload.html')
    try:
        upload = request.FILES['file']
        staging_dir = os.path.join(settings.MEDIA_ROOT, 'aging_analysis')
        os.makedirs(staging_dir, exist_ok=True)
        staged_path = os.path.join(staging_dir, upload.name)

        # Persist the upload so pandas can read it from disk.
        with open(staged_path, 'wb+') as handle:
            for piece in upload.chunks():
                handle.write(piece)

        # Analyze and keep the result for the display view.
        request.session['aging_analysis_data'] = process_aging_data(staged_path)

        # Clean up the staged file before redirecting.
        os.remove(staged_path)

        return redirect('aging_analysis_display')

    except Exception as e:
        return render(request, 'aging_analysis_upload.html', {'error': str(e)})


def process_aging_data(file_path):
    """Aggregate available stock by warehouse + item + aging bucket.

    Reads the stock workbook, buckets each row by 酒龄（天） via
    classify_aging, and returns {warehouse: [item rows]} where each item
    row carries one total per aging interval.
    """
    df = pd.read_excel(file_path)

    # All of these columns must be present.
    required_columns = ['所属库房', '物品编码', '物品名称', '可用数量', '酒龄（天）']
    for column in required_columns:
        if column not in df.columns:
            raise ValueError(f"Excel文件中缺少必要列: {column}")

    # Keep only the needed columns and rows with complete data.
    df = df[required_columns].dropna()
    df['可用数量'] = pd.to_numeric(df['可用数量'], errors='coerce').fillna(0)
    df['酒龄（天）'] = pd.to_numeric(df['酒龄（天）'], errors='coerce').fillna(0)

    # Bucket each row into an aging interval.
    df['酒龄区间'] = df['酒龄（天）'].apply(classify_aging)

    # Pivot: one row per (warehouse, item), one column per aging bucket.
    grouped = (
        df.groupby(['所属库房', '物品名称', '酒龄区间'])['可用数量']
        .sum()
        .unstack(fill_value=0)
        .reset_index()
    )

    # Flatten into {warehouse: [item rows]} for session storage / templates.
    result = {}
    for warehouse, warehouse_df in grouped.groupby('所属库房'):
        result[warehouse] = [
            {
                '物品名称': row['物品名称'],
                '<30天': row.get('<30天', 0),
                '30-60天': row.get('30-60天', 0),
                '60-90天': row.get('60-90天', 0),
                '≥90天': row.get('≥90天', 0),
            }
            for _, row in warehouse_df.iterrows()
        ]

    return result

def aging_analysis_display(request):
    """Render the aging-analysis page from data stored in the session."""
    data = request.session.get('aging_analysis_data')
    if not data:
        # Nothing uploaded yet — send the user back to the upload form.
        return redirect('aging_analysis_upload')

    # Reshape {warehouse: [item dicts]} into the list structure the
    # front-end chart code expects.
    transformed_data = []
    for warehouse, items in data.items():
        entry = {"warehouse": warehouse, "items": []}
        for item in items:
            # Coerce interval totals to float so the JSON stays numeric.
            entry["items"].append({
                "name": item["物品名称"],
                "intervals": {
                    "<30天": float(item.get("<30天", 0)),
                    "30-60天": float(item.get("30-60天", 0)),
                    "60-90天": float(item.get("60-90天", 0)),
                    "≥90天": float(item.get("≥90天", 0)),
                },
            })
        transformed_data.append(entry)

    context = {
        'grouped_data': json.dumps(transformed_data, ensure_ascii=False)
    }
    return render(request, 'aging_analysis_display.html', context)

def classify_aging(age_days):
    """Map an age in days onto one of four aging buckets.

    Args:
        age_days: numeric age in days.

    Returns:
        str: one of '<30天', '30-60天', '60-90天', '≥90天'.
    """
    # Each branch only runs once the previous bound has been excluded,
    # so a single upper-bound test per branch is sufficient.
    if age_days < 30:
        return '<30天'
    if age_days < 60:
        return '30-60天'
    if age_days < 90:
        return '60-90天'
    return '≥90天'


# 二厂大屏

def factory_screen_upload(request):
    """Upload the big-screen Excel file and prepare its task list."""
    if request.method != 'POST':
        # GET: render the upload form.
        return render(request, 'factory_screen_upload.html')
    try:
        upload = request.FILES['file']
        staging_dir = os.path.join(settings.MEDIA_ROOT, 'factory_screen')
        os.makedirs(staging_dir, exist_ok=True)
        staged_path = os.path.join(staging_dir, upload.name)
        with open(staged_path, 'wb+') as handle:
            for piece in upload.chunks():
                handle.write(piece)
        # Build the task list and store a serializable copy in the session.
        tasks = process_factory_screen_data(staged_path)
        request.session['factory_screen_data'] = convert_df_and_series(tasks)
        os.remove(staged_path)
        return redirect('factory_screen_display')
    except Exception as e:
        return render(request, 'factory_screen_upload.html', {'error': str(e)})

def factory_screen_display(request):
    """Render the factory big-screen page from session data."""
    screen_data = request.session.get('factory_screen_data')
    if not screen_data:
        # Nothing processed yet — collect a file first.
        return redirect('factory_screen_upload')
    return render(request, 'factory_screen_display.html', {'screen_data': screen_data})



def process_factory_screen_data(file_path):
    """Build the task list for the factory big-screen display.

    Keeps only open tasks (已创建/进行中) that are not fully shipped, were
    created within the last 24 hours, and belong to the monitored
    production lines, sorted by creation time ascending.

    Args:
        file_path: path to the Excel task export.

    Returns:
        list[dict]: one record per task, with 创建时间 formatted as a
        'YYYY-MM-DD HH:MM:SS' string.

    Raises:
        RuntimeError: on any read/processing failure (cause chained).
    """
    try:
        df = pd.read_excel(file_path)

        # Drop rows missing any field the screen needs.
        df = df.dropna(
            subset=['任务类型', '任务状态', '产线', '物料编码', '物品名称', '包装名称', '需求数', '已发数', '创建时间'])

        # Parse 创建时间 exactly once; the original parsed, formatted to a
        # string, then re-parsed the string just to filter on it.
        created = pd.to_datetime(df['创建时间'])

        # Filter: open status, under-shipped, created in the last day,
        # and restricted to the monitored production lines.
        one_day_ago = datetime.now() - timedelta(days=1)
        mask = (
            df['任务状态'].isin(['已创建', '进行中'])
            & (df['已发数'] < df['需求数'])
            & (created >= one_day_ago)
            & df['产线'].astype(int).isin([1, 2, 3, 5, 7, 8])
        )
        df = df[mask].copy()

        # Format timestamps for display, then sort ascending (string sort
        # of this fixed-width format matches chronological order).
        df['创建时间'] = created[mask].dt.strftime('%Y-%m-%d %H:%M:%S')
        df = df.sort_values(by='创建时间')

        # Flat list of task records (no per-line grouping).
        return df.to_dict(orient='records')

    except Exception as e:
        # Chain the cause so the original traceback is preserved.
        raise RuntimeError(f"处理数据失败: {str(e)}") from e

