import csv
import itertools
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime, timedelta, time
import csv
import io
from openpyxl import load_workbook
from celery.result import AsyncResult

from django.core.files.uploadedfile import InMemoryUploadedFile
from django.db import transaction, connection
from django.db.models import Sum, Count, Q, F
from rest_framework.decorators import action
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView

from apis.base_data.management.commands.exp_statistics import Command
from apis.base_data.management.commands.update_import_group import BatchDeptUpdateForm, \
    batch_update_dept_by_group_names, BatchDeptUpdate
from apis.base_data.serializers import ProjectSerializer, ProjectCreateUpdateSerializer, ProjectUpdateLogSerializer, \
    ProjectWarnSerializer, ReportRecordUploadSerializer, GroupCustomerInfoSerializer, \
    GroupCustomerInfoCreateUpdateSerializer, GroupInformationStatisticsSerializer, \
    GroupInformationStatisticsCreateUpdateSerializer, MobileUserRecordSerializer, \
    MobileUserRecordCreateUpdateSerializer, SpecialLineManagementSerializer, \
    SpecialLineManagementCreateUpdateSerializer, InformationPlatformRecordSerializer, \
    InformationPlatformRecordCreateUpdateSerializer, BusinessOpportunityRecordSerializer, \
    BusinessOpportunityRecordCreateUpdateSerializer, ArrearsInformationRecordSerializer, \
    ArrearsInformationRecordCreateUpdateSerializer
from common.models.base_data import ProjectInfo, ProjectInfoUpdateLog, ProjectWarn, GroupCustomerInfo, \
    GroupInformationStatistics, MobileUserRecord, SpecialLineManagement, InformationPlatformRecord, \
    BusinessOpportunityRecord, ArrearsInformationRecord, ImportTask
from common.models.system import Dept, Users, Dictionary
# from common.scheduler import logger
import logging
from utils.filters import DataLevelPermissionsFilter
from utils.json_response import DetailResponse, ErrorResponse, SuccessResponse
from utils.models import get_fieds_values
from utils.report_service import ProjectImportServer, template_import
from utils.utils import get_client_ip, get_file_extension, ExcelParser, CSVParser
from utils.viewset import CustomModelViewSet
from common.tasks import async_import_task
from django.http import JsonResponse


class ProjectViewSet(CustomModelViewSet):
    """CRUD for ProjectInfo plus dashboard statistics endpoints.

    Extra actions (all require authentication, see permission_classes_map):
    - finish: mark an in-progress project as completed.
    - count / dept_count / creator_count: aggregate statistics for the
      requesting user's visible projects.
    """
    queryset = ProjectInfo.objects.all()
    serializer_class = ProjectSerializer
    create_serializer_class = ProjectCreateUpdateSerializer
    update_serializer_class = ProjectCreateUpdateSerializer
    permission_classes_map = {
        'count': [IsAuthenticated],
        'dept_count': [IsAuthenticated],
        'creator_count': [IsAuthenticated],
        'finish': [IsAuthenticated],
    }

    # Projects untouched for this many days are flagged as needing an update.
    WARN_DAYS = 7

    def list(self, request, *args, **kwargs):
        """List projects; optional ``?no_update_day=N`` keeps only projects
        whose last update is at least N days old."""
        no_update_day = request.GET.get('no_update_day')

        queryset = self.filter_queryset(self.get_queryset())
        if no_update_day and no_update_day.isdigit():
            cutoff = datetime.now() - timedelta(days=int(no_update_day))
            queryset = queryset.filter(update_time__lte=cutoff)

        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True, request=request)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(queryset, many=True, request=request)
        return SuccessResponse(data=serializer.data, msg='获取成功')

    def finish(self, request, *args, **kwargs):
        """Mark an in-progress project (status '1') as finished (status '2'),
        record the change log and resolve its pending warnings."""
        project = request.data.get('project')
        summarize = request.data.get('summarize', '')
        instance = ProjectInfo.objects.filter(id=project).first()
        if not instance:
            return ErrorResponse(msg='项目信息不存在')
        if instance.status != '1':
            return ErrorResponse(msg='当前状态无法完成')

        before = get_fieds_values(instance)
        with transaction.atomic():
            # Flip to "finished" and persist the wrap-up summary.
            instance.status = '2'
            instance.summarize = summarize
            instance.save(update_fields=('status', 'summarize', 'update_time'))
            # Audit trail of the field changes.
            after = get_fieds_values(instance)
            ProjectInfoUpdateLog.objects.create(
                project_id=instance.id, before=before, after=after, creator_id=self.request.user.id)
            # All outstanding warnings for this project are considered handled.
            ProjectWarn.objects.filter(project_id=instance.id).update(is_handled=True)

        return DetailResponse(msg='成功')

    def count(self, request):
        """Overall totals for in-progress projects visible to the user."""
        projects = self.get_queryset().filter(status='1')
        projects = DataLevelPermissionsFilter().filter_queryset(request, projects, self)
        project_count = projects.count()
        # Sum() yields None on an empty queryset; coerce to 0 for the client.
        project_price = projects.aggregate(price=Sum('price'))['price'] or 0

        warn_time = datetime.now() - timedelta(days=self.WARN_DAYS)
        need_update_count = projects.filter(update_time__lte=warn_time).count()

        return DetailResponse(data={
            'project_count': project_count,
            'need_update_count': need_update_count,
            'project_price': project_price,
            # Warning counters intentionally disabled (see VCS history).
            'warn_count': 0,
            'warn_no_read_count': 0
        })

    def _project_stats(self, request, group_field):
        """Annotate in-progress projects grouped by ``group_field`` and
        return a ``{group_value: stats_row}`` lookup for O(1) access."""
        warn_time = datetime.now() - timedelta(days=self.WARN_DAYS)
        projects = self.filter_queryset(self.get_queryset()).filter(status='1')
        projects = DataLevelPermissionsFilter().filter_queryset(request, projects, self)
        rows = projects.values(group_field).annotate(
            count=Count('id'), price=Sum('price'),
            need_update=Count('id', Q(update_time__lte=warn_time)),
            yellow_num=Count('id', Q(important_level='2')),
            red_num=Count('id', Q(important_level='3')),
            green_num=Count('id', Q(important_level='1'))
        ).values(group_field, 'count', 'price', 'need_update',
                 'yellow_num', 'red_num', 'green_num').order_by('-count')
        return {row[group_field]: row for row in rows}

    # Zero-filled template used when a dept/user has no matching projects.
    _EMPTY_STATS = {'count': 0, 'price': 0, 'need_update': 0,
                    'yellow_num': 0, 'red_num': 0, 'green_num': 0}

    def dept_count(self, request):
        """Per-department statistics (root-level depts excluded)."""
        stats = self._project_stats(request, 'dept_id')
        result = []
        for dept in Dept.objects.exclude(level='1'):
            row = stats.get(dept.id, self._EMPTY_STATS)
            entry = {'dept_id': dept.id, 'dept_name': dept.name}
            entry.update({key: row[key] for key in self._EMPTY_STATS})
            result.append(entry)
        return DetailResponse(data=result)

    def creator_count(self, request):
        """Per-salesperson statistics within the requester's department."""
        stats = self._project_stats(request, 'creator_id')
        result = []
        for user in Users.objects.filter(dept_id=self.request.user.dept_id, type="1"):
            row = stats.get(user.id, self._EMPTY_STATS)
            entry = {'creator_id': user.id, 'user_name': user.name}
            entry.update({key: row[key] for key in self._EMPTY_STATS})
            result.append(entry)
        return DetailResponse(data=result)


class ProjectImportViewSet(APIView):
    """Import project records from an uploaded Excel workbook."""
    parser_classes = (MultiPartParser, FormParser)

    def post(self, request):
        """Validate the workbook header against the import template, then run
        the import and report per-row results."""
        # Validate upload payload (file path produced by the upload serializer).
        serializer = ReportRecordUploadSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        file_path = serializer.validated_data.get('file_path')

        # Lazy import: xlrd is only needed by this endpoint.
        import xlrd
        sheet = xlrd.open_workbook(file_path).sheet_by_index(0)
        # Non-empty header cells of the first row.
        header_values = [sheet.cell_value(0, col) for col in range(sheet.ncols) if sheet.cell_value(0, col)]

        import_server = ProjectImportServer()
        import_keys = import_server.template_type_data.get('import_key')
        # Header column count must match the template exactly.
        if len(header_values) != len(import_keys):
            return ErrorResponse(msg='文件格式错误')

        num, errors = template_import(import_server, file_path, request.user.id)
        if num:
            return DetailResponse(msg='导入成功{}条'.format(num), data=errors)
        # isinstance instead of type()==: robust to list subclasses.
        if isinstance(errors, list):
            return DetailResponse(msg='导入失败', data=errors)
        return DetailResponse(msg=errors)


class ProjectUpdateLogViewSet(CustomModelViewSet):
    """Project change-log records; rows whose parent project has been
    soft-deleted are hidden."""
    queryset = ProjectInfoUpdateLog.objects.filter(project__is_del=False)
    serializer_class = ProjectUpdateLogSerializer


class ProjectWarnViewSet(CustomModelViewSet):
    """Project warning records (warnings of soft-deleted projects excluded)."""
    queryset = ProjectWarn.objects.filter(project__is_del=False)
    serializer_class = ProjectWarnSerializer

    def retrieve(self, request, *args, **kwargs):
        """Return a single warning; when the viewer is the warning's creator
        (the person following up), flag it as read."""
        warn = self.get_object()

        viewer_is_owner = request.user.id == warn.creator_id
        if viewer_is_owner:
            warn.read_status = '1'
            warn.save(update_fields=('read_status', 'update_time'))
        return DetailResponse(data=self.get_serializer(warn).data, msg='获取成功')


class GroupCustomerInfoViewSet(CustomModelViewSet):
    """CRUD and lookups for group customer information."""
    queryset = GroupCustomerInfo.objects.all()
    serializer_class = GroupCustomerInfoSerializer
    create_serializer_class = GroupCustomerInfoCreateUpdateSerializer
    update_serializer_class = GroupCustomerInfoCreateUpdateSerializer
    permission_classes_map = {
        'group_detail': [IsAuthenticated],
        "check_test": [IsAuthenticated]
    }
    filter_fields = {
        'group_name': ['icontains'],
        'group_id': ['icontains'],
        'dept_id': ['exact'],
        'industry_category': ['exact'],
        'group_actual_level': ['exact'],
        'creator_id': ['exact'],
        'dept_belong_id': ['exact'],
        'group_address': ['icontains'],
    }
    search_fields = ['group_id', 'group_name']

    def group_detail(self, request):
        """Return one group's customer info with its statistics preloaded."""
        statistics_id = request.GET.get('id')
        if not statistics_id:
            return ErrorResponse(msg='缺少参数id')
        customer_infos = self.queryset.filter(id=statistics_id).select_related('statistics').all()
        serializer = GroupCustomerInfoSerializer(customer_infos, many=True)
        return DetailResponse(data=serializer.data)

    @action(methods=['GET'], detail=False, permission_classes=[IsAuthenticated])
    def select(self, request):
        """Dropdown options (value/label pairs), optionally scoped to a dept."""
        params = request.query_params
        dept_id = params.get('dept_id', -1)

        if dept_id != -1:
            queryset = self.get_queryset().filter(dept_id=dept_id).values(value=F('id'), label=F('group_name'))
        else:
            queryset = self.get_queryset().values(value=F('id'), label=F('group_name'))
        return DetailResponse(data=queryset)

    def list(self, request, *args, **kwargs):
        """List groups with standard filtering and pagination."""
        # NOTE(review): 'create_days' is read but never applied — filtering by
        # creation time appears unimplemented; confirm intent before removing.
        create_days = request.GET.get('create_days')
        queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        # Pagination disabled: return the full result set (mirrors ProjectViewSet.list).
        serializer = self.get_serializer(queryset, many=True)
        return SuccessResponse(data=serializer.data, msg='获取成功')

    def check_test(self, request):
        """Manually trigger the statistics export management command."""
        command_group = Command()
        command_group.handle()
        return DetailResponse(data=[])


class GroupInformationStatisticSet(CustomModelViewSet):
    """Aggregated per-group statistics; hidden once the group is soft-deleted."""
    queryset = GroupInformationStatistics.objects.filter(group__is_del=False)
    serializer_class = GroupInformationStatisticsSerializer
    create_serializer_class = GroupInformationStatisticsCreateUpdateSerializer
    update_serializer_class = GroupInformationStatisticsCreateUpdateSerializer
    # NOTE(review): 'group_detail' is commented out below, so this permission
    # entry is currently unused — confirm before removing.
    permission_classes_map = {
        'group_detail': [IsAuthenticated],
    }

    # def group_detail(self, request):
    #     statistics_id = request.GET.get('id')
    #     if not statistics_id:
    #         return ErrorResponse(msg='缺少参数id')
    #     # 查询 GroupInformationStatistics 详情并关联 GroupCustomerInfo
    #     statistics = self.get_queryset().select_related('group').get(id=statistics_id)
    #     serializer = self.get_serializer(statistics)
    #     return DetailResponse(data=serializer.data)


class MobileUserRecordViewSet(CustomModelViewSet):
    """Mobile-user records; deleting one rolls its still-active quantities
    out of the owning group's aggregate statistics row."""
    queryset = MobileUserRecord.objects.filter(group__is_del=False)
    serializer_class = MobileUserRecordSerializer
    create_serializer_class = MobileUserRecordCreateUpdateSerializer
    update_serializer_class = MobileUserRecordCreateUpdateSerializer

    def destroy(self, request, *args, **kwargs):
        """Delete a record and, inside one transaction, subtract every batch of
        counts whose expiry date is set and has not yet passed."""
        today = datetime.today().date()

        record = self.get_object()
        stats, _ = GroupInformationStatistics.objects.get_or_create(group_id=record.group_id)

        def still_active(expiry):
            # A batch counts toward the statistics while its expiry date is
            # present and today or later.
            return bool(expiry and expiry >= today)

        with transaction.atomic():
            record.delete()
            # TODO expired rows with is_exp=False are reconciled by the
            # scheduled job, not here.
            if still_active(record.group_5g_coin_exp_date):
                stats.our_net_user_cnt -= record.our_net_users
                stats.wiring_box_cnt -= record.wiring_box_penetration
                stats.joint_pay_user_cnt -= record.joint_pay_users
                stats.group_5g_coin_user_cnt -= record.our_net_5g_coin_users
            if still_active(record.lt_exp_date):
                stats.lt_user_cnt -= record.lt_users
                stats.lt_contract_gift_user_cnt -= record.lt_contract_gift_users
            if still_active(record.dx_exp_date):
                stats.dx_user_cnt -= record.dx_users
                stats.dx_contract_gift_user_cnt -= record.dx_contract_gift_users
            stats.save(update_fields=(
                'our_net_user_cnt', 'wiring_box_cnt', 'joint_pay_user_cnt',
                'group_5g_coin_user_cnt', 'lt_user_cnt', 'lt_contract_gift_user_cnt',
                'dx_user_cnt', 'dx_contract_gift_user_cnt', 'update_time'))

        return DetailResponse(data=[], msg='删除成功')


class SpecialLineManagementViewSet(CustomModelViewSet):
    """Special-line (dedicated circuit) records; deleting one rolls its
    still-active line counts and income out of the group statistics row."""
    queryset = SpecialLineManagement.objects.filter(group__is_del=False)
    serializer_class = SpecialLineManagementSerializer
    create_serializer_class = SpecialLineManagementCreateUpdateSerializer
    update_serializer_class = SpecialLineManagementCreateUpdateSerializer

    def destroy(self, request, *args, **kwargs):
        """Delete a record and subtract each batch whose expiry date is set
        and not yet past from the group's aggregate statistics."""
        now_date = datetime.today().date()

        obj = self.get_object()
        statistics, _ = GroupInformationStatistics.objects.get_or_create(group_id=obj.group_id)
        with transaction.atomic():
            obj.delete()
            # Only subtract batches whose expiry date is present and >= today.
            # TODO expired rows with is_exp=False are reconciled by the scheduled job.
            # Data lines: data-line count, data-line monthly income, stock data-line count.
            if obj.data_line_expiry and obj.data_line_expiry >= now_date:
                statistics.data_line_cnt -= obj.data_line_count
                statistics.data_line_income -= obj.data_line_income
                statistics.stock_data_line_cnt -= obj.data_line_count
            # Internet lines: internet-line count, monthly income, stock internet-line count.
            if obj.internet_line_expiry and obj.internet_line_expiry >= now_date:
                statistics.internet_line_cnt -= obj.internet_line_count
                statistics.internet_line_income -= obj.internet_line_income
                statistics.stock_internet_line_cnt -= obj.internet_line_count
            # LT lines: LT data/internet counts, stock counts, other-network income.
            if obj.lt_expiry and obj.lt_expiry >= now_date:
                statistics.lt_data_line_cnt -= obj.lt_data_count
                statistics.lt_internet_line_cnt -= obj.lt_internet_count
                statistics.other_net_income -= obj.lt_data_fee + obj.lt_internet_fee
                statistics.stock_data_line_cnt -= obj.lt_data_count
                statistics.stock_internet_line_cnt -= obj.lt_internet_count
            # DX lines: DX data/internet counts, stock counts, other-network income.
            if obj.dx_expiry and obj.dx_expiry >= now_date:
                statistics.dx_data_line_cnt -= obj.dx_data_count
                statistics.dx_internet_line_cnt -= obj.dx_internet_count
                statistics.other_net_income -= obj.dx_data_fee + obj.dx_internet_fee
                statistics.stock_data_line_cnt -= obj.dx_data_count
                statistics.stock_internet_line_cnt -= obj.dx_internet_count
            # GD lines: GD data/internet counts, stock counts, other-network income.
            if obj.gd_expiry and obj.gd_expiry >= now_date:
                statistics.gd_data_line_cnt -= obj.gd_data_count
                statistics.gd_internet_line_cnt -= obj.gd_internet_count
                statistics.other_net_income -= obj.gd_data_fee + obj.gd_internet_fee
                statistics.stock_data_line_cnt -= obj.gd_data_count
                statistics.stock_internet_line_cnt -= obj.gd_internet_count
            # Persist only the touched columns (plus update_time), matching
            # MobileUserRecordViewSet.destroy on the same model, so an
            # unconditional save can't clobber concurrently updated fields.
            statistics.save(update_fields=(
                'data_line_cnt', 'data_line_income', 'stock_data_line_cnt',
                'internet_line_cnt', 'internet_line_income', 'stock_internet_line_cnt',
                'lt_data_line_cnt', 'lt_internet_line_cnt',
                'dx_data_line_cnt', 'dx_internet_line_cnt',
                'gd_data_line_cnt', 'gd_internet_line_cnt',
                'other_net_income', 'update_time'))

        return DetailResponse(data=[], msg='删除成功')


class InformationPlatformRecordViewSet(CustomModelViewSet):
    """Information-platform records with soft delete."""
    queryset = InformationPlatformRecord.objects.filter(is_del=False)
    serializer_class = InformationPlatformRecordSerializer
    create_serializer_class = InformationPlatformRecordCreateUpdateSerializer
    update_serializer_class = InformationPlatformRecordCreateUpdateSerializer

    def destroy(self, request, *args, **kwargs):
        """Soft delete: flag the record instead of removing the row."""
        record = self.get_object()
        record.is_del = True
        record.save()
        return DetailResponse(data=[], msg='删除成功')


class BusinessOpportunityRecordViewSet(CustomModelViewSet):
    """Business-opportunity records with soft delete."""
    queryset = BusinessOpportunityRecord.objects.filter(is_del=False)
    serializer_class = BusinessOpportunityRecordSerializer
    create_serializer_class = BusinessOpportunityRecordCreateUpdateSerializer
    update_serializer_class = BusinessOpportunityRecordCreateUpdateSerializer

    def destroy(self, request, *args, **kwargs):
        """Soft delete: flag the record instead of removing the row."""
        record = self.get_object()
        record.is_del = True
        record.save()
        return DetailResponse(data=[], msg='删除成功')


class ArrearsInformationRecordViewSet(CustomModelViewSet):
    """Plain CRUD over arrears records.

    NOTE(review): unlike sibling viewsets, this queryset applies no
    soft-delete predicate (bare ``.filter()``) — confirm that deleted rows
    are intended to be visible here.
    """
    queryset = ArrearsInformationRecord.objects.filter()
    serializer_class = ArrearsInformationRecordSerializer
    create_serializer_class = ArrearsInformationRecordCreateUpdateSerializer
    update_serializer_class = ArrearsInformationRecordCreateUpdateSerializer


# class ArrearsInformationRecordImportViewSet(APIView):
#     parser_classes = (MultiPartParser, FormParser)
#     MAX_BATCH_SIZE = 50000  # 单批次最大处理量
#     WORKER_THREADS = 4  # 并行校验线程数
#
#     @transaction.non_atomic_requests  # 禁用全局事务
#     def post(self, request):
#         """异步批量导入（支持20万条/分钟）"""
#         task = ImportTask.objects.create(
#             user=request.user,
#             file_size=request.FILES['file'].size,
#             ip=get_client_ip(request)
#         )
#         file = request.FILES['file']
#
#         # 1. 读取文件内容（转为字节）
#         file_content = file.read()  # 读取后文件指针到末尾，需重置
#         file.seek(0)  # 重置指针以便后续使用
#
#         # 2. 构建可序列化的文件元数据
#         file_data = {
#             'name': file.name,
#             'size': file.size,
#             'content_type': file.content_type,
#             'content': file_content,  # 字节数据（bytes类型可JSON序列化）
#         }
#
#         # 启动异步处理（避免阻塞主线程）
#         from common.tasks import async_import_task
#         async_import_task.delay(task.id, file_data)
#
#         return SuccessResponse(
#             data={'task_id': task.id},
#             msg='导入任务已受理（预计5分钟/10万条）',
#         )
#
#     # -------------------- 核心处理逻辑（异步任务专用）--------------------
#     @staticmethod
#     def process_import(task_id, file: InMemoryUploadedFile):
#         """核心导入处理（无HTTP上下文）"""
#         task = ImportTask.objects.get(id=task_id)
#         task.status = 'processing'
#         task.save()
#
#         try:
#             # 1. 快速清空（仅全量导入）
#             if task.clear_before_import:
#                 ArrearsInformationRecord.objects.filter(is_del=False).delete()  # 批量删除
#                 task.cleared_count = task.total_records  # 假设前端已传总数
#
#             # 2. 流式解析文件（内存友好）
#             parser = CSVParser(file) if file.name.endswith('.csv') else ExcelParser(file)
#             total = 0
#             valid_batches = []
#
#             # 3. 并行校验（多线程）
#             with ThreadPoolExecutor(max_workers=ArrearsInformationRecordImportViewSet.WORKER_THREADS) as executor:
#                 futures = []
#                 for chunk in parser.chunk_iter(chunk_size=1000):
#                     futures.append(executor.submit(
#                         ArrearsInformationRecordImportViewSet._validate_chunk,
#                         chunk,
#                         task.user
#                     ))
#
#                 for future in futures:
#                     valid, errors = future.result()
#                     task.errors.extend(errors)
#                     valid_batches.append(valid)
#                     total += len(valid)
#                     task.save(update_fields=['valid_count', 'errors'])
#
#             # 4. 批量插入（原始SQL）
#             if valid_batches:
#                 ArrearsInformationRecordImportViewSet._bulk_insert(
#                     itertools.chain(*valid_batches),
#                     task.user
#                 )
#
#             task.status = 'success'
#             task.save()
#
#         except Exception as e:
#             task.status = 'failed'
#             task.error_message = str(e)
#             task.save()
#
#     # -------------------- 并行校验模块 --------------------
#     @staticmethod
#     def _validate_chunk(chunk, user):
#         """单批次数据校验（线程安全）"""
#         valid, errors = [], []
#         group_cache = GroupCache.get_instance()  # 全局缓存
#         biz_cache = BizCategoryCache.get_instance()
#
#         for row_num, row in enumerate(chunk, 1):
#             try:
#                 # 快速校验（纯内存操作）
#                 group_name = row['集团名称'].strip().lower()
#                 if not group_name or group_name not in group_cache:
#                     errors.append(ArrearsImportError(row_num, '集团不存在', row))
#                     continue
#
#                 biz_name = row['业务大类'].strip().lower()
#                 if not biz_name or biz_name not in biz_cache:
#                     errors.append(ArrearsImportError(row_num, '业务大类不存在', row))
#                     continue
#
#                 # 格式校验（无数据库操作）
#                 min_date = ExcelParser._parse_date(row['最小账期'])
#                 max_date = ExcelParser._parse_date(row['最大账期'])
#                 if min_date > max_date:
#                     errors.append(ArrearsImportError(row_num, '账期范围错误', row))
#                     continue
#
#                 amount = ExcelParser._parse_amount(row['全量欠费（元）'])
#                 if amount < 0:
#                     errors.append(ArrearsImportError(row_num, '金额为负', row))
#                     continue
#
#                 valid.append({
#                     'group_id': group_cache[group_name],
#                     'business_category_id': biz_cache[biz_name],
#                     'min_billing_period': min_date,
#                     'max_billing_period': max_date,
#                     'total_arrears': amount,
#                     'creator_id': user.id,
#                     'create_time': datetime.now()
#                 })
#
#             except Exception as e:
#                 errors.append(ArrearsImportError(row_num, str(e), row))
#
#         return valid, errors
#
#     # -------------------- 批量插入模块 --------------------
#     @staticmethod
#     def _bulk_insert(records, user):
#         """原始SQL批量插入（50万条/分钟）"""
#         if not records:
#             return
#
#         fields = [
#             'group_id', 'business_category_id', 'min_billing_period',
#             'max_billing_period', 'total_arrears', 'creator_id', 'create_time'
#         ]
#
#         params = [tuple(record[field] for field in fields) for record in records]
#
#         with connection.cursor() as cursor:
#             cursor.executemany(f"""
#                 INSERT INTO arrears_information_record ({', '.join(fields)})
#                 VALUES (%s, %s, %s, %s, %s, %s, %s)
#                 ON DUPLICATE KEY UPDATE total_arrears=VALUES(total_arrears)
#             """, params)
#
#         # # 批量记录操作日志（每10万条）
#         # OperationLog.objects.bulk_create([
#         #     OperationLog(
#         #         user=user,
#         #         action='BULK_IMPORT',
#         #         detail=f'批量导入{len(records)}条欠费数据',
#         #         ip='127.0.0.1'  # 从task获取
#         #     )
#         # ], ignore_conflicts=True)
#
#     @staticmethod
#     def get_total_rows(file: InMemoryUploadedFile) -> int:
#         """
#         安全获取文件有效数据行数（跳过标题行，过滤空行）
#         :param file: 上传的文件对象（支持CSV/Excel）
#         :return: 有效数据行数（不包含标题行）
#         """
#         ext = get_file_extension(file.name)  # 复用工具函数
#         if ext not in {'csv', 'xlsx'}:
#             return 0  # 非支持类型返回0
#
#         # 保存当前文件指针位置（避免影响后续解析）
#         original_pos = file.tell()
#         file.seek(0)
#
#         try:
#             if ext == 'csv':
#                 return CSVParser(file).total_rows
#             else:
#                 return ExcelParser(file).total_rows
#         finally:
#             file.seek(original_pos)  # 恢复文件指针


class ArrearsInformationRecordImportViewSet(APIView):
    """Arrears import endpoint: validates the upload's header, records an
    ImportTask row, and hands the payload to a Celery worker."""
    parser_classes = (MultiPartParser, FormParser)

    def post(self, request):
        """Submit an asynchronous import task.

        Returns both the DB ImportTask id (``task_id``) and the Celery task id
        (``celery_task_id``); the status endpoint resolves Celery ids via
        ``AsyncResult``.
        """
        # 🔴 Full wipe before import (dangerous; driven by an opt-in header).
        clear_before_import = request.headers.get('X-Clear-Before-Import') == 'true'

        # 1. File presence / extension checks.
        if 'file' not in request.FILES:
            return JsonResponse({'msg': '请上传CSV/Excel文件', 'code': 400}, status=400)

        file = request.FILES['file']
        file_extension = get_file_extension(file.name)
        if file_extension not in ('csv', 'xlsx'):
            return JsonResponse({'msg': '仅支持CSV/Excel文件', 'code': 400}, status=400)

        # 2. Header validation. This consumes the stream; the pointer is
        #    reset before the payload is re-read for the async task.
        try:
            if file_extension == 'csv':
                decoded_file = file.read().decode('utf-8-sig')
                reader = csv.DictReader(io.StringIO(decoded_file))
                CSVParser(file)._validate_header(reader.fieldnames)
            else:
                wb = load_workbook(io.BytesIO(file.read()), read_only=True, data_only=True)
                ws = wb.active
                header_row = next(ws.iter_rows(min_row=1, max_row=1, values_only=False), [])
                header = [cell.value if cell.value is not None else f'空列_{cell.column_letter}' for cell in header_row]
                ExcelParser(file)._validate_header(header)
        except Exception as e:
            return JsonResponse({'msg': f'文件表头验证失败: {str(e)}', 'code': 400}, status=400)

        # 3. Record the import attempt.
        # NOTE(review): this row is created even if dispatch below fails —
        # confirm whether it should be marked failed in that branch.
        task = ImportTask.objects.create(
            user=request.user,
            file_size=request.FILES['file'].size,
            ip=get_client_ip(request)
        )
        # 4. Dispatch the asynchronous import.
        try:
            file.seek(0)  # rewind after the header-validation read
            result = async_import_task.delay(request.user.id, clear_before_import, file.read(), file_extension)
            # Structured log instead of a debug print.
            logging.getLogger(__name__).info(
                'arrears import dispatched: import_task=%s celery_task=%s', task.id, result.id)
        except Exception as e:
            return JsonResponse({'msg': f'异步任务启动失败: {str(e)}', 'code': 500}, status=500)

        return JsonResponse(
            {
                'task_id': task.id,
                # Celery id — what ArrearsImportStatusView's AsyncResult lookup expects.
                'celery_task_id': result.id,
                'status': 'PENDING',
                'message': '导入任务已提交，请通过GET /api/import-status/{task_id}查询状态'
            },
            status=200
        )


class ArrearsImportStatusView(APIView):
    """Poll the state of a previously submitted import task."""

    def get(self, request, task_id):
        """Return the Celery status and result for ``task_id``."""
        outcome = AsyncResult(task_id)
        payload = {
            'task_id': outcome.id,
            'status': outcome.status,
            'result': outcome.result,
        }
        return SuccessResponse(data=payload)


class BatchDeptUpdateView(APIView):
    """Batch-update group→department mapping from an uploaded spreadsheet."""
    parser_classes = (MultiPartParser, FormParser)

    def post(self, request):
        """Validate the form and file, parse the sheet, and apply the
        dept updates keyed by group name."""
        # NOTE(review): passing the APIView class into BatchDeptUpdate looks
        # accidental — confirm the expected constructor argument.
        updater = BatchDeptUpdate(APIView)
        form = BatchDeptUpdateForm(request.POST, request.FILES)
        if not form.is_valid():
            # BUG FIX: JsonResponse's 2nd positional argument is the JSON
            # encoder, not the HTTP status — passing 400 positionally raised
            # TypeError at runtime. Use status= explicitly (all branches).
            return JsonResponse({'status': 'error', 'message': str(form.errors)}, status=400)

        file = request.FILES['file']
        if file.size > updater.MAX_FILE_SIZE or not updater._is_valid_ext(file.name):
            return JsonResponse({'status': 'error', 'message': '文件格式或大小不符'}, status=400)

        try:
            df = updater._parse_file(file)
            update_mapping, errors = updater._validate_dataframe(df)
        except Exception as e:
            return JsonResponse({'status': 'error', 'message': f'解析失败：{str(e)}'}, status=400)

        # NOTE(review): per-row validation errors are currently not returned
        # to the caller — confirm whether they should abort the update.
        result = batch_update_dept_by_group_names(update_mapping)
        return updater._success_response(result, len(df))
