#!/usr/bin/env python
# -*- encoding:    utf-8 -*-
# Author      :    yao low_design@163.com
# Date        :    2025/02/18 15:00:33


import traceback
import datetime
from odoo import _, api, fields, models
import json
import base64
import logging
from odoo.exceptions import ValidationError, UserError
from io import BytesIO
from openpyxl import load_workbook

_logger = logging.getLogger(__name__)


class SpreadSheetExcel(models.Model):
    """Excel-parsing extensions for ``spreadsheet.excel``.

    Adds:
    * Redis caching of parsed price-list rows (``do_parser_data``);
    * cron-driven purchase/sale imports guarded by a PostgreSQL
      advisory lock (``cron_excel_import``).
    """

    _inherit = 'spreadsheet.excel'

    redis_key = fields.Char(string='索引字段', help='用于redis缓存的key值')
    redis_count = fields.Integer(string='缓存数量', help='缓存的数量')
    redis_expire = fields.Integer(string='缓存时间', help='缓存的时间')
    # NOTE(review): despite the ``*_id`` suffix these store plain names
    # taken from the spreadsheet, not res.* database ids — confirm that
    # downstream consumers expect names here.
    country_id = fields.Char('国家', help='国家')
    supplier_id = fields.Char('供应商', help='供应商')
    brand_id = fields.Char('品牌', help='品牌')
    date_from = fields.Date(string='开始日期', help='价格表开始日期')

    @api.with_redis_client
    def do_parser_data(self, redis_client=None):
        """Parse the uploaded Excel file and cache every row in Redis.

        Two-level storage layout:
        * Redis key: ``str(self.sequence)`` — one hash per batch;
        * hash field: the value of the ``self.redis_key`` column of each
          row; hash value: that row serialized as JSON.

        The first data row of each sheet also seeds ``country_id`` /
        ``supplier_id`` / ``brand_id`` from the '国家' / '供应商' / '品牌'
        columns (when all three are present).

        :param redis_client: injected by the ``api.with_redis_client``
                             decorator; exposes ``redis_conn``.
        :return: True on success, False otherwise. The outcome is also
                 recorded on the record via ``_log_import_result``.
        """
        self.ensure_one()
        if not self.file:
            self._log_import_result(False, "没有上传文件")
            return False
        if not self.redis_key:
            self._log_import_result(False, "没有设置索引字段")
            return False

        wb = None
        try:
            # Decode the base64 payload and open the workbook in
            # read-only streaming mode so large files are not fully
            # materialized in memory.
            stream = BytesIO(base64.b64decode(self.file))
            wb = load_workbook(stream, data_only=True, read_only=True)

            redis_main_key = str(self.sequence)
            batch_size = 1000   # rows buffered per HSET round-trip
            batch = {}
            total_records = 0

            def flush():
                """Write the buffered rows to Redis and reset the buffer."""
                if batch:
                    redis_client.redis_conn.hset(redis_main_key, mapping=batch)
                    batch.clear()

            for sheet_name in wb.sheetnames:
                rows = wb[sheet_name].iter_rows(values_only=True)
                headers = next(rows, None)
                if headers is None:
                    # Empty sheet: nothing to import (the previous
                    # implementation raised StopIteration here).
                    continue
                if self.redis_key not in headers:
                    raise UserError(f"Excel 表头中未找到字段 '{self.redis_key}'")

                first_data_row = True
                for row in rows:
                    record = {
                        headers[i]: row[i] if i < len(row) else None
                        for i in range(len(headers))
                    }
                    if first_data_row:
                        # Each sheet's first data row may (re)seed the
                        # country/supplier/brand metadata, matching the
                        # original per-sheet behavior.
                        first_data_row = False
                        country = record.get('国家')
                        supplier = record.get('供应商')
                        brand = record.get('品牌')
                        if country and supplier and brand:
                            self.country_id = country
                            self.supplier_id = supplier
                            self.brand_id = brand

                    record_key = record.get(self.redis_key)
                    if record_key is None:
                        continue    # rows without a key cannot be addressed
                    # Stringify the key to avoid bool/int hash fields.
                    batch[str(record_key)] = json.dumps(record, ensure_ascii=False)
                    total_records += 1
                    if len(batch) >= batch_size:
                        flush()
                flush()  # per-sheet remainder smaller than one batch

            if self.redis_expire:
                redis_client.redis_conn.expire(redis_main_key, self.redis_expire)

            self.redis_count = total_records
            self._log_import_result(True, f"成功解析并缓存 {total_records} 条记录")
            return True

        except Exception as e:
            self._log_import_result(False, f"解析Excel文件失败: {str(e)}", include_traceback=True)
            return False
        finally:
            if wb is not None:
                # read_only workbooks keep the source handle open until
                # explicitly closed.
                wb.close()

    def cron_excel_import(self):
        """Cron entry point: import all pending purchase/sale files.

        A PostgreSQL advisory lock prevents two cron workers from
        processing the same pending files concurrently; if the lock is
        already held the run is skipped entirely.
        """
        # Arbitrary but fixed id identifying this task's advisory lock.
        lock_id = 1234567890

        self.env.cr.execute("SELECT pg_try_advisory_lock(%s)", (lock_id,))
        if not self.env.cr.fetchone()[0]:
            _logger.info("定时任务cron_excel_import已在其他进程中运行，跳过本次执行")
            return

        try:
            # Kept as two separate searches so each category is easy to
            # extend/maintain independently.
            pending_purchase = self.env['spreadsheet.excel'].search([
                ('is_exec', '=', False), ('category', '=', 'import_purchase')])
            pending_sale = self.env['spreadsheet.excel'].search([
                ('is_exec', '=', False), ('category', '=', 'import_sale')])

            for rec in pending_purchase:
                rec.do_import_purchase()
            for rec in pending_sale:
                rec.do_import_sale()
        finally:
            # Release the lock whether or not the imports succeeded.
            self.env.cr.execute("SELECT pg_advisory_unlock(%s)", (lock_id,))

    def get_product_list(self):
        """Extract product codes from column A of every sheet.

        :return: list of non-empty, stripped codes, capped at 100,000
                 entries across ALL sheets (the previous implementation
                 only broke out of the inner loop, so later sheets could
                 push past the cap).
        """
        if not self.file:
            return []

        max_codes = 100000  # hard cap to keep memory bounded
        product_list = []
        stream = BytesIO(base64.b64decode(self.file))
        # read_only mode streams rows instead of loading the whole file.
        wb = load_workbook(stream, data_only=True, read_only=True)
        try:
            capped = False
            for sheet in wb.sheetnames:
                ws = wb[sheet]
                _logger.info(f"处理工作表: {sheet}")
                for row in ws.iter_rows(min_row=1, values_only=True):
                    if row and row[0]:  # column A value
                        code = str(row[0]).strip()
                        if code:
                            product_list.append(code)
                    if len(product_list) >= max_codes:
                        _logger.warning(f"产品编码超过10万条，仅返回前10万条")
                        capped = True
                        break
                if capped:
                    break
        finally:
            wb.close()

        _logger.info(f"从Excel文件中提取了 {len(product_list)} 个产品编码")
        return product_list

    def _do_import(self, label, model_name, method_name):
        """Shared driver for the purchase/sale import entry points.

        :param label: '采购' or '销售' — used only in the progress log.
        :param model_name: target model, e.g. 'purchase.order'.
        :param method_name: cron method to call on that model with the
                            extracted product-code list.
        :return: the cron method's result, or False on error / no codes.
        """
        self.ensure_one()

        try:
            product_list = self.get_product_list()
            if not product_list:
                self._log_import_result(False, "没有获取到有效的产品编码")
                return False

            _logger.info(f"开始导入{label}数据，文件ID: {self.id}，产品数量: {len(product_list)}")

            # Savepoint so a failed cron call rolls back only this import.
            with self.env.cr.savepoint():
                result = getattr(self.env[model_name], method_name)(product_list)

            self._log_import_result(
                bool(result),
                f"成功导入{len(product_list)}个产品" if result else "导入失败")
            return result

        except ValueError as e:
            self._log_import_result(False, f"数据格式错误: {str(e)}")
        except Exception as e:
            self._log_import_result(False, f"导入异常: {str(e)}", include_traceback=True)

        return False

    def do_import_purchase(self):
        """Import purchase orders for the product codes in this file."""
        return self._do_import('采购', 'purchase.order', 'cron_create_purchase')

    def do_import_sale(self):
        """Import sale orders for the product codes in this file.

        (Previously returned ``None`` on exception; now consistently
        returns False like ``do_import_purchase``.)
        """
        return self._do_import('销售', 'sale.order', 'cron_create_sale')

    def do_parser_data_batch(self):
        """Run ``do_parser_data`` over every record in the recordset.

        :return: dict with 'success' and 'failed' counters.
        """
        results = {'success': 0, 'failed': 0}

        for record in self:
            try:
                if record.do_parser_data():
                    results['success'] += 1
                else:
                    results['failed'] += 1
            except Exception as e:
                # One bad file must not abort the rest of the batch.
                _logger.error(f"处理文件ID: {record.id} 时出错: {str(e)}")
                results['failed'] += 1

        _logger.info(f"批量解析完成，成功: {results['success']}，失败: {results['failed']}")
        return results

    def _log_import_result(self, success, message, include_traceback=False):
        """Log an import outcome and persist it on the record.

        Marks the record as executed (``is_exec``), stores the success
        flag and the message; optionally appends the current traceback.
        """
        if include_traceback:
            message = f"{message}\n{traceback.format_exc()}"

        log_method = _logger.info if success else _logger.error
        log_method(message)

        self.write({
            'is_exec': True,
            'is_success': success,
            'exec_msg': message,
        })
