import scrapy
import json
import requests
from ..settings import (
    COOKIE_URL,
)

class ProjectSpider(scrapy.Spider):
    """Crawl project / method / sample / test-time data from the LIMS system.

    The target service authenticates via a ``sunway-remember-me`` cookie that
    is fetched from an external cookie service (``COOKIE_URL``).  Whenever a
    response contains the server's unauthorized marker, the cookie is
    refreshed and the request is retried up to ``max_retry_times`` times.
    """

    name = "project"
    max_retry_times = 3  # maximum number of re-auth retries per request

    # Shared header template.  scrapy.Request copies headers at creation
    # time, so refreshing the cookie here does not affect already-queued
    # requests — only requests built afterwards.
    headers = {
        'sunway-remember-me': "",
        'Content-Type': 'application/json'
    }

    url_templates = {
        'project': "https://lims.fdbatt.com/secure/basemodule/business/ordtasks/custom/searchable",
        'method': "https://lims.fdbatt.com/secure/basemodule/business/ordtasks/{}",
        'sample_get': "https://lims.fdbatt.com/secure/basemodule/business/folders/{}/test-matrix-row-vals/queries",
        'sample_post': "https://lims.fdbatt.com/secure/basemodule/business/folders/{}/orders/queries",
        'test_time': "https://lims.fdbatt.com/secure/basemodule/datacenter/record-tracks/{}/record/queries/raw"
    }

    def __init__(self, pages, all="true", *args, **kwargs):
        """Configure how many result pages to crawl.

        :param pages: page count passed via ``-a pages=...``; falsy values
            fall back to 50.
        :param all: when the string ``"true"``, ignore *pages* and crawl
            every page the server reports.  (The name shadows the builtin,
            but it is part of the spider's CLI interface and must stay.)
        """
        super().__init__(*args, **kwargs)
        self.pages = int(pages) if pages else 50
        if all == "true":
            self.pages = self.get_pages()
        self.logger.info("lims爬虫的目标爬取页数是{}， 是否全量爬取{}".format(self.pages, all))

    def get_pages(self):
        """Ask the server for the total page count; fall back to 1 on failure."""
        self.headers['sunway-remember-me'] = self.get_cookie()
        url = self.url_templates['project']
        body = self._build_project_payload(1)
        try:
            # NOTE(review): verify=False disables TLS certificate checking
            # (presumably an internal host with a self-signed cert — confirm).
            res = requests.post(url=url, json=body, headers=self.headers, verify=False)
            total_pages = int(res.json()['totalPages'])
        except (requests.RequestException, ValueError, KeyError, TypeError) as exc:
            # Previously a bare ``except:`` that silently swallowed every
            # error (including KeyboardInterrupt); narrowed and logged.
            self.logger.error("get_pages failed, defaulting to 1 page: %s", exc)
            total_pages = 1
        return total_pages

    def get_cookie(self):
        """Fetch a fresh 'sunway-remember-me' cookie from the cookie service."""
        res = requests.get(COOKIE_URL)
        return res.json()['cookie']

    def start_requests(self):
        """Emit one POST per result page of the project search endpoint."""
        for page in range(self.pages):
            self.headers['sunway-remember-me'] = self.get_cookie()
            yield scrapy.Request(
                url=self.url_templates['project'],
                method='POST',
                body=json.dumps(self._build_project_payload(page + 1)),
                headers=self.headers,
                callback=self.parse_project,
                meta={'retry_times': 0}  # initialize the retry counter
            )

    def _build_project_payload(self, page_num):
        """Build the search payload for one page (page size fixed at 50)."""
        return {
            "p": {
                "f": {"searchFlag": "1", "exportAsync": "1"},
                "n": page_num,
                "s": 50,
                "qf": {}
            }
        }

    def _handle_unauthorized(self, response, callback, meta=None):
        """Refresh the cookie and build a retry request for an unauthorized response.

        Returns the retry ``scrapy.Request``, or ``None`` once
        ``max_retry_times`` is exceeded.  Callers must stop processing the
        unauthorized response either way.
        """
        retry_times = response.meta.get('retry_times', 0) + 1

        if retry_times > self.max_retry_times:
            self.logger.error(f"超过最大重试次数 {self.max_retry_times}，放弃请求: {response.url}")
            return None

        self.logger.warning(f"检测到未授权响应，第 {retry_times} 次重试: {response.url}")
        self.headers['sunway-remember-me'] = self.get_cookie()

        retry_meta = response.meta.copy()
        retry_meta['retry_times'] = retry_times

        # dont_filter=True is required: the retry is byte-identical to the
        # original request, so Scrapy's dupefilter would otherwise drop it
        # and the retry would never be scheduled.
        if response.request.method == 'POST':
            # POST retries must carry the original request body.
            return scrapy.Request(
                url=response.url,
                method='POST',
                body=response.request.body,
                headers=self.headers,
                callback=callback,
                meta=retry_meta,
                dont_filter=True
            )
        return scrapy.Request(
            url=response.url,
            method=response.request.method,
            headers=self.headers,
            callback=callback,
            meta=retry_meta,
            dont_filter=True
        )

    def parse_project(self, response):
        """Parse the project search page and fan out method/sample requests."""
        if "core.security.login.unauthorized" in response.text:
            retry_req = self._handle_unauthorized(response, self.parse_project)
            if retry_req:
                yield retry_req
            # Always stop here: the body is an auth error, not project data.
            # (Previously fell through when the retry budget was exhausted
            # and tried to parse the unauthorized response as JSON.)
            return

        if response.status != 200:
            return

        data = response.json()
        for row in data.get("rows", []):
            project_id = row.get('id')
            folder_id = row.get('folderId')
            folder_no = row.get('ext$', {}).get('folderno')
            row['folder_no'] = folder_no
            if project_id:
                yield scrapy.Request(
                    url=self.url_templates['method'].format(project_id),
                    method='GET',
                    headers=self.headers,
                    callback=self.parse_method,
                    meta={'project_data': row, 'retry_times': 0}
                )
            if folder_id:
                yield scrapy.Request(
                    url=self.url_templates['sample_get'].format(folder_id),
                    method='GET',
                    headers=self.headers,
                    callback=self.parse_sample_get,
                    meta={'folder_id': folder_id, 'folder_no': folder_no, 'retry_times': 0}
                )

    def parse_method(self, response):
        """Attach method details to the project row and request its test times."""
        if "core.security.login.unauthorized" in response.text:
            retry_req = self._handle_unauthorized(response, self.parse_method)
            if retry_req:
                yield retry_req
            return  # never parse an unauthorized body as method data

        try:
            method_data = json.loads(response.text)
        except json.JSONDecodeError:
            self.logger.error("Method数据解析失败")
            return
        son_id = response.meta['project_data']['id']
        data = {"p": {"f": {}, "n": 1, "s": 50, "qf": {}}}
        merged_data = response.meta['project_data'].copy()
        merged_data.update({
            'method_details': method_data
        })
        # Pass a copy as meta so parse_time cannot mutate the item we are
        # about to yield (previously both shared the same dict object).
        yield scrapy.Request(
            url=self.url_templates['test_time'].format(son_id),
            method='POST',
            headers=self.headers,
            body=json.dumps(data),
            callback=self.parse_time,
            meta=dict(merged_data)
        )
        yield merged_data

    def parse_time(self, response):
        """Attach test-time records to the project data carried in meta."""
        if "core.security.login.unauthorized" in response.text:
            retry_req = self._handle_unauthorized(response, self.parse_time)
            if retry_req:
                yield retry_req
            return  # never parse an unauthorized body as time data

        if response.status != 200:
            return
        try:
            time_data = json.loads(response.text)
        except json.JSONDecodeError:
            self.logger.error("测试时间数据解析失败")
            return
        # Copy meta instead of mutating it in place: meta is shared with the
        # originating request and may alias an already-yielded item.
        merged_data = dict(response.meta)
        merged_data.update({
            'test_time': time_data
        })
        yield merged_data

    def parse_sample_get(self, response):
        """Collect sample ids for a folder, then request its sample codes."""
        if "core.security.login.unauthorized" in response.text:
            retry_req = self._handle_unauthorized(response, self.parse_sample_get)
            if retry_req:
                yield retry_req
            return  # never parse an unauthorized body as sample data

        folder_id = response.meta['folder_id']
        folder_no = response.meta['folder_no']
        try:
            sample_ids = json.loads(response.text)
        except json.JSONDecodeError:
            self.logger.error("Sample GET解析失败")
            return

        yield scrapy.Request(
            url=self.url_templates['sample_post'].format(folder_id),
            method='POST',
            headers=self.headers,
            # n=-1 presumably means "all rows" to this endpoint — confirm.
            body=json.dumps({"p": {"f": {}, "n": -1, "s": 50, "qf": {}}}),
            callback=self.parse_sample_post,
            meta={'folder_id': folder_id, 'folder_no': folder_no, 'sample_ids': sample_ids, 'retry_times': 0}
        )

    def parse_sample_post(self, response):
        """Yield the final sample item combining ids, codes and folder info."""
        if "core.security.login.unauthorized" in response.text:
            retry_req = self._handle_unauthorized(response, self.parse_sample_post)
            if retry_req:
                yield retry_req
            return  # never parse an unauthorized body as sample data

        try:
            sample_data = json.loads(response.text)
        except json.JSONDecodeError:
            self.logger.error("Sample POST解析失败")
            return

        yield {
            'folder_id': response.meta['folder_id'],
            'folder_no': response.meta['folder_no'],
            'sample_ids': response.meta['sample_ids'],
            'sample_codes': sample_data.get('rows', [])
        }