import scrapy
import urllib.parse
import pymssql
import json


class CnkiGridSpider(scrapy.Spider):
    """Search CNKI (kns.cnki.net) journal articles for DB-managed keywords.

    Flow:
      1. Load ``(keyword, department, website_id)`` rows from SQL Server.
      2. POST each keyword to the CNKI grid search endpoint.
      3. Follow every result row to its article detail page.
      4. Yield one item per detail page not already stored in
         ``crawler_data_source``.
    """

    name = "cnki_grid"
    base_url = "https://kns.cnki.net"
    start_url = "https://kns.cnki.net/kns8s/brief/grid"

    custom_settings = {
        'ROBOTSTXT_OBEY': False,
        # Be gentle with CNKI: one request at a time, randomized 3s delay.
        'CONCURRENT_REQUESTS': 1,
        'DOWNLOAD_DELAY': 3,
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:144.0) Gecko/20100101 Firefox/144.0',
        'RANDOMIZE_DOWNLOAD_DELAY': True,
        'COOKIES_ENABLED': True,
        'LOG_LEVEL': 'INFO',
    }

    # Headers for the AJAX search POST; CNKI checks the Origin/Referer/XHR markers.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:144.0) Gecko/20100101 Firefox/144.0',
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br, zstd',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Origin': 'https://kns.cnki.net',
        'Referer': 'https://kns.cnki.net',
        'Connection': 'keep-alive',
    }

    def __init__(self, *args, **kwargs):
        """Open the SQL Server connection per spider instance.

        The connection used to live in the class body, which opened a DB
        connection as a side effect of merely importing this module; opening
        it here keeps import cheap and side-effect free.
        """
        super().__init__(*args, **kwargs)
        # NOTE(review): credentials are hard-coded — move them to Scrapy
        # settings or environment variables.
        self.conn = pymssql.connect(
            server='192.168.3.147',
            port=1433,
            user='sa',
            password='cde3CDE#',
            database='dms',
            charset='utf8'
        )
        self.cursor = self.conn.cursor()

    def closed(self, reason):
        """Scrapy hook: release the DB cursor/connection when the spider stops."""
        try:
            self.cursor.close()
        finally:
            self.conn.close()

    def create_formdata(self, keyword: str) -> dict:
        """Build the POST form for a CNKI subject (``SU``) search on *keyword*.

        *keyword* must be the raw, unencoded text: ``scrapy.FormRequest``
        URL-encodes the form body itself when it serializes ``formdata``.
        """
        query_json = {
            "Platform": "NZKPT",
            "Resource": "JOURNAL",
            "Classid": "YSTT4HG0",
            "Products": "",
            "QNode": {
                "QGroup": [
                    {
                        "Key": "Subject",
                        "Title": "",
                        "Logic": 0,
                        "Items": [
                            {
                                "Field": "SU",
                                "Value": keyword,
                                "Operator": "TOPRANK",
                                "Logic": 0,
                                "Title": "主题"
                            }
                        ],
                        "ChildItems": []
                    }
                ]
            },
            "ExScope": 1,
            "SearchType": 2,
            "Rlang": "CHINESE",
            "KuaKuCode": "",
            "Expands": {},
            "SearchFrom": 1
        }

        # ensure_ascii=False keeps the CJK keyword readable in the JSON payload.
        return {
            "boolSearch": "true",
            "QueryJson": json.dumps(query_json, ensure_ascii=False),
            "pageNum": "1",
            "pageSize": "20",
            "dstyle": "listmode",
            "aside": f"主题：{keyword}",
            "searchFrom": "资源范围：学术期刊",
            "subject": "",
            "language": "CHS",
            "uniplatform": "NZKPT",
            "CurPage": "1"
        }

    def start_requests(self):
        """POST one grid-search request per keyword loaded from the database."""
        # Parameterized query: avoids SQL injection and literal-escaping issues.
        self.cursor.execute("""
            SELECT DISTINCT 
                ckm.keyword,
                ckm.department,
                cwm.id AS website_id
            FROM crawler_keyword_manager ckm
            JOIN crawler_website_manager cwm
                ON cwm.website_url = %s
            WHERE cwm.search_scope = 0 OR ckm.department = cwm.search_scope;
        """, (self.base_url,))
        rows = self.cursor.fetchall()

        # Warm-up visit to the home page (sets session cookies; optional).
        yield scrapy.Request(self.base_url, callback=self.parse_list)

        for idx, (keyword, department, website_id) in enumerate(rows, start=1):
            # Strip BEFORE the emptiness check so whitespace-only keywords
            # are skipped as well.
            keyword = (keyword or '').strip()
            if not keyword:
                continue

            # BUG FIX: do NOT pre-encode with urllib.parse.quote_plus here.
            # FormRequest URL-encodes the form body itself, so pre-encoding
            # double-encoded the keyword inside QueryJson and broke the
            # search (the 'aside' field already received the raw keyword).
            formdata = self.create_formdata(keyword)

            self.logger.info(f"📘 [{idx}] 生成搜索URL: {self.start_url}, 部门: {department}")

            yield scrapy.FormRequest(
                url=self.start_url,
                headers=self.headers,
                formdata=formdata,
                callback=self.parse_list,
                meta={
                    'keyword': keyword,
                    'department': department,
                    'website_id': website_id,
                    'idx': idx,
                },
                dont_filter=True
            )

    def parse_list(self, response):
        """Parse a CNKI grid result page and follow each article link."""
        rows = response.css('tr')
        self.logger.info(f"发现 {len(rows)} 条结果")

        keyword = response.meta.get('keyword')
        website_id = response.meta.get('website_id')
        department = response.meta.get('department')

        for row in rows:
            title = row.css('td.name a::text').get()
            href = row.css('td.name a::attr(href)').get()
            # Header/ad rows have no title link — skip them.
            if not title or not href:
                continue
            title = title.strip()
            full_url = urllib.parse.urljoin(self.base_url, href)
            self.logger.info(f"📄 {title} -> {full_url}")
            yield response.follow(
                full_url,
                headers=self.headers,
                callback=self.parse_detail,
                meta={'title': title, 'keyword': keyword, 'department': department, 'website_id': website_id},
                dont_filter=True
            )

    def parse_detail(self, response):
        """Parse an article detail page; yield an item unless already stored."""
        self.logger.info(f"🎯 解析详情页: {response.url}")
        # Full page dumps are far too noisy for INFO — keep them at DEBUG.
        self.logger.debug(f"🎯 内容: {response.text}")

        # Dedup: skip URLs that already exist in crawler_data_source.
        self.cursor.execute("""
            SELECT 1 
            FROM crawler_data_source AS cds 
            WHERE cds.source_url = %s
        """, (response.url,))
        if self.cursor.fetchone() is not None:
            return

        keyword = response.meta.get('keyword')
        website_id = response.meta.get('website_id')
        department = response.meta.get('department')

        title = response.meta.get('title', '')
        authors = response.css('h3#authorpart span a::text').getall()
        summary = response.css('span#ChDivSummary::text').get(default='').strip()
        pdf_url = response.css('li.btn-dlpdf a::attr(href)').get()
        pub_date = response.css('div.head-time span::text').re_first(r'(\d{4}-\d{2}-\d{2})')
        self.logger.info(title)

        # NOTE(review): 'publication_date'/'publish_date' and
        # 'url'/'source_url' are intentionally duplicated — downstream
        # pipelines may read either key; confirm before consolidating.
        yield {
            'keyword': keyword,
            'website': self.base_url,
            'website_id': website_id,
            'department': department,
            'title': title.strip() if title else None,
            'author': [a.strip() for a in authors] if authors else [],
            'publication_date': pub_date.strip() if pub_date else None,
            'url': response.url,
            'attachment_url': response.urljoin(pdf_url) if pdf_url else None,
            'full_text': '',
            'publish_date': pub_date,
            'is_paid': 1,
            'summary': summary,
            'source_url': response.url
        }
