import scrapy
import urllib.parse
import pymssql
import json
import re
import base64
from datetime import datetime


class LinkSpringerSpider(scrapy.Spider):
    """Crawl link.springer.com search results for keywords configured in the
    `dms` MSSQL database and yield one metadata item per article detail page.

    Keywords come from `crawler_keyword_manager` joined against
    `crawler_website_manager`; already-seen URLs (present in
    `crawler_data_source`) are skipped.
    """

    name = "link-springer"
    base_url = "https://link.springer.com"

    custom_settings = {
        'ROBOTSTXT_OBEY': False,
        'CONCURRENT_REQUESTS': 1,
        'DOWNLOAD_DELAY': 3,
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:144.0) Gecko/20100101 Firefox/144.0',
        'RANDOMIZE_DOWNLOAD_DELAY': True,
        'COOKIES_ENABLED': True,
        'LOG_LEVEL': 'INFO',
    }

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:144.0) Gecko/20100101 Firefox/144.0',
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br, zstd',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Origin': base_url,
        'Referer': base_url,
        'Connection': 'keep-alive',
    }

    def __init__(self, *args, **kwargs):
        """Open the DB connection per spider instance.

        Previously the connection was created at class-definition time, which
        made merely importing this module open a socket (and fail hard if the
        server was unreachable); it was also never closed.
        """
        super().__init__(*args, **kwargs)
        # SECURITY NOTE(review): credentials are hard-coded; they should be
        # moved to Scrapy settings or environment variables.
        self.conn = pymssql.connect(
            server='192.168.3.147',
            port=1433,
            user='sa',
            password='cde3CDE#',
            database='dms',
            charset='utf8',
        )
        self.cursor = self.conn.cursor()

    def closed(self, reason):
        """Scrapy shutdown hook: release cursor and connection."""
        try:
            self.cursor.close()
        finally:
            self.conn.close()

    def start_requests(self):
        """Load keywords from the DB and issue one search request per keyword."""
        # Parameterized query: avoids str.format brace issues and SQL injection.
        self.cursor.execute("""
            SELECT DISTINCT 
                ckm.keyword,
                ckm.department,
                cwm.id AS website_id
            FROM crawler_keyword_manager ckm
            JOIN crawler_website_manager cwm
                ON cwm.website_url = %s
            WHERE cwm.search_scope = 0 OR ckm.department = cwm.search_scope;
        """, (self.base_url,))
        rows = self.cursor.fetchall()

        # Search page URL template (GET, relevance-sorted).
        search_url_template = self.base_url + "/search?query={query}&sortBy=relevance"

        # NOTE: a former warm-up request to the homepage used
        # `callback=self.parse`, but no `parse` method is defined, so the
        # default scrapy.Spider.parse would raise NotImplementedError on the
        # response. It has been removed.

        for idx, (keyword, department, website_id) in enumerate(rows, start=1):
            if not keyword:
                continue

            keyword = keyword.strip()

            # URL-encode the keyword for the query string.
            encoded = urllib.parse.quote_plus(keyword)
            full_url = search_url_template.format(query=encoded)

            self.logger.info(f"📘 [{idx}] 生成搜索URL: {full_url}, 部门: {department}")

            # Carry the DB context through meta so downstream callbacks can
            # attach it to the yielded items.
            yield scrapy.Request(
                full_url,
                callback=self.parse_list,
                meta={'keyword': keyword, 'department': department, 'website_id': website_id, 'idx': idx},
                dont_filter=True,
            )

    def parse_list(self, response):
        """Parse one search-results page: follow each hit, then paginate."""
        rows = response.css('[data-test="search-result-item"]')
        self.logger.info(f"发现 {len(rows)} 条结果")

        keyword = response.meta.get('keyword')
        website_id = response.meta.get('website_id')
        department = response.meta.get('department')
        count = response.meta.get('total_count')

        for row in rows:
            title = row.css('.app-card-open__heading *::text').get()
            href = row.css('.app-card-open__link::attr(href)').get()
            if not title or not href:
                continue
            title = title.strip()
            full_url = urllib.parse.urljoin(self.base_url, href)
            self.logger.info(f"📄 {title} -> {full_url}")
            yield response.follow(
                full_url,
                headers=self.headers,
                callback=self.parse_detail,
                meta={'title': title, 'keyword': keyword, 'department': department, 'website_id': website_id},
                dont_filter=True,
            )

        # Pagination: follow the "next page" link if present.
        next_url = response.css('[data-test="next-page"]::attr(href)').get()
        if next_url:
            self.logger.info(f"📄 发现下一页按钮: {next_url}")
            # The href is relative — resolve it against the site root.
            full_url = urllib.parse.urljoin(self.base_url, next_url)
            yield scrapy.Request(
                full_url,
                callback=self.parse_list,
                meta={
                    'keyword': keyword,
                    'department': department,
                    'website_id': website_id,
                    'total_count': count
                },
                dont_filter=True,
            )
        else:
            self.logger.info("📄 未发现下一页按钮，可能是最后一页")

    def parse_detail(self, response):
        """Parse an article detail page and yield its metadata item.

        Pages whose URL already exists in `crawler_data_source` are skipped.
        """
        self.logger.info(f"🎯 解析详情页: {response.url}")

        # Skip URLs that were already ingested.
        self.cursor.execute("""
            SELECT 1 
            FROM crawler_data_source AS cds 
            WHERE cds.source_url = %s
        """, (response.url,))
        exists = self.cursor.fetchone() is not None

        if exists:
            return

        keyword = response.meta.get('keyword')
        website_id = response.meta.get('website_id')
        department = response.meta.get('department')

        # BUGFIX: `.get()` was missing, so `title` was a SelectorList and the
        # later `title.strip()` raised AttributeError.
        title = response.css('.c-article-title::text').get()
        authors = response.css('[data-test="author-name"] a::text').getall()
        summary = ''.join(response.css('#Abs1-content *::text').getall()).strip() or ''
        full_text = ''.join(response.css('#Sec1-content *::text').getall()).strip() or ''
        pdf_url = response.css('a.c-pdf-download__link::attr(href)').get()
        pub_date = response.xpath('//dt[contains(text(), "First published")]/following-sibling::dd[1]/text()').get()

        # BUGFIX: strptime crashed (TypeError/ValueError) when the date cell
        # was missing or not in "%d %b %Y" format; degrade to None instead.
        formatted_date = None
        if pub_date:
            try:
                formatted_date = datetime.strptime(pub_date.strip(), "%d %b %Y").strftime("%Y-%m-%d")
            except ValueError:
                self.logger.warning(f"Unparseable publish date: {pub_date!r} on {response.url}")

        # No extracted body text implies the full text is behind a paywall.
        is_paid = 0 if len(full_text) > 0 else 1

        yield {
            'keyword': keyword,
            'website': self.base_url,
            'website_id': website_id,
            'department': department,
            'title': title.strip() if title else None,
            'author': [a.strip() for a in authors] if authors else [],
            'attachment_url': response.urljoin(pdf_url) if pdf_url else None,
            'full_text': full_text,
            'publish_date': formatted_date,
            'is_paid': is_paid,
            'summary': summary,
            'source_url': response.url
        }

    def createJournal(self, keyword, resultcount, pageno):
        """Build the form body for Springer's legacy journal-search endpoint.

        The search term XML is base64- then URL-encoded into a
        `searchterm=...&resultcount=...&category=journal&pageno=...` string.
        """
        # BUGFIX: the keyword was interpolated into XML unescaped, so values
        # containing &, < or > produced malformed XML.
        from xml.sax.saxutils import escape  # local import: only used here
        safe_keyword = escape(keyword)

        xml_content = f'''<?xml version="1.0"?>
        <SearchTerm xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
        <Category>journal</Category>
        <SubCategory />
        <ContentType>journal</ContentType>
        <Criterias>
            <NameValue>
            <Name>freetext</Name>
            <Value>{safe_keyword}</Value>
            </NameValue>
            <NameValue>
            <Name>OriginalFreeText</Name>
            <Value>{safe_keyword}</Value>
            </NameValue>
        </Criterias>
        <Facets />
        <RequestTime>0001-01-01T00:00:00</RequestTime>
        <AuthorCriteria />
        <PublicationDate>
            <IsSelectedDate>false</IsSelectedDate>
        </PublicationDate>
        <Excludes />
        </SearchTerm>'''

        # Encode: UTF-8 bytes -> base64 -> percent-encode for the form body.
        base64_encoded = base64.b64encode(xml_content.encode('utf-8')).decode('utf-8')
        searchterm = urllib.parse.quote(base64_encoded)

        return f"searchterm={searchterm}&resultcount={resultcount}&category=journal&pageno={pageno}"