import json
import re
import time

import scrapy
import scrapy.http

from wisdom.crawler.items import NewsItem
class EskuaixunSpider(scrapy.Spider):
    """Spider for Eastmoney's 7x24 fast-news feed (kuaixun.eastmoney.com).

    Issues one GET request per column listed in :attr:`fast_column_dict`
    against the ``np-weblist`` JSONP endpoint, strips the JSONP wrapper,
    and yields one :class:`NewsItem` per complete news entry.

    NOTE(review): pagination via ``sortEnd`` is not implemented — each
    column is fetched once with ``pageSize=100``.
    """

    name = "eskuaixun"
    allowed_domains = ["kuaixun.eastmoney.com", "np-weblist.eastmoney.com"]

    # fastColumn id -> human-readable column name; the name doubles as the
    # financial tag attached to every item scraped from that column.
    fast_column_dict = {
        "102": "全球直播",
        "101": "焦点",
        "103": "上市公司",
        "110": "中国",
        "111": "美国",
        "112": "欧元区",
        "113": "英国",
        "114": "日本",
        "115": "加拿大",
        "116": "澳洲",
        "117": "新兴市场",
        "118": "中国央行",
        "119": "美联储",
        "120": "欧洲央行",
        "121": "英国央行",
        "122": "日本央行",
        "123": "加拿大央行",
        "124": "澳洲联储",
        "125": "中国数据",
        "126": "美国数据",
        "127": "欧元区数据",
        "128": "英国数据",
        "129": "日本数据",
        "130": "加拿大数据",
        "131": "澳洲数据",
        "105": "全球股市",
        "106": "商品",
        "107": "外汇",
        "108": "债券",
        "109": "基金",
    }

    # Column names that map onto NewsItem['country'].
    _COUNTRY_COLUMNS = frozenset(
        ["中国", "美国", "欧元区", "英国", "日本", "加拿大", "澳洲"]
    )

    # Compiled once: extract the JSON payload out of the JSONP wrapper,
    # e.g. jQuery18308..._1712...({"code":"1",...}) -> {"code":"1",...}
    _JSONP_RE = re.compile(r'jQuery\d+_\d+\((.*)\)')
    # Recover which column a response belongs to from its request URL.
    _FAST_COLUMN_RE = re.compile(r'fastColumn=(\d+)')

    def start_requests(self):
        """Yield one GET request per fast-news column.

        The endpoint expects JS-style millisecond timestamps in its
        cache-busting parameters, so they are generated from the current
        time rather than hard-coded.
        """
        ts_ms = int(time.time() * 1000)
        for column_id in self.fast_column_dict:
            url = (
                "https://np-weblist.eastmoney.com/comm/web/getFastNewsList"
                "?client=web&biz=web_724"
                f"&fastColumn={column_id}"
                "&sortEnd=&pageSize=100"
                f"&req_trace={ts_ms}&_={ts_ms + 1}"
                f"&callback=jQuery18308332721648860277_{ts_ms}"
            )
            # Plain GET: the original FormRequest carried no form data,
            # so it behaved as a GET anyway — Request says what it means.
            yield scrapy.Request(url)

    def parse(self, response: scrapy.http.Response):
        """Parse one JSONP fast-news listing and yield ``NewsItem`` objects.

        Response shape (observed):
            jQuery..._...({"code":"1","data":{"fastNewsList":[
                {"code":"2024...","showTime":"2024-04-12 16:29:00",
                 "stockList":[],"summary":"...","title":"..."}, ...]}})

        Item URLs follow https://finance.eastmoney.com/a/<code>.html.

        Yields:
            NewsItem: one per entry that has a non-empty code, title,
            summary and showTime; incomplete entries are skipped.
        """
        # Recover the column (financial attribute) from the request URL.
        fin_attr = ""
        fast_col_match = self._FAST_COLUMN_RE.search(response.url)
        if fast_col_match:
            fin_attr = self.fast_column_dict.get(fast_col_match.group(1), "")

        # response.text decodes using the response's declared encoding,
        # instead of assuming UTF-8 on the raw body.
        jsonp_match = self._JSONP_RE.search(response.text)
        if not jsonp_match:
            self.logger.warning("No JSON data found in response: %s", response.url)
            return

        try:
            data = json.loads(jsonp_match.group(1))
        except json.JSONDecodeError as e:
            self.logger.error("Error parsing JSON from %s: %s", response.url, e)
            return

        for entry in data.get('data', {}).get('fastNewsList', []):
            title = entry.get('title', '')
            summary = entry.get('summary', '')
            show_time = entry.get('showTime', '')
            code = entry.get('code', '')
            # Skip incomplete entries (e.g. image-only posts with an
            # empty summary) — all four fields are required downstream.
            if not (code and title and summary and show_time):
                continue

            news = NewsItem()
            news['title'] = title
            news['content'] = summary
            news['source'] = "东方财富网"
            news['published_date'] = show_time
            news['url'] = f"https://finance.eastmoney.com/a/{code}.html"
            news['code'] = code
            news['category'] = NewsItem.QUICK
            news['stock_list'] = entry.get('stockList', [])
            if fin_attr:
                news['fin_tags'] = [fin_attr]
                if fin_attr == "上市公司":
                    news['listed_company'] = "1"
                elif fin_attr in self._COUNTRY_COLUMNS:
                    news['country'] = fin_attr
            yield news