import pandas as pd
import scrapy

from taoguba.items import TaogubaExcleItem


class StructpipeSpider(scrapy.Spider):
    """Fetch quoted-post JSON payloads from tgb.cn.

    Reads rows (``id``, ``quoted_url``) from a local ``aa.json`` file and
    issues one authenticated XHR-style request per non-empty ``quoted_url``,
    yielding a ``TaogubaExcleItem`` with the row id and the response's
    ``dto`` field.
    """

    name = "StructPipe"
    allowed_domains = ["tgb.cn"]
    # Throttle hard (serial requests, 2s apart) to stay under the site's
    # rate limits.
    custom_settings = {
        'DOWNLOAD_DELAY': 2,
        'CONCURRENT_REQUESTS': 1
    }

    def start_requests(self):
        """Yield one request per row of ``aa.json`` that has a quoted_url.

        NOTE(review, security): the cookie string below embeds live session
        credentials (``tgbuser`` / ``tgbpwd`` auth token). These should be
        moved to settings or environment variables rather than committed to
        source control.
        """
        # A previously hard-coded cookie string for another account was dead
        # code (immediately overwritten) and has been removed.
        s = 'gdp_user_id=gioenc-28egdbb4%2C0d71%2C5216%2Caag9%2Cc0adb1g64de6; creatorStatus7420135=true; agree=enter; acw_tc=0a09669a17405637052287640ec45f41390aae00d6a0f7090842fd26279ef8; JSESSIONID=ZWJiZGM3ZDMtMmUzYy00NDU2LWJhYTMtMzA1MWYwN2YwYjZj; loginStatus=phone; tgbuser=11724735; tgbpwd=0d626b17f6bc4bc23cfe89f7659e1c81f40f3228cbe623465b0e530e1e822fa9oj2kqrxjr4x0id3; 893eedf422617c96_gdp_gio_id=gioenc-00635624; 893eedf422617c96_gdp_session_id=a609f37f-507b-4ae6-a3d2-e03286cbfcac; 893eedf422617c96_gdp_cs1=gioenc-00635624; Actionshow2=true; 893eedf422617c96_gdp_sequence_ids=%7B%22globalKey%22%3A333%2C%22VISIT%22%3A41%2C%22PAGE%22%3A148%2C%22CUSTOM%22%3A146%7D; 893eedf422617c96_gdp_session_id_a609f37f-507b-4ae6-a3d2-e03286cbfcac=true'
        # split('=', 1) keeps any '=' characters inside a cookie value intact
        # (the old split('=')[-1] dropped all but the last segment).
        cookies = dict(pair.split('=', 1) for pair in s.split('; '))
        headers = {
            'x-requested-with': 'XMLHttpRequest',
            'Accept': 'application/json, text/javascript, */*; q=0.01'
        }

        df = pd.read_json('aa.json', orient='records')
        for index, row in df.iterrows():
            quoted_url = row['quoted_url']
            # Guard against pandas NaN for missing URLs: NaN is a truthy
            # float, so a plain truthiness test would request url=nan.
            if isinstance(quoted_url, str) and quoted_url:
                yield scrapy.Request(
                    url=quoted_url,
                    callback=self.parse,
                    cookies=cookies,
                    headers=headers,
                    meta={'id': row['id']}
                )

    def parse(self, response):
        """Extract the JSON body's ``dto`` field into an item.

        Assumes the response is JSON with a top-level ``dto`` key — TODO
        confirm against the live endpoint; ``.get`` yields None otherwise.
        """
        item = TaogubaExcleItem()
        item['id'] = response.meta.get('id')
        item['quoted_text'] = response.json().get('dto')
        yield item

