'''
1. Because the Browse Url is configured and the page is visited elsewhere,
   it is enough to keep only a single independent ID at this position.
'''
import logging
import time
import random
import json
import scrapy
from scraper.ScrapeConfigManager import ScrapeConfigManager
from scraper.items import IndexPageItem
from scrapy.spiders import Spider


class IndexPageSpider(Spider):
    """Spider that collects detail-page urls from index pages.

    It does not scrape article data — it only emits :class:`IndexPageItem`
    objects whose ``urls`` list holds detail-page urls (forum sources join
    extra brand/series metadata into each url with ``'@@'``).
    """
    name = 'IndexPage'
    custom_settings = {
        'ITEM_PIPELINES': {
            'scraper.pipelines.IndexPagePipeline': 400
        }
    }

    def __init__(self, sourceId='', *args, **kwargs):
        """Load the scrape configuration for *sourceId*.

        :param sourceId: identifier of the target website, e.g.
            ``'complaint_12365auto'`` or ``'forum_autohome'``.
        """
        # Fix: Spider.__init__ must run so scrapy applies its own keyword
        # arguments (name, start_urls, ...) to the instance.
        super().__init__(*args, **kwargs)
        self.sourceId = sourceId
        self.config = ScrapeConfigManager().get_config(sourceId)
        # Index-page urls to request; shuffled later in start_requests.
        self.indexPageUrls = self.config['index_pages_urls']['urls']
        # Xpath expressions used by the generic href-extraction callbacks.
        self.first_set_xpaths = self.config['Index_Page_Xpath']['first_set_xpaths']
        self.first_set_xpath_prev_bind = self.config['Index_Page_Xpath']['first_set_xpath_prev_bind']
        self.ctypeInfo_xpath = self.config['Index_Page_Add_Info']['ctypeinfo_xpath']

    def start_requests(self):
        '''
        Dispatch one request per configured index-page url.

        Because the mechanism in different websites is different, each
        sourceId has its own parse callback (named
        ``parse_indexPage_<sourceId>``) and, for a few sites, extra
        cookies/headers.  Forum sources encode metadata in the configured
        url as ``url@@series_src@@brand_src``.
        '''
        logging.info('INFO : Start scrape the url of frontend page from {sourceId}'.format(
            sourceId=self.sourceId
        ))
        random.shuffle(self.indexPageUrls)

        complaint_sources = {
            'complaint_12365auto', 'complaint_315auto', 'complaint_315qc',
            'complaint_aqsiqauto', 'complaint_autohome', 'complaint_qctsw',
            'complaint_qiche365', 'complaint_qichemen', 'complaint_tousu16888',
        }
        forum_sources = {
            'forum_autohome', 'forum_dongchedi', 'forum_pcauto',
            'forum_xcar', 'forum_yiche',
        }
        # TODO 20231026: CuZu sources (cuzu_autohome / cuzu_dongchedi /
        # cuzu_yiche) are intentionally not dispatched yet; their parse
        # callbacks exist below but scraping is disabled for now.

        for indexpageurl in self.indexPageUrls:
            if self.sourceId in complaint_sources:
                # Random delay between requests to avoid hammering the site.
                time.sleep(random.randint(5, 10))
                yield self._build_complaint_request(indexpageurl)
            elif self.sourceId in forum_sources:
                time.sleep(random.randint(5, 10))
                yield self._build_forum_request(indexpageurl)
            else:
                logging.info('ERROR : sourceId = {} is incorrect'.format(
                    self.sourceId)
                )

    def _build_complaint_request(self, indexpageurl):
        """Build the request for a complaint source, adding per-site cookies/headers."""
        kwargs = {}
        if self.sourceId == 'complaint_315auto':
            kwargs['cookies'] = {
                'acw_tc': '3d30538617337096249436438e6b7ae61d6ecf2bf9c4f4ed1ff90dace3',
                'cdn_sec_tc': '3d30538617337096249436438e6b7ae61d6ecf2bf9c4f4ed1ff90dace3',
                'Hm_lvt_79202654929826050bb4ca874005b683': '1731314757,1733709706',
                'HMACCOUNT': 'AE97396C8F607E00',
                'Hm_lpvt_79202654929826050bb4ca874005b683': '1733710055'
            }
            kwargs['headers'] = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
            }
        elif self.sourceId == 'complaint_tousu16888':
            kwargs['cookies'] = self.config['index_pages_urls']['cookie']
        return scrapy.Request(
            url=indexpageurl,
            callback=getattr(self, 'parse_indexPage_' + self.sourceId),
            dont_filter=True,
            **kwargs
        )

    def _build_forum_request(self, indexpageurl):
        """Build the request for a forum source; the url carries series/brand metadata."""
        _indexpageurl, series_src, brand_src = indexpageurl.split('@@')
        kwargs = {}
        if self.sourceId == 'forum_dongchedi':
            # dongchedi requires a mobile user agent.
            kwargs['headers'] = {
                'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1'
            }
            logging.info('============================')
            logging.info(_indexpageurl)
            logging.info('============================')
        return scrapy.Request(
            url=_indexpageurl,
            callback=getattr(self, 'parse_indexPage_' + self.sourceId),
            dont_filter=True,
            meta={'series_src': series_src,
                  'brand_src': brand_src},
            **kwargs
        )

    def parse_indexPage_complaint_12365auto(self, response):
        '''
        Crawl from Mobile Phone web:
            - https://m.12365auto.com/Server/forComplain.ashx?act=GetComAppend&i=1&s=200
        The corresponding Web Page:
            - http://www.12365auto.com/zlts/0-0-0-0-0-0_0-0-1.shtml

        :returns: IndexPageItem whose ``urls`` holds the detail-page urls.
        '''
        logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url
        ))
        item = IndexPageItem()
        item['urls'] = []
        # response.text replaces the deprecated Response.body_as_unicode().
        response_list = json.loads(response.text)
        for res in response_list:
            # TODO 20231023: the raw complaint tags ('Info') were dropped on
            # purpose; only the url is kept and NLP parses the text later.
            if 'Url' in res:
                item['urls'].append(res['Url'])
            else:
                logging.info('ERROR : IndexPage extraction in complaint_12365auto is incorrect')
        return item

    def parse_indexPage_complaint_315auto(self, response):
        '''
        Crawl from json file content:
            - http://315.auto.china.com.cn/ajax/index.html?page=1&bid=&sid=&mid=
        Corresponding PC web:
            - https://315.auto.china.com.cn/zlts/complaint-list.html

        :returns: IndexPageItem whose ``urls`` holds the complaint ids.
        '''
        logging.info('INFO : {sourceId} Adding Urls from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url
        ))
        item = IndexPageItem()
        item['urls'] = []
        # response.text replaces the deprecated Response.body_as_unicode().
        response_dict = json.loads(response.text)
        if 'data' in response_dict and 'list' in response_dict['data']:
            for res in response_dict['data']['list']:
                detail_page_url = res.get('id')
                # Skip records without an id instead of appending None.
                if detail_page_url is not None:
                    item['urls'].append(detail_page_url)
        else:
            logging.info('ERROR : IndexPage extraction in complaint_315auto is incorrect')
        return item

    def parse_indexPage_complaint_315qc(self, response):
        '''
        Crawl from mobile web :
            - http://www.315qc.com/Mobile/Carcomplaints/index
            ( The display in mobile-frontend maybe needs some modify )
        Corresponding PC web :
            - http://www.315qc.com/Home/Carcomplaints/index

        Each url is emitted as ``href@@brand@@series`` because brand and
        series only exist on the index page (note 20231025).
        '''
        logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url
        ))
        item = IndexPageItem()
        item['urls'] = list()
        __div_list = response.xpath(".//div[@class='carsjd-list']")
        for __div in __div_list:
            href = __div.xpath("./a/@href").extract_first()
            if href is None:
                # A missing href used to raise TypeError on concatenation.
                logging.info('ERROR : {sourceId} extract href error : {href}-{href_type}'.format(
                    sourceId=self.sourceId,
                    href=href,
                    href_type=str(type(href)),
                ))
                continue
            # extract_first() returns None on a miss; fall back to '' so the
            # .replace() calls cannot raise AttributeError.
            brand = (__div.xpath("./div[@class='carslid_jskd']/span[1]/text()").extract_first() or '').replace('投诉品牌：', '')
            series = (__div.xpath("./div[@class='carslid_jskd']/span[2]/text()").extract_first() or '').replace('投诉车型：', '')
            item['urls'].append(href + '@@' + brand + '@@' + series)
        return item

    def parse_indexPage_complaint_aqsiqauto(self, response):
        '''
        Crawl from PC web :
            - https://www.aqsiqauto.com/complaint/index?complaint_status=1,3,4,5,7

        Collects hrefs via the configured xpaths and returns them in an
        IndexPageItem.
        '''
        logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url,
        ))
        item = IndexPageItem()
        item['urls'] = list()
        extracted = []
        for xpath_expr in self.first_set_xpaths:
            extracted += response.xpath(xpath_expr).extract()
        for candidate in extracted:
            if not isinstance(candidate, str):
                logging.info('ERROR : {sourceId} extract href error : {href}-{href_type}'.format(
                    sourceId=self.sourceId,
                    href=candidate,
                    href_type=str(type(candidate)),
                ))
                continue
            item['urls'].append(candidate)
        return item

    def parse_indexPage_complaint_autohome(self, response):
        '''
        Crawl from JSON-like file
            - https://tousu.m.autohome.com.cn/api/complaint/exposure/index/list?cityCode=&carBrandId=&type=&progress=&pageNo=1&pageSize=20&dispatchType=isDefaultLoadData
        Corresponding PC web :
            - https://tousu.m.autohome.com.cn/index
        Modify in 20220711 :
            - The json-like content already have title and content

        :returns: IndexPageItem whose ``urls`` holds complaint ids as strings.
        '''
        logging.info('INFO : {sourceId} Adding URLs from Index Page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url,
        ))
        item = IndexPageItem()
        item['urls'] = list()
        # response.text replaces the deprecated Response.body_as_unicode().
        res = json.loads(response.text)
        if res.get('returncode') == 0:
            for _r in res['result']['list']:
                _id = _r.get('id')
                if _id:
                    item['urls'].append(str(_id))
                else:
                    logging.info('ERROR : {sourceId} the response from {URL}={_id} is incorrect'.format(
                        sourceId=self.sourceId, URL=response.url, _id=_id,
                    ))
        else:
            logging.info('ERROR : {sourceId} response from {URL} is incorrect'.format(
                sourceId=self.sourceId, URL=response.url
            ))
        return item

    def parse_indexPage_complaint_qctsw(self, response):
        '''
        Crawl from JSON-like file
            - https://www.qctsw.com/api/app/base/complain/simplePage?currentPage=1&pageSize=10&status=more&hasNextPage=true&complainPageParam=NOW
        Corresponding PC web:
            - http://qctsw.com/tousu/tousu.html

        :returns: IndexPageItem whose ``urls`` holds complaint ids as strings.
        '''
        logging.info('INFO : {sourceId} Adding Urls from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url
        ))
        item = IndexPageItem()
        item['urls'] = []
        # response.text replaces the deprecated Response.body_as_unicode().
        res = json.loads(response.text)
        # Both failure modes (bad code, missing payload) were logged with the
        # same message, so the two branches are merged.
        if res.get('code') == 200 and 'data' in res:
            for _r in res['data']['list']:
                item['urls'].append(str(_r['id']))
        else:
            logging.info('ERROR : {sourceId} response from {URL} is incorrect'.format(
                sourceId=self.sourceId,
                URL=response.url
            ))
        return item

    def parse_indexPage_complaint_qiche365(self, response):
        '''
        Crawl from PC web frontend
            - https://www.qiche365.org.cn/index/complaints/index.html

        Collects hrefs via the configured xpaths and returns them in an
        IndexPageItem.
        '''
        logging.info('INFO : {sourceId} Adding Urls from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url
        ))
        item = IndexPageItem()
        collected = []
        for xpath_expr in self.first_set_xpaths:
            collected.extend(response.xpath(xpath_expr).extract())
        string_hrefs = []
        for value in collected:
            if isinstance(value, str):
                string_hrefs.append(value)
            else:
                logging.info('ERROR : {sourceId} response from {URL} is incorrect'.format(
                    sourceId=self.sourceId,
                    URL=response.url
                ))
        item['urls'] = string_hrefs
        return item

    def parse_indexPage_complaint_qichemen(self, response):
        '''
        Crawl from PC web:
            - https://www.qichemen.com/complain.html

        Collects hrefs via the configured xpaths and returns them in an
        IndexPageItem.
        '''
        logging.info('INFO : {sourceId} Adding URLs from Index Page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url
        ))
        item = IndexPageItem()
        item['urls'] = list()
        # Lazily walk every match of every configured xpath.
        candidates = (value
                      for xpath_expr in self.first_set_xpaths
                      for value in response.xpath(xpath_expr).extract())
        for candidate in candidates:
            if isinstance(candidate, str):
                item['urls'].append(candidate)
            else:
                logging.info('ERROR : {sourceId} response from {URL} is incorrect'.format(
                    sourceId=self.sourceId,
                    URL=response.url
                ))
        return item

    def parse_indexPage_complaint_tousu16888(self, response):
        '''
        Crawl from Mobile web:
            - https://m.tousu.16888.com/

        Collects hrefs via the configured xpaths and returns them in an
        IndexPageItem.
        '''
        logging.info('INFO : {sourceId} Adding URLs from Index Page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url
        ))
        item = IndexPageItem()
        urls = []
        for xpath_expr in self.first_set_xpaths:
            for extracted in response.xpath(xpath_expr).extract():
                if not isinstance(extracted, str):
                    logging.info('ERROR : {sourceId} response from {URL} is incorrect'.format(
                        sourceId=self.sourceId,
                        URL=response.url
                    ))
                    continue
                urls.append(extracted)
        item['urls'] = urls
        return item



    def parse_indexPage_forum_autohome(self, response):
        """
        Crawl from JSON-like frontend (see the app API under
        ``club.app.autohome.com.cn/.../club/bbs/topics``).
        Corresponding PC web frontend
            - https://club.autohome.com.cn/bbs/thread/30a957dd0d1f9f79/106741449-1.html

        :returns: IndexPageItem whose ``urls`` holds
            ``bizid@@brand_src@@series_src`` strings.
        """
        logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url,
        ))
        item = IndexPageItem()
        item['urls'] = list()
        # response.text replaces the deprecated Response.body_as_unicode().
        res = json.loads(response.text)

        _res_list = res['result']['list']
        series_src = response.meta['series_src']
        brand_src = response.meta['brand_src']

        for _r in _res_list:
            # Guard: a record whose 'carddata' is missing or None used to
            # raise AttributeError on the chained .get call.
            _bizid = (_r.get('carddata') or {}).get('bizid')
            if _bizid:
                detail_page_url = str(_bizid) + '@@' + brand_src + '@@' + series_src
                item['urls'].append(detail_page_url)
            else:
                logging.info("ERROR : {sourceId} could not extract detail page url".format(
                    sourceId=self.sourceId
                ))
        return item

    def parse_indexPage_forum_dongchedi(self, response):
        """
        Crawl from Mobile web frontend
            - https://www.dongchedi.com/community/4426/wenda
        Corresponding PC web frontend
            - https://www.dongchedi.com/community/1757/dongtai-release
            - directly replace the www within usage of m

        Only hrefs containing '/ugc/article' are kept; each is suffixed with
        '@@brand_src@@series_src'.
        """
        logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url,
        ))
        brand_src = response.meta['brand_src']
        series_src = response.meta['series_src']

        item = IndexPageItem()
        matched = []
        for xpath_expr in self.first_set_xpaths:
            for candidate in response.xpath(xpath_expr).extract():
                if not isinstance(candidate, str):
                    logging.info("ERROR : {sourceId} could not extract detail page url".format(
                        sourceId=self.sourceId
                    ))
                elif '/ugc/article' in candidate:
                    matched.append(candidate)
        item['urls'] = [m + '@@' + brand_src + '@@' + series_src for m in matched]
        return item

    def parse_indexPage_forum_pcauto(self, response):
        """
        Crawl from JSON-like web frontend
            - https://m.pcauto.com.cn/bbs/3g/loadOrderTopic.ajax?orderBy=postat&fid=32718&pageNo=1&pageSize=
            (Tips : we crawl the data from the json like response, not
            directly crawl the index page urls from web)
        Corresponding PC web frontend
            - https://m.pcauto.com.cn/bbs/topic-23868172.html

        :returns: IndexPageItem whose ``urls`` holds
            ``tid@@brand_src@@series_src`` strings.
        """
        logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url,
        ))
        brand_src = response.meta['brand_src']
        series_src = response.meta['series_src']

        # response.text replaces the deprecated Response.body_as_unicode().
        res = json.loads(response.text)
        item = IndexPageItem()
        item['urls'] = list()

        # Both failure modes (bad status, missing payload) were logged with
        # the same message, so the two branches are merged.
        if res.get('status') == 0 and 'data' in res:
            for _r in res['data']:
                detail_page_url = str(_r['tid']) + '@@' + brand_src + '@@' + series_src
                item['urls'].append(detail_page_url)
        else:
            logging.info('ERROR : {sourceId} response from {URL} is incorrect'.format(
                sourceId=self.sourceId,
                URL=response.url
            ))
        return item

    def parse_indexPage_forum_xcar(self, response):
        """
        Crawl from Mobile web frontend
            - https://a.xcar.com.cn/bbs/forum-d-1857.html

        Each extracted href is suffixed with '@@brand_src@@series_src'.
        """
        logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url,
        ))
        brand_src = response.meta['brand_src']
        series_src = response.meta['series_src']
        suffix = '@@' + brand_src + '@@' + series_src

        item = IndexPageItem()
        item['urls'] = list()

        extracted = []
        for xpath_expr in self.first_set_xpaths:
            extracted.extend(response.xpath(xpath_expr).extract())
        for value in extracted:
            if not isinstance(value, str):
                logging.info('ERROR : {sourceId} extract href error {href}-{href_type}'.format(
                    sourceId=self.sourceId,
                    href=value,
                    href_type=str(type(value))
                ))
                continue
            item['urls'].append(value + suffix)
        return item

    def parse_indexPage_forum_yiche(self, response):
        """
        Crawl from Mobile web frontend
            - https://baa.m.yiche.com/Arteon/index-0-1-2.html
        Corresponding PC web frontend
            - https://baa.yiche.com/Arteon/index-0-1-2.html

        Each extracted href is suffixed with '@@brand_src@@series_src'.
        """
        logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url,
        ))
        brand_src = response.meta['brand_src']
        series_src = response.meta['series_src']

        item = IndexPageItem()
        valid_hrefs = []
        for xpath_expr in self.first_set_xpaths:
            for extracted in response.xpath(xpath_expr).extract():
                if isinstance(extracted, str):
                    valid_hrefs.append(extracted)
                else:
                    logging.info('ERROR : {sourceId} extract href error {_url}-{_url_type}'.format(
                        sourceId=self.sourceId,
                        _url=extracted,
                        _url_type=str(type(extracted))
                    ))
        item['urls'] = [h + '@@' + brand_src + '@@' + series_src for h in valid_hrefs]
        return item


    # TODO 20231026 Add CuZu topics
    def parse_indexPage_cuzu_autohome(self, response):
        """
        Crawl from Json-like web frontend
            - https://koubeiipv6.app.autohome.com.cn/autov9.13.0/alibi/seriesAlibiList.ashx?seriesid=5785&grade=0&pageindex=1&pagesize=50&isstruct=1&order=1&appversion=11.55.0&from=1
        And the corresponding PC frontend , please refer to
            - https://k.autohome.com.cn/6265?order=1

        :returns: IndexPageItem whose ``urls`` holds
            ``Koubeiid@@brand_src@@series_src@@posttime`` strings.
        """
        logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url,
        ))
        series_src = response.meta['series_src']
        brand_src = response.meta['brand_src']

        item = IndexPageItem()
        item['urls'] = list()
        # response.text replaces the deprecated Response.body_as_unicode().
        res = json.loads(response.text)

        # The old 'hrefs' buffer was never filled and its trailing copy loop
        # was dead code, so both were removed.  The two identical error logs
        # for the failure branches are merged as well.
        if res.get('returncode') == 0 and 'result' in res:
            for _r in res['result']['list']:
                Koubeiid = _r.get('Koubeiid')
                posttime = _r.get('posttime', 'None')
                if Koubeiid:
                    detail_page_url = str(Koubeiid) + '@@' + brand_src + '@@' + series_src + '@@' + str(posttime)
                    item['urls'].append(detail_page_url)
        else:
            logging.info('ERROR : {sourceId} response from {URL} is incorrect'.format(
                sourceId=self.sourceId,
                URL=response.url
            ))
        return item

    def parse_indexPage_cuzu_dongchedi(self, response):
        """
        Crawl from Json-like web frontend
            - https://m.dongchedi.com/motor/pc/car/series/get_review_list?aid=1230&app_name=automobile_web&series_id=4586&part_id=S0&sort_by=create_time&page=1&count=50
        And the corresponding PC frontend page
            - https://www.dongchedi.com/auto/series/score/5217-x-S0-x-create_time-x-1
        As for the detailed page , please refer to
            - https://m.dongchedi.com/koubei/7294087009352552486

        :returns: IndexPageItem whose ``urls`` holds
            ``indexpageurl@@gid_str@@brand_src@@series_src`` strings.
        """
        logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url,
        ))
        series_src = response.meta['series_src']
        brand_src = response.meta['brand_src']
        _indexpageurl = response.meta['_indexpageurl']

        item = IndexPageItem()
        item['urls'] = list()
        # response.text replaces the deprecated Response.body_as_unicode().
        res = json.loads(response.text)

        # .get avoids a KeyError when the payload has no 'status' key; urls
        # are appended directly instead of going through an extra buffer.
        if res.get('status') == 0:
            for review in res['data']['review_list']:
                gid_str = review.get('gid_str')
                if gid_str:
                    detail_page_url = _indexpageurl + '@@' + gid_str + '@@' + brand_src + '@@' + series_src
                    item['urls'].append(detail_page_url)
        else:
            logging.info('ERROR : {sourceId} response from {URL} is incorrect'.format(
                sourceId=self.sourceId,
                URL=response.url
            ))
        return item

    #
    # def parse_indexPage_cuzu_yiche(self, response):
    #     """
    #     Crawl index page from PC web frontend page
    #         - https://dianping.yiche.com/dazhongid4-6835/koubei/tid_1_-11/
    #     We directly crawl complaints from PC web frontend page, because we could directly crawl content per requests 3rd party package
    #     """
    #     logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
    #         sourceId=self.sourceId,
    #         URL=response.url,
    #     ))
    #     series_src = response.meta['series_src']
    #     brand_src = response.meta['brand_src']
    #
    #     item = IndexPageItem()
    #     item['urls'] = list()
    #     hrefs = list()
    #     for xpath in self.first_set_xpaths:
    #         _urls = response.xpath(xpath).extract()
    #         for _url in _urls:
    #             if isinstance(_url, str):
    #                 hrefs.append(_url)
    #             else:
    #                 logging.info("ERROR : {sourceId} could not extract detail page url".format(
    #                     sourceId=self.sourceId
    #                 ))
    #     for href in hrefs:
    #         detail_page_url = href + '@@' + brand_src + '@@' + series_src
    #         item['urls'].append(detail_page_url)
    #     return item





    '''
        def parse_indexPage_complaint_tousu315(self, response):
            logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
                sourceId=self.sourceId,
                URL=response.url,
            ))
            item = IndexPageItem()
            item['urls'] = list()
            for xpath in self.first_set_xpaths:
                item['urls'].extend(response.xpath(xpath).extract())
            return item
    '''






