'''
# 20241203
1. Turn Dongchedi "DCD score" / Autohome "koubei" (word-of-mouth reviews) into an
   in-time, automatically scheduled crawling mechanism.
'''
import logging
import time
import random
import json
import scrapy
from datetime import datetime
from scraper.ScrapeConfigManager import ScrapeConfigManager
from scraper.items import IndexPageItem
from scrapy.spiders import Spider


class IndexPageSpider(Spider):
    """Spider that collects detail-page URLs from index (listing) pages.

    It does NOT scrape the review/article content itself; it only emits
    ``IndexPageItem`` objects whose ``urls`` field contains one JSON-encoded
    metadata record per detail page discovered on the index page.
    """

    name = 'IndexPage'
    custom_settings = {
        'ITEM_PIPELINES': {
            'scraper.pipelines.IndexPagePipeline': 400
        }
    }

    def __init__(self, sourceId='', *args, **kwargs):
        """Spider for scraping urls from index pages. Does not scrape article data, just urls.

        :param sourceId: key identifying the source site; selects both the
            scrape configuration and the parse callback used for responses.
        """
        # Forward remaining args so Scrapy's Spider base class initialises
        # correctly (the original omitted this call).
        super().__init__(*args, **kwargs)
        self.sourceId = sourceId
        self.config = ScrapeConfigManager().get_config(sourceId)
        self.indexPageUrls = self.config['index_pages_urls']['urls']
        self.first_set_xpaths = self.config['Index_Page_Xpath']['first_set_xpaths']
        self.first_set_xpath_prev_bind = self.config['Index_Page_Xpath']['first_set_xpath_prev_bind']
        self.browser_url_bind = self.config['Index_Page_Xpath']['first_set_browse_url']
        self.ctypeInfo_xpath = self.config['Index_Page_Add_Info']['ctypeinfo_xpath']

    def start_requests(self):
        '''
        Because the pagination mechanism differs between websites, each
        sourceId is routed to its own parse callback; the request shape
        (meta, throttling, URL unpacking) is identical for both sources.
        '''
        logging.info('INFO : Start scrape the url of frontend page from {sourceId}'.format(
            sourceId=self.sourceId
        ))
        # TODO 20231026 Add CuZu topic, and only focus on ID. Series
        # TODO 20241203 split CuZu topic to specific crawler system , focus on various brand and series
        callbacks = {
            'autohome_public_praise': self.parse_indexPage_cuzu_autohome,
            'dongchedi_public_praise': self.parse_indexPage_cuzu_dongchedi,
        }
        callback = callbacks.get(self.sourceId)
        if callback is None:
            logging.info('ERROR : sourceId = {} is incorrect'.format(
                self.sourceId)
            )
            return
        random.shuffle(self.indexPageUrls)
        for indexpageurl in self.indexPageUrls:
            # Polite throttling between index-page requests.
            time.sleep(random.randint(5, 10))
            # Config packs "url@@model_id@@brand@@series" into one string.
            _indexpageurl, _model_id, brand_src, series_src = indexpageurl.split('@@')
            yield scrapy.Request(
                url=_indexpageurl,
                callback=callback,
                dont_filter=True,
                meta={'series' : series_src,
                      'brand' : brand_src,
                      '_indexpageurl' : _indexpageurl,
                      'sourceId' : self.sourceId}
            )

    def timeStamp2date(self, timeStamp):
        """Convert a unix timestamp to a 'YYYY-MM-DD' string.

        Falls back to today's date when the input is missing or invalid.
        """
        try:
            # Equivalent to formatting '%Y-%m-%d %H:%M:%S' and slicing [:10].
            return time.strftime("%Y-%m-%d", time.localtime(timeStamp))
        except (TypeError, ValueError, OverflowError, OSError):
            # Narrowed from a bare except: these are the errors localtime/
            # strftime raise on bad input (None, strings, out-of-range values).
            return str(datetime.now())[:10]

    def varify_create_time(self, create_time):
        """Return create_time if it is a valid 'YYYY-MM-DD' string, else today's date."""
        if isinstance(create_time, str):
            try:
                datetime.strptime(create_time, '%Y-%m-%d')
                return create_time
            except ValueError:
                # Not in the expected format; fall through to the default.
                pass
        return str(datetime.now())[:10]

    # TODO 20231026 Add CuZu topics
    def parse_indexPage_cuzu_autohome(self, response):
        """
        Parse Autohome's JSON word-of-mouth (koubei) listing endpoint.

        Crawl from Json-like web frontend
            - https://koubeiipv6.app.autohome.com.cn/autov9.13.0/alibi/seriesAlibiList.ashx?seriesid=5785&grade=0&pageindex=1&pagesize=50&isstruct=1&order=1&appversion=11.55.0&from=1
        And the corresponding PC frontend , please refer to
            - https://k.autohome.com.cn/6265?order=1

        :returns: IndexPageItem whose 'urls' field holds JSON strings,
            one per review found on this index page.
        """
        logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url,
        ))
        # NOTE: a leftover `meta = {...}` dict that referenced undefined
        # names (series_src/brand_src/_indexpageurl) was removed here — it
        # raised NameError on every response. The values come from
        # response.meta, set in start_requests.
        series = response.meta['series']
        brand = response.meta['brand']
        _indexpageurl = response.meta['_indexpageurl']
        sourceId = response.meta['sourceId']

        item = IndexPageItem()
        item['urls'] = list()
        hrefs = list()
        # response.text replaces the deprecated body_as_unicode().
        res = json.loads(response.text)

        if res.get('returncode') == 0 and 'result' in res:
            _result_list = res['result']['list']
            for _r in _result_list:
                url_temp = _r.get('Koubeiid', None)
                if not url_temp:
                    continue
                create_time = self.varify_create_time(_r.get('posttime', ''))
                multiImages = _r.get('multiImages', [])
                # First image only; tolerate a missing 'img' key.
                image_url = multiImages[0].get('img', '') if multiImages else ''
                hrefs.append(
                    json.dumps({
                        'create_time': create_time,
                        'sourceId': sourceId,
                        'url_temp': url_temp,
                        'price': _r.get('buyprice', ''),
                        'boughtdate': _r.get('boughtDate', ''),
                        'address': _r.get('buyplace', ''),
                        'brand': brand,
                        'series': series,
                        'image_url': image_url,
                        'carspec_name': _r.get('specname', '')
                    }, ensure_ascii=False)
                )
        else:
            logging.info('ERROR : {sourceId} response from {URL} is incorrect'.format(
                sourceId=self.sourceId,
                URL=response.url
            ))
        item['urls'].extend(hrefs)
        return item

    def parse_indexPage_cuzu_dongchedi(self, response):
        """
        Parse Dongchedi's JSON review-listing endpoint.

        Crawl from Json-like web frontend
            - https://m.dongchedi.com/motor/pc/car/series/get_review_list?aid=1230&app_name=automobile_web&series_id=4586&part_id=S0&sort_by=create_time&page=1&count=50
        And the corresponding PC frontend page
            - https://www.dongchedi.com/auto/series/score/5217-x-S0-x-create_time-x-1
        As for the detailed page , please refer to
            - https://m.dongchedi.com/koubei/7294087009352552486

        :returns: IndexPageItem whose 'urls' field holds JSON strings,
            one per review found on this index page.
        """
        logging.info('INFO : {sourceId} Adding URLs from Index page {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url,
        ))
        brand = response.meta['brand']
        series = response.meta['series']
        sourceId = response.meta['sourceId']
        _indexpageurl = response.meta['_indexpageurl']

        item = IndexPageItem()
        item['urls'] = list()
        hrefs = list()
        # response.text replaces the deprecated body_as_unicode().
        res = json.loads(response.text)

        # .get() so a malformed payload logs the error instead of raising KeyError.
        if res.get('status') == 0:
            review_list = res['data']['review_list']
            for review in review_list:
                gid_str = review.get('gid_str', None)
                if not gid_str:
                    continue
                # Detail-page URL template comes from the scrape config.
                URL = self.browser_url_bind.format(gid_str)
                buy_car_info = review.get('buy_car_info', None) or {}
                boughtdate = buy_car_info.get('bought_time', '')
                address = buy_car_info.get('location', '')
                price = buy_car_info.get('price', '')
                carspec_name = buy_car_info.get('car_name', '')
                image_urls = review.get('image_urls', [])
                image_url = image_urls[0].get('url', '') if image_urls else ''
                create_time = self.timeStamp2date(review.get('create_time', None))
                # No separate update timestamp in this feed; mirror create_time.
                update_time = create_time
                content = review.get('content', '')
                hrefs.append(json.dumps({
                    'create_time': create_time,
                    'sourceId': sourceId,
                    'update_time': update_time,
                    'url_temp': gid_str,
                    'price': price,
                    'boughtdate': boughtdate,
                    'address': address,
                    'image_url': image_url,
                    'brand': brand,
                    'series': series,
                    'carspec_name': carspec_name,
                    'title': '',
                    'customer_voice': self.clean_sentences(content),
                    'URL': URL
                }, ensure_ascii=False))
        else:
            logging.info('ERROR : {sourceId} response from {URL} is incorrect'.format(
                sourceId=self.sourceId,
                URL=response.url
            ))
        item['urls'].extend(hrefs)
        return item

    def clean_sentences(self, ori_sentence):
        """Normalise review text: join lists, strip whitespace/quote characters.

        Non-str/non-list input yields ''. Note the double-space removal is a
        single pass, so longer runs of spaces may survive (kept as-is).
        """
        if isinstance(ori_sentence, str):
            ...
        elif isinstance(ori_sentence, list):
            ori_sentence = ' '.join(ori_sentence)
        else:
            return ''
        ori_sentence = ori_sentence. \
            replace('\r', ''). \
            replace('\n', ''). \
            replace('\t', ''). \
            replace('"', ''). \
            replace("'", ""). \
            replace('  ', ""). \
            replace('“', ' '). \
            replace('”', ' ')
        return ori_sentence






