# -*- coding: UTF-8 -*-
import base64
import copy
import logging
import time, random, json, uuid
import requests
import scrapy
from scraper.ScrapeConfigManager import ScrapeConfigManager
from scraper.items import ComplaintItem, CuZuItem
from util.DatabaseManager import DatabaseManager
from scrapy import Request
from lxml import etree
from copy import deepcopy

class NewsSpider(scrapy.Spider):
    '''
    Because each target website differs from the others,
    we define a separate crawl function per website.
    Tips :
        - All the return values are string-like
        - The string-like sentences are preprocessed in the Django backend
    '''
    name = "news"
    custom_settings = {
        'ITEM_PIPELINES': {
            'scraper.pipelines.ScraperPipeline': 400
        },
    }

    # For some reason, renaming this parameter leads to problems.
    # Please note it actually refers to a config name, not a source id.

    def __init__(self, sourceId='', *args, **kwargs):
        """Scrape spider for scraping urls without crawling.

        NOTE(review): despite its name, ``sourceId`` is the config-file
        name (see the comment above the method); the real source id is
        read out of the parsed config below.
        """
        # Forward extra args to scrapy.Spider; the original dropped them
        # silently and skipped base-class initialization entirely.
        super().__init__(*args, **kwargs)

        self.dbManager = DatabaseManager(sourceId)
        self.start_urls = self.dbManager.getQueuedUrls()

        # check_cfile(...) returns a tuple; index 2 holds the parsed config.
        self.config = ScrapeConfigManager().check_cfile(sourceId)[2]
        self.sourceId = self.config['source_Id_idmi']['sourceid']
        # Url-building templates/prefixes used by start_requests().
        self.first_set_xpath_prev_bind = self.config['Index_Page_Xpath']['first_set_xpath_prev_bind']
        self.first_set_browse_url = self.config['Index_Page_Xpath']['first_set_browse_url']

        # Shared metadata stamped onto every item via _ADD_INFO().
        self.source_web_origin = self.config['Demand_info']['source_web_origin']
        self.save_source_image = self.config['Demand_info']['save_source_image']
        self.info_category = self.config['Demand_info']['info_category']

    def start_requests(self):
        '''
        Start Requests -> check the sourceId of each config file to specify which func to execute

        Dispatches every queued url to the source-specific detail-page
        callback selected by ``self.sourceId``.  Depending on the source,
        the queued "url" may be a bare id, a relative path, a full url, or
        several values joined with '@@' that are split apart here and
        forwarded to the callback via ``meta``.

        NOTE(review): the time.sleep() calls below block Scrapy's reactor;
        DOWNLOAD_DELAY / AutoThrottle would be the non-blocking equivalent.
        Kept as-is to preserve the existing request pacing.
        '''
        for url in self.start_urls:
            # Complaint-type sources
            if self.sourceId == 'complaint_12365auto':
                # Queued value is a relative path; prepend the configured prefix.
                # NOTE(review): this is the only branch without a sleep — confirm
                # whether that is intentional.
                __url_unique = copy.deepcopy(url)
                yield Request(
                    url=self.first_set_xpath_prev_bind + __url_unique,
                    callback=self.parse_detailPage_complaint_12365auto,
                    dont_filter=True,
                    meta={
                        '_url_unique':__url_unique
                    }
                )

            elif self.sourceId == 'complaint_315auto':
                # Queued value is an id substituted into a url template.
                __url_unique = copy.deepcopy(url)
                url = self.first_set_xpath_prev_bind.format(__url_unique)
                yield Request(
                    url=url,
                    callback=self.parse_detailPage_complaint_315auto,
                    dont_filter=True,
                    meta={'_url_unique' : __url_unique}
                )
                time.sleep(random.randint(2, 5))

            elif self.sourceId == 'complaint_315qc':
                # Queued value packs url, brand and series, '@@'-separated;
                # brand/series travel to the callback through meta.
                __url_unique = copy.deepcopy(url)
                target_url, brand, series = __url_unique.split('@@')
                yield Request(
                    url=self.first_set_xpath_prev_bind + target_url,
                    callback=self.parse_detailPage_complaint_315qc,
                    dont_filter=True,
                    meta={'_url_unique' : __url_unique,
                          'brand_src' : brand,
                          'series_src' : series}
                )
                time.sleep(random.randint(2, 5))

            elif self.sourceId == 'complaint_aqsiqauto':
                # Strip the 'dianji_tx(...)' onclick wrapper to recover the raw
                # id before substituting it into the url template.
                _url_unique = copy.deepcopy(url)
                url = url.replace('dianji_tx(', '').replace(')', '')
                yield Request(
                    url=self.config['Index_Page_Xpath']['first_set_xpath_prev_bind'].format(url),
                    callback=self.parse_detailPage_complaint_aqsiqauto,
                    dont_filter=True,
                    meta={'_url_unique' : _url_unique}
                )
                time.sleep(random.randint(2, 5))

            elif self.sourceId == 'complaint_autohome':
                # Request hits the JSON API; the human-browsable page url is
                # built separately and stored on the item by the callback.
                _url_unique = copy.deepcopy(url)
                __browse_url = self.first_set_browse_url.format(url)
                yield scrapy.Request(
                    url=self.first_set_xpath_prev_bind.format(_url_unique),
                    callback=self.parse_detailPage_complaint_autohome,
                    dont_filter=True,
                    meta={'_url_unique' : _url_unique,
                          '__browse_url' : __browse_url}
                )
                time.sleep(random.randint(2, 5))

            elif self.sourceId == 'complaint_qctsw':
                # Same API-plus-browse-url pattern as complaint_autohome.
                _url_unique = copy.deepcopy(url)
                __browse_url = self.first_set_browse_url.format(url)
                yield Request(
                    url=self.first_set_xpath_prev_bind.format(_url_unique),
                    callback=self.parse_detailPage_complaint_qctsw,
                    dont_filter=True,
                    meta={'_url_unique' : _url_unique,
                          '__browse_url' : __browse_url}
                )
                time.sleep(random.randint(2, 5))

            elif self.sourceId == 'complaint_qiche365':
                # Queued value is a relative path; prepend the configured prefix.
                _url_unique = copy.deepcopy(url)
                yield Request(
                    url=self.config['Index_Page_Xpath']['first_set_xpath_prev_bind'] + _url_unique,
                    callback=self.parse_detailPage_complaint_qiche365,
                    dont_filter=True,
                    meta={'_url_unique' : _url_unique}
                )
                time.sleep(random.randint(2, 5))

            elif self.sourceId == 'complaint_qichemen':
                # Queued value is already an absolute url.
                _url_unique = copy.deepcopy(url)
                yield scrapy.Request(
                    url=url,
                    callback=self.parse_detailPage_complaint_qichemen,
                    dont_filter=True,
                    meta={'_url_unique' : _url_unique}
                )
                time.sleep(random.randint(2, 5))
            elif self.sourceId == 'complaint_tousu16888':
                # Queued value is a relative path; prepend the configured prefix.
                _url_unique = copy.deepcopy(url)
                yield scrapy.Request(
                    url=self.first_set_xpath_prev_bind + _url_unique,
                    callback=self.parse_detailPage_complaint_tousu16888,
                    dont_filter=True,
                    meta={'_url_unique' : _url_unique}
                )
                time.sleep(random.randint(2, 5))



            # Forum-type sources
            elif self.sourceId == 'forum_autohome':
                # Queued value packs target url and forum id ('@@'-separated);
                # the JSON API is requested, the browse url is kept for the item.
                _url_unique = copy.deepcopy(url)
                _target_url, _fid = url.split('@@')
                __browse_url = self.first_set_browse_url.format(_target_url)
                yield scrapy.Request(
                    url=self.first_set_xpath_prev_bind.format(_target_url),
                    callback=self.parse_detailPage_forum_autohome,
                    dont_filter=True,
                    meta={'fid' : _fid,
                          'browse_url' : __browse_url,
                          '_url_unique' : _url_unique}
                )
                time.sleep(random.randint(2, 5))

            elif self.sourceId == 'forum_dongchedi':
                _url_unique = copy.deepcopy(url)
                _target_url, _fid = url.split('@@')
                # Mobile UA + session cookies are required for the mobile
                # frontend to serve the article.
                # NOTE(review): these cookie values are hard-coded and will
                # expire — consider moving them into the config.
                headers = {
                    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1'
                }
                cookies = {
                    'ttwid': '1%7CFPHQlVJlcmHEdPP6wZGStVMX9eMzx00FgErtuTfLYiI%7C1730793391%7Cce73de235225cfaccac60264520942e94717fcaf2ec1a53522738c7a08106037',
                    'tt_webid': '7433700821328217624',
                    'city_name': '%E5%8C%97%E4%BA%AC',
                    'HMACCOUNT': 'AE97396C8F607E00',
                    '_gid': 'GA1.2.1817282010.1733882629',
                    'Hm_lvt_3e79ab9e4da287b5752d8048743b95e6': '1731570635,1732504575,1733733520',
                    'Hm_lpvt_3e79ab9e4da287b5752d8048743b95e6': '1733903961',
                    '_ga_YB3EWSDTGF': 'GS1.1.1733903963.46.0.1733903963.60.0.0',
                    '_ga': 'GA1.2.1900786594.1730793394'
                }
                yield scrapy.Request(
                    url= 'https://m.dongchedi.com' + _target_url,
                    callback=self.parse_detailPage_forum_dongchedi,
                    dont_filter=True,
                    meta={'fid': _fid,
                          '_url_unique': _url_unique},
                    headers=headers,
                    cookies=cookies
                )
                time.sleep(random.randint(2, 5))

            elif self.sourceId == 'forum_pcauto':
                _url_unique = copy.deepcopy(url)
                _target_url, _fid = url.split('@@')
                yield scrapy.Request(
                    url=self.first_set_xpath_prev_bind.format(_target_url),
                    callback=self.parse_detailPage_forum_pcauto,
                    dont_filter=True,
                    meta={'fid': _fid,
                          '_url_unique': _url_unique}
                )
                time.sleep(random.randint(2, 5))

            elif self.sourceId == 'forum_xcar':
                _url_unique = copy.deepcopy(url)
                _target_url, _fid = url.split('@@')
                yield scrapy.Request(
                    url=self.first_set_xpath_prev_bind + _target_url,
                    callback=self.parse_detailPage_forum_xcar,
                    dont_filter=True,
                    meta={'fid': _fid,
                          '_url_unique': _url_unique}
                )
                time.sleep(random.randint(2, 5))

            # elif self.sourceId == 'forum_yiche':
            #     _url_unique = copy.deepcopy(url)
            #     _target_url, _brand_src, _series_src = url.split('@@')
            #     yield scrapy.Request(
            #         url=self.first_set_xpath_prev_bind + _target_url,
            #         callback=self.parse_detailPage_forum_yiche,
            #         dont_filter=True,
            #         meta={'brand_src': _brand_src,
            #               'series_src': _series_src,
            #               '_url_unique': _url_unique}
            #     )
            #     time.sleep(random.randint(2, 5))

            # # TODO 20231026 Add CuZu topic, and only focus on ID. Series
            # elif self.sourceId == 'cuzu_autohome':
            #     _url_unique = copy.deepcopy(url)
            #     Koubeiid, _brand_src, _series_src, posttime = url.split('@@')
            #     # __browse_url = self.first_set_browse_url.format(url) only if we get the show_id , and then generate the browse url
            #     yield scrapy.Request(
            #         url=self.first_set_xpath_prev_bind.format(Koubeiid),
            #         callback=self.parse_detailPage_cuzu_autohome,
            #         dont_filter=True,
            #         meta={'brand_src': _brand_src,
            #               'series_src': _series_src,
            #               '_url_unique': _url_unique,
            #               'posttime' : posttime}
            #     )
            #     time.sleep(random.randint(10, 20))
            #
            # elif self.sourceId == 'cuzu_dongchedi':
            #     _url_unique = copy.deepcopy(url)
            #     _indexpageurl, gid_str, _brand_src, _series_src = url.split('@@')
            #     __browse_url = self.first_set_browse_url.format(gid_str)
            #     yield scrapy.Request(
            #         url=_indexpageurl,
            #         callback=self.parse_detailPage_cuzu_dongchedi,
            #         dont_filter=True,
            #         meta={
            #             'brand_src': _brand_src,
            #             'series_src': _series_src,
            #             '__browse_url': __browse_url,
            #             '_url_unique': _url_unique,
            #             'gid_str' : gid_str
            #         }
            #     )
            #     time.sleep(random.randint(5, 10))
            #

            else:
                # Unknown sourceId: log and skip the url.
                logging.info('ERROR : {sourceId}---{URL} is not correct'.format(
                    sourceId=self.sourceId,
                    URL=url
                ))

    def _ADD_INFO(self, item) -> dict:
        """Stamp the shared source-level metadata onto *item* and return it."""
        shared_fields = {
            'sourceId': self.sourceId,
            'source_web_origin': self.source_web_origin,
            'save_source_image': self.save_source_image,
            'info_category': self.info_category,
        }
        for field_name, field_value in shared_fields.items():
            item[field_name] = field_value
        return item

    def _JOIN_LIST(self, sentence_list) -> str:
        '''
        Flatten an extracted xpath result into one whitespace-free string.

        Accepts either a list of string fragments or a single string and
        returns the concatenation with ALL whitespace (spaces, tabs,
        newlines, unicode spaces) removed.
        '''
        if isinstance(sentence_list, list):
            sentence = ''.join(sentence_list)
        else:
            sentence = sentence_list
        # str.split() with no argument splits on any whitespace run, so the
        # join removes every whitespace character in one pass.  This replaces
        # the original per-character strip() join plus the \t/\r/\n replace
        # chain (the replaces were dead code: the strip had already removed
        # those characters).
        return ''.join(sentence.split())

    def parse_detailPage_complaint_12365auto(self, response):
        '''
        Parse a 12365auto mobile detail page into a ComplaintItem.
        Detail Page sample:
            - http://m.12365auto.com/zlts/20220711/770820.shtml
        '''
        response_url = response.url
        logging.info('INFO : {sourceId} Adding Complaint from {URL}'.format(
            sourceId=self.sourceId, URL=response_url
        ))
        xpaths = self.config['Detail_Page_Xpath']

        item = ComplaintItem()
        item['_url_unique'] = response.meta['_url_unique']
        item['url'] = response_url
        item = self._ADD_INFO(item=item)

        # Title/text are joined and whitespace-stripped.
        item['title'] = self._JOIN_LIST(response.xpath(xpaths['title']).extract())
        item['text'] = self._JOIN_LIST(response.xpath(xpaths['text']).extract())

        # Brand/series/model take the first xpath match as-is.
        for field in ('brand_src', 'series_src', 'model_src'):
            item[field] = response.xpath(xpaths[field]).extract_first()

        extracted_images = response.xpath(xpaths['image_urls']).extract()
        item['image_urls'] = json.dumps(extracted_images if extracted_images else [],
                                        ensure_ascii=False)
        return item

    def parse_detailPage_complaint_315auto(self, response):
        """
        Parse a 315auto PC detail page into a ComplaintItem.
            - https://315.auto.china.com.cn/zlts/complain-details.html?id=15234
        """
        response_url = response.url
        logging.info('INFO : {sourceId} Adding Complaint from {URL}'.format(
            sourceId=self.sourceId,
            URL=response_url
        ))
        xpaths = self.config['Detail_Page_Xpath']

        item = self._ADD_INFO(item=ComplaintItem())
        item['url'] = response_url
        item['_url_unique'] = response.meta['_url_unique']

        # Title/text are joined and whitespace-stripped.
        item['title'] = self._JOIN_LIST(response.xpath(xpaths['title']).extract())
        item['text'] = self._JOIN_LIST(response.xpath(xpaths['text']).extract())

        # Brand/series/model take the first xpath match as-is.
        for field in ('brand_src', 'series_src', 'model_src'):
            item[field] = response.xpath(xpaths[field]).extract_first()

        extracted_images = response.xpath(xpaths['image_urls']).extract()
        item['image_urls'] = json.dumps(extracted_images if extracted_images else [],
                                        ensure_ascii=False)

        return item

    def parse_detailPage_complaint_315qc(self, response):
        """
        Parse a 315qc mobile detail page into a ComplaintItem.
        Detail Page :
            - http://www.315qc.com/Mobile/Carcomplaints/view/id/548586
        Corresponding PC page :
            - http://www.315qc.com/Home/Carcomplaints/view/id/548586
        """
        response_url = response.url
        logging.info('INFO : {sourceId} Adding Complaint from {URL}'.format(
            sourceId=self.sourceId,
            URL=response_url
        ))
        xpaths = self.config['Detail_Page_Xpath']

        item = self._ADD_INFO(item=ComplaintItem())
        item['_url_unique'] = response.meta['_url_unique']
        item['url'] = response_url

        item['title'] = self._JOIN_LIST(response.xpath(xpaths['title']).extract())
        item['text'] = self._JOIN_LIST(response.xpath(xpaths['text']).extract())

        # Brand/series were resolved on the index page and arrive via meta.
        item['brand_src'] = response.meta['brand_src']
        item['series_src'] = response.meta['series_src']
        item['model_src'] = ''

        # No image extraction for this source — always an empty list.
        item['image_urls'] = json.dumps([], ensure_ascii=False)
        return item

    def parse_detailPage_complaint_aqsiqauto(self, response):
        """
        Parse an aqsiqauto detail page into a ComplaintItem.
        Detail Page :
            - https://www.aqsiqauto.com/complaint/qichetousu_info?id=28057
        """
        response_url = response.url
        logging.info('INFO : {sourceId} Adding Complaint from {URL}'.format(
            sourceId=self.sourceId,
            URL=response_url
        ))
        xpaths = self.config['Detail_Page_Xpath']

        item = self._ADD_INFO(item=ComplaintItem())
        item['_url_unique'] = response.meta['_url_unique']
        item['url'] = response_url

        # This source has no separate title element.
        item['title'] = ""
        item['text'] = self._JOIN_LIST(response.xpath(xpaths['text']).extract())

        item['brand_src'] = self._JOIN_LIST(response.xpath(xpaths['brand_src']).extract())
        item['series_src'] = self._JOIN_LIST(response.xpath(xpaths['series_src']).extract())
        item['model_src'] = ''

        # Keep only absolute urls ('http...'); other fragments are dropped.
        absolute_urls = [u for u in response.xpath(xpaths['image_urls']).extract()
                         if u.startswith('http')]
        item['image_urls'] = json.dumps(absolute_urls, ensure_ascii=False)

        return item

    def parse_detailPage_complaint_autohome(self, response):
        """
        Parse an autohome complaint JSON payload into a ComplaintItem.
        Crawl from JSON-like file
            - https://tousu.m.autohome.com.cn/api/complaint/complaint/complaints/85534
        Corresponding PC page :
            - https://tousu.m.autohome.com.cn/complaintdetail?detailId=85534
        TODO : extract the _id directly via regex here and fill in the
               corresponding browse page url
        """
        response_url = response.url
        logging.info('INFO : {sourceId} Adding Complaint from {URL}'.format(
            sourceId=self.sourceId,
            URL=response_url
        ))
        _url_unique = response.meta['_url_unique']
        __browse_url = response.meta['__browse_url']

        item = ComplaintItem()
        item = self._ADD_INFO(item=item)
        # NOTE(review): body_as_unicode() is deprecated in newer Scrapy
        # (response.text); kept for consistency with sibling callbacks.
        _res_json = json.loads(response.body_as_unicode())

        item['_url_unique'] = _url_unique
        # Store the human-browsable page, not the API endpoint.
        item['url'] = __browse_url

        if _res_json.get('returncode') == 0:
            detail = _res_json.get('result').get('detail')
            item['title'] = detail.get('title')
            item['text'] = detail.get('content')

            # .get() may yield None for absent/null fields; plain truthiness
            # avoids the TypeError that len(None) raised here before.
            item['series_src'] = detail.get('carSeriesTypeName') or ''
            item['brand_src'] = detail.get('carBrandName') or ''
            item['model_src'] = ''

            images = detail.get('images')
            item['image_urls'] = json.dumps(images if images else [], ensure_ascii=False)
        else:
            # Previously this branch was silent and returned a partial item
            # (no title/text/image_urls).  The partial item is kept for
            # pipeline compatibility, but the failure is now logged.
            logging.info('ERROR : {sourceId} detail page from {URL}'.format(
                sourceId=self.sourceId,
                URL=response_url
            ))

        return item

    def parse_detailPage_complaint_qctsw(self, response):
        """
        Parse a qctsw complaint JSON payload into a ComplaintItem.
        Crawl from JSON-like file
            - https://www.qctsw.com/api/app/base/complain/simplePage?currentPage=1&pageSize=10&status=more&hasNextPage=true&complainPageParam=NOW
        Corresponding PC url
            - https://www.qctsw.com/complaint/detail/311018
        """
        response_url = response.url
        logging.info("INFO : {sourceId} Adding Complaints from {URL}".format(
            sourceId=self.sourceId,
            URL=response_url,
        ))
        _res_json = json.loads(response.body_as_unicode())
        _url_unique = response.meta['_url_unique']
        __browse_url = response.meta['__browse_url']
        item = ComplaintItem()

        # CONSISTENCY FIX: use the shared metadata helper instead of the
        # hand-rolled field assignments (which hard-coded 'complaint_qctsw').
        # self.sourceId equals 'complaint_qctsw' whenever this callback is
        # reached, so the stamped values are unchanged.
        item = self._ADD_INFO(item=item)

        # Store the human-browsable page, not the API endpoint.
        item['url'] = __browse_url
        item['_url_unique'] = _url_unique

        if _res_json['code'] == 200:
            item['title'] = self._JOIN_LIST(_res_json['data']['title'])
            # content is an HTML fragment; strip the markup via lxml.
            _text = _res_json['data']['baseInfo']['content']
            _html_text = etree.HTML(_text)
            item['text'] = self._JOIN_LIST(_html_text.xpath(".//text()"))

            item['brand_src'] = _res_json['data']['brandName']
            item['series_src'] = _res_json['data']['seriesName']
            item['model_src'] = ''
        else:
            logging.info("ERROR : {sourceId} detail page from {URL}".format(
                sourceId=self.sourceId,
                URL=response_url
            ))
            # NOTE(review): brand_src/series_src stay unset in this branch —
            # confirm the downstream pipeline tolerates the missing keys.
            item['title'] = None
            item['text'] = None
            item['model_src'] = None
            item['oem_src'] = None
        item['image_urls'] = json.dumps(
            [], ensure_ascii=False
        )
        return item

    def parse_detailPage_complaint_qiche365(self, response):
        """
        Parse a qiche365 PC detail page into a ComplaintItem.
            - https://www.qiche365.org.cn/index/complaints/detail/id/82054.html
        """
        response_url = response.url
        logging.info("INFO : {sourceId} Adding Complaints from {URL}".format(
            sourceId=self.sourceId,
            URL=response_url,
        ))
        xpaths = self.config['Detail_Page_Xpath']

        item = self._ADD_INFO(item=ComplaintItem())
        item['_url_unique'] = response.meta['_url_unique']
        item['url'] = response_url

        # Every text field comes straight from its configured xpath,
        # joined and whitespace-stripped.
        for field in ('title', 'text', 'brand_src', 'series_src', 'model_src'):
            item[field] = self._JOIN_LIST(response.xpath(xpaths[field]).extract())

        # No image extraction for this source — always an empty list.
        item['image_urls'] = json.dumps([], ensure_ascii=False)

        return item

    def parse_detailPage_complaint_qichemen(self, response):
        """
        Parse a qichemen PC detail page into a ComplaintItem.
            - https://www.qichemen.com/ts271212.html
        """
        response_url = response.url
        logging.info("INFO : {sourceId} Adding Complaints from {URL}".format(
            sourceId=self.sourceId,
            URL=response_url,
        ))
        xpaths = self.config['Detail_Page_Xpath']

        item = self._ADD_INFO(item=ComplaintItem())
        item['_url_unique'] = response.meta['_url_unique']
        item['url'] = response_url

        item['title'] = self._JOIN_LIST(response.xpath(xpaths['title']).extract())
        item['text'] = self._JOIN_LIST(response.xpath(xpaths['text']).extract())

        # Plain join here (not _JOIN_LIST), so internal whitespace in
        # brand/series/model is preserved.
        for field in ('brand_src', 'series_src', 'model_src'):
            item[field] = ''.join(response.xpath(xpaths[field]).extract())

        # Image paths are relative; prepend the configured url prefix.
        raw_images = response.xpath(xpaths['image_urls']).extract()
        if raw_images:
            image_prefix = self.config['Detail_Page_Img']['image_url_bind']
            item['image_urls'] = json.dumps(
                [image_prefix + path for path in raw_images], ensure_ascii=False
            )
        else:
            item['image_urls'] = json.dumps([], ensure_ascii=False)
        return item

    def parse_detailPage_complaint_tousu16888(self, response):
        """
        Parse a tousu16888 mobile detail page into a ComplaintItem.
        Crawl from Mobile frontend page
            - https://m.tousu.16888.com/detail.html?id=112822
        """
        response_url = response.url
        logging.info("INFO : {sourceId} Adding Complaints from {URL}".format(
            sourceId=self.sourceId,
            URL=response_url,
        ))
        _url_unique = response.meta['_url_unique']
        item = ComplaintItem()
        item = self._ADD_INFO(item=item)

        item['_url_unique'] = _url_unique
        item['url'] = response_url

        item['title'] = self._JOIN_LIST(
            response.xpath(self.config['Detail_Page_Xpath']['title']).extract()
        )
        item['text'] = self._JOIN_LIST(
            response.xpath(self.config['Detail_Page_Xpath']['text']).extract()
        )

        item['brand_src'] = self._JOIN_LIST(
            response.xpath(self.config['Detail_Page_Xpath']['brand_src']).extract()
        )
        item['series_src'] = self._JOIN_LIST(
            response.xpath(self.config['Detail_Page_Xpath']['series_src']).extract()
        )
        item['model_src'] = ''

        # BUG FIX: the original passed the raw SelectorList straight to
        # json.dumps (missing .extract()), which raises TypeError whenever
        # the xpath matches anything; extract() yields a plain string list.
        __image_urls = response.xpath(
            self.config['Detail_Page_Xpath']['image_urls']
        ).extract()
        if len(__image_urls):
            item['image_urls'] = json.dumps(
                __image_urls, ensure_ascii=False
            )
        else:
            item['image_urls'] = json.dumps(
                [], ensure_ascii=False
            )

        return item



    def parse_detailPage_forum_12365auto(self, response):
        """
        Parse a 12365auto mobile forum thread into a ComplaintItem.
            - http://m.12365auto.com/threadforum/227295.shtml#f=s

        NOTE(review): no branch in start_requests() currently dispatches to
        this callback, and it expects 'model_en' in meta — verify the caller.
        """
        response_url = response.url
        logging.info("INFO : {sourceId} Adding Complaints from {URL}".format(
            sourceId=self.sourceId,
            URL=response_url,
        ))
        xpaths = self.config['Detail_Page_Xpath']
        model_en = response.meta['model_en']

        item = ComplaintItem()
        item['url'] = response_url
        item['_url_unique'] = response.meta['_url_unique']
        item = self._ADD_INFO(item=item)

        item['title'] = self._JOIN_LIST(response.xpath(xpaths['title']).extract())
        item['text'] = self._JOIN_LIST(response.xpath(xpaths['text']).extract())

        # model_en doubles as model_src; remaining fields stay unset.
        item['model_en'] = model_en
        item['model_src'] = model_en
        item['oem_src'] = None
        item['add_info'] = None
        return item

    def parse_detailPage_forum_autohome(self, response):
        """
        Parse an autohome forum JSON payload into a ComplaintItem.
        Crawl from JSON-like file
            - https://forum.app.autohome.com.cn/forum_v9.9.7/forum/club/topicinfo?pm=2&v=10.2.5&t=102844358&i=0&uid=0&ry=0
        Corresponding PC frontend page :
            - https://club.autohome.com.cn/bbs/thread/30a957dd0d1f9f79/102844358-1.html
        """
        # The browse (PC) url is what gets logged and stored on the item,
        # not the API endpoint that was actually fetched.
        response_url = response.meta['browse_url']
        logging.info('INFO : {sourceId} Adding forums from {URL}'.format(
            sourceId=self.sourceId,
            URL=response_url
        ))
        item = ComplaintItem()
        item['url'] = response_url
        item['_url_unique'] = response.meta['_url_unique']
        item = self._ADD_INFO(item=item)

        payload = json.loads(response.body_as_unicode())
        if payload['returncode'] == 0:
            topic = payload['result']['topicinfo'][0]
            item['title'] = self._JOIN_LIST(topic['title'])
            item['text'] = self._JOIN_LIST(topic['summary'])
        else:
            logging.info('ERROR : {sourceId} Adding forums from {URL}'.format(
                sourceId=self.sourceId,
                URL=response_url
            ))
        item['fid'] = response.meta['fid']
        item['model_src'] = ''

        item['image_urls'] = json.dumps([], ensure_ascii=False)
        return item

    def parse_detailPage_forum_dongchedi(self, response):
        """
        Parse a dongchedi mobile ugc article into a ComplaintItem.
            - https://m.dongchedi.com/ugc/article/1737974707092484
        """
        response_url = response.url
        logging.info('INFO : {sourceId} Adding forums from {URL}'.format(
            sourceId=self.sourceId,
            URL=response_url
        ))
        item = ComplaintItem()
        item['url'] = response_url
        item['_url_unique'] = response.meta['_url_unique']
        item = self._ADD_INFO(item)

        # Only the body text is extracted for this source; title stays empty.
        item['title'] = ''
        item['text'] = self._JOIN_LIST(
            response.xpath(self.config['Detail_Page_Xpath']['text']).extract()
        )
        item['fid'] = response.meta['fid']
        item['model_src'] = ''

        item['image_urls'] = json.dumps([], ensure_ascii=False)
        return item


    def parse_detailPage_forum_pcauto(self, response):
        """
        Parse a pcauto mobile bbs topic into a ComplaintItem.
            - https://m.pcauto.com.cn/bbs/topic-22124163.html
        """
        response_url = response.url
        logging.info('INFO : {sourceId} Adding Forums from {URL}'.format(
            sourceId=self.sourceId,
            URL=response_url
        ))
        xpaths = self.config['Detail_Page_Xpath']

        item = ComplaintItem()
        item['_url_unique'] = response.meta['_url_unique']
        item['url'] = response_url
        item = self._ADD_INFO(item)

        item['title'] = self._JOIN_LIST(response.xpath(xpaths['title']).extract())
        item['text'] = self._JOIN_LIST(response.xpath(xpaths['text']).extract())

        item['fid'] = response.meta['fid']
        item['model_src'] = ''

        # No image extraction for this source — always an empty list.
        item['image_urls'] = json.dumps([], ensure_ascii=False)

        return item

    def parse_detailPage_forum_xcar(self, response):
        """
        Parse an xcar mobile bbs thread into a ComplaintItem.
            - https://a.xcar.com.cn/bbs/thread-98270098-0.html?zoneclick=126508
        """
        response_url = response.url
        logging.info('INFO : {sourceId} Adding Forums from {URL}'.format(
            sourceId=self.sourceId,
            URL=response_url
        ))
        xpaths = self.config['Detail_Page_Xpath']

        item = ComplaintItem()
        item['_url_unique'] = response.meta['_url_unique']
        item['url'] = response_url
        item = self._ADD_INFO(item)

        item['title'] = self._JOIN_LIST(response.xpath(xpaths['title']).extract())
        item['text'] = self._JOIN_LIST(response.xpath(xpaths['text']).extract())

        item['fid'] = response.meta['fid']
        item['model_src'] = ''

        extracted_images = response.xpath(xpaths['image_urls']).extract()
        item['image_urls'] = json.dumps(extracted_images if extracted_images else [],
                                        ensure_ascii=False)

        return item

    def parse_detailPage_forum_yiche(self, response):
        """
        Parse a yiche forum detail page fetched via the mobile frontend.
            - https://baa.yiche.com/Arteon/ask-41984887.html

        Builds a ComplaintItem with title/text extracted via configured
        xpaths; image_urls is always an empty JSON list for this source.
        """
        url = response.url
        logging.info('INFO : {sourceId} Adding Forums from {URL}'.format(
            sourceId=self.sourceId,
            URL=url
        ))

        item = ComplaintItem()
        item['_url_unique'] = response.meta['_url_unique']
        item['url'] = url
        # Fill in the common source-level fields (shared helper).
        item = self._ADD_INFO(item)

        detail_xpaths = self.config['Detail_Page_Xpath']
        item['title'] = self._JOIN_LIST(
            response.xpath(detail_xpaths['title']).extract()
        )
        item['text'] = self._JOIN_LIST(
            response.xpath(detail_xpaths['text']).extract()
        )

        item['fid'] = response.meta['fid']
        item['model_src'] = ''
        # No images are scraped for yiche; store an empty JSON array.
        item['image_urls'] = json.dumps([], ensure_ascii=False)

        return item

    #
    # # TODO 20231026 Add CuZu topics
    # def parse_detailPage_cuzu_autohome(self, response):
    #     """
    #     Crawl from Json-like frontend page
    #         - https://koubeiipv6.app.autohome.com.cn/autov9.13.0/alibi/evaluationdata.ashx?appversion=11.55.0&eid=5184340
    #     And the corresponding PC web frontend page
    #         - https://k.autohome.com.cn/detail/view_01hckhe4f46mrkgd1k6gr00000.html
    #     Unlike the other websites , we should replace the show_id to get the corresponding PC web frontend page
    #     """
    #     response_url = response.url
    #     logging.info('INFO : {sourceId} Adding Forums from {URL}'.format(
    #         sourceId=self.sourceId,
    #         URL=response_url
    #     ))
    #     brand_src = response.meta['brand_src']
    #     series_src = response.meta['series_src']
    #     _url_unique = response.meta['_url_unique']
    #     # posttime = response.meta['posttime']
    #
    #     item = CuZuItem()
    #     item = self._ADD_INFO(item)
    #     item['_url_unique'] = _url_unique
    #     _res_json = json.loads(response.body_as_unicode())
    #
    #     if _res_json['returncode'] == 0:
    #         # __showId = _res_json.get('result').get('showId')
    #         # item['url'] = self.first_set_browse_url.format(show_id=__showId)
    #         item['url'] = _res_json['result'].get('webUrl', '')
    #
    #         sceneList = _res_json['result']['sceneList']
    #         if len(sceneList):
    #             item['content_list'] = json.dumps(_res_json, ensure_ascii=False)
    #         else:
    #             item['content_list'] = json.dumps([], ensure_ascii=False)
    #     else:
    #         item['url'] = ''
    #         item['content_list'] = json.dumps([], ensure_ascii=False)
    #         logging.info('ERROR : {sourceId} Adding forums from {URL}'.format(
    #             sourceId=self.sourceId,
    #             URL=response_url
    #         ))
    #
    #     item['brand_src'] = brand_src
    #     item['series_src'] = series_src
    #     item['model_src'] = ''
    #     item['image_urls'] = json.dumps([], ensure_ascii=False)
    #
    #     return item
    #
    #
    # def parse_detailPage_cuzu_dongchedi(self, response):
    #     """
    #     Crawl from Json-lie web frontend page
    #         - https://m.dongchedi.com/motor/pc/car/series/get_review_list?aid=1230&app_name=automobile_web&series_id=4586&part_id=S0&sort_by=create_time&page=1&count=50
    #     The corresponding PC web frontend page
    #         - https://www.dongchedi.com/koubei/7290747768732127267
    #     """
    #     brand_src = response.meta['brand_src']
    #     series_src = response.meta['series_src']
    #     __browse_url = response.meta['__browse_url']
    #     _url_unique = response.meta['_url_unique']
    #     gid_str = response.meta['gid_str']
    #     response_url = __browse_url
    #
    #     logging.info('INFO : {sourceId} Adding Forums from {URL}'.format(
    #         sourceId=self.sourceId,
    #         URL=response_url
    #     ))
    #
    #     item = CuZuItem()
    #     item = self._ADD_INFO(item)
    #     item['_url_unique'] = _url_unique
    #     item['url'] = __browse_url
    #     _res_json = json.loads(response.body_as_unicode())
    #
    #     __image_urls = []
    #     __content = ''
    #     if _res_json['status'] == 0 and 'data' in _res_json.keys():
    #         review_list = _res_json['data']['review_list']
    #         if review_list:
    #             for review in review_list:
    #                 review_gid_str = review['gid_str']
    #                 if review_gid_str==gid_str:
    #                     __image_urls = review.get('image_urls', [])
    #                     __content = review['content']
    #                     break
    #     item['content_list'] = __content if __content else ''
    #
    #     item['brand_src'] = brand_src
    #     item['series_src'] = series_src
    #     item['model_src'] = ''
    #
    #     item['image_urls'] = json.dumps(
    #             [],
    #             ensure_ascii=False
    #         )
    #
    #     return item




