# -*- coding: UTF-8 -*-
import base64
import copy
import logging
import time, random, json, uuid
import requests
import scrapy
from scraper.ScrapeConfigManager import ScrapeConfigManager
from scraper.items import ComplaintItem, CuZuItem
from util.DatabaseManager import DatabaseManager
from scrapy import Request
from lxml import etree
from copy import deepcopy

class NewsSpider(scrapy.Spider):
    '''
    Detail-page spider for the "CuZu" (car review) topic.

    Each queued URL is a JSON string describing one review record; the
    ``sourceId`` read from the loaded config decides which site-specific
    parse callback handles it.
    Tips :
        - All the return value is String-like
        - Preprocess the String-like sentence in Django-backend
    '''
    name = "news"
    custom_settings = {
        'ITEM_PIPELINES': {
            'scraper.pipelines.ScraperPipeline': 400
        },
    }

    # For some reason renaming the parameter to leads to problems
    # Please note it actually refers to a config name, not a source id

    #: meta/item field names shared by both sites; dongchedi adds a few more.
    _COMMON_FIELDS = (
        'create_time', 'sourceId', 'url_temp', 'price', 'boughtdate',
        'address', 'brand', 'series', 'image_url', 'carspec_name',
    )

    def __init__(self, sourceId='', *args, **kwargs):
        """Scrape spider for scraping urls without crawling.

        :param sourceId: name of the scrape-config file to load (NOT the
            real source id — that is read from the config afterwards).
        """
        # Let Scrapy consume its own spider kwargs (the original skipped
        # this, silently dropping any extra ``-a`` arguments).
        super().__init__(*args, **kwargs)

        # delimiter char to separate xpath and regex
        self.dbManager = DatabaseManager(sourceId)

        # Each queued "url" is a JSON payload describing one record.
        self.start_urls = self.dbManager.getQueuedUrls()
        self.config = ScrapeConfigManager().check_cfile(sourceId)[2]
        self.sourceId = self.config['source_Id_idmi']['sourceid']
        self.first_set_xpath_prev_bind = self.config['Index_Page_Xpath']['first_set_xpath_prev_bind']
        self.first_set_browse_url = self.config['Index_Page_Xpath']['first_set_browse_url']

        self.source_web_origin = self.config['Demand_info']['source_web_origin']
        self.save_source_image = self.config['Demand_info']['save_source_image']
        self.info_category = self.config['Demand_info']['info_category']

    def start_requests(self):
        '''
        Start Requests -> check the sourceId of each config file to specify
        which func to execute. Unknown source ids are logged and skipped.
        '''
        for url in self.start_urls:
            # TODO 20231026 Add CuZu topic, and only focus on ID. Series
            # TODO 20241203 Split CuZu topic into 1 specific web crawler system
            if self.sourceId == 'dongchedi_public_praise':
                # ``url`` is an immutable JSON string, so the former
                # copy.deepcopy() before json.loads() was a no-op.
                record = json.loads(url)
                meta = {key: record[key] for key in self._COMMON_FIELDS}
                for key in ('update_time', 'customer_voice', 'URL'):
                    meta[key] = record[key]
                meta['title'] = ''  # dongchedi records carry no title
                # __browse_url = self.first_set_browse_url.format(url) only if we get the show_id , and then generate the browse url
                # All data is already in the record; this is a placeholder
                # fetch whose only purpose is to route meta to the callback.
                yield scrapy.Request(
                    url='https://cn.bing.com/?FORM=Z9FD1',
                    callback=self.parse_detailPage_cuzu_dongchedi,
                    dont_filter=True,
                    meta=meta,
                )
                # Crude per-record throttling.
                # NOTE(review): time.sleep blocks the Twisted reactor —
                # consider DOWNLOAD_DELAY / RANDOMIZE_DOWNLOAD_DELAY instead.
                time.sleep(random.randint(10, 20))
            elif self.sourceId == 'autohome_public_praise':
                record = json.loads(url)
                meta = {key: record[key] for key in self._COMMON_FIELDS}

                # Bind the record's show id into the configured URL template.
                __request_url = self.first_set_xpath_prev_bind.format(meta['url_temp'])
                yield scrapy.Request(
                    url=__request_url,
                    callback=self.parse_detailPage_cuzu_autohome,
                    dont_filter=True,
                    meta=meta,
                )
            else:
                logging.info('ERROR : {sourceId}---{URL} is not correct'.format(
                    sourceId=self.sourceId,
                    URL=url
                ))

    def _ADD_INFO(self, item) -> dict:
        """Stamp the config-level provenance fields onto *item* and return it."""
        item['sourceId'] = self.sourceId
        item['source_web_origin'] = self.source_web_origin
        item['save_source_image'] = self.save_source_image
        item['info_category'] = self.info_category
        return item

    def _JOIN_LIST(self, sentence_list) -> str:
        '''
        Combine the sentence list, then strip ALL whitespace from the result.

        Accepts a list of strings or a single string.  Note every whitespace
        character (spaces included) is removed, not just tabs/newlines —
        this matches the original per-character strip behavior.
        '''
        if isinstance(sentence_list, list):
            sentence = ''.join(sentence_list)
        else:
            sentence = sentence_list
        # str.split() splits on any whitespace run, so joining with '' drops
        # every whitespace char — the old \t/\r/\n replaces were redundant.
        return ''.join(sentence.split())

    # TODO 20231026 Add CuZu topics
    def parse_detailPage_cuzu_autohome(self, response):
        """
        Crawl from Json-like frontend page
            - https://koubeiipv6.app.autohome.com.cn/autov9.13.0/alibi/evaluationdata.ashx?appversion=11.55.0&eid=5184340
        And the corresponding PC web frontend page
            - https://k.autohome.com.cn/detail/view_01hckhe4f46mrkgd1k6gr00000.html
        Unlike the other websites , we should replace the show_id to get the corresponding PC web frontend page
        """
        logging.info('INFO : {sourceId} Adding Forums from {URL}'.format(
            sourceId=self.sourceId,
            URL=response.url
        ))

        item = CuZuItem()
        # response.text replaces body_as_unicode(), which was removed in
        # Scrapy >= 2.6.
        _res_json = json.loads(response.text)

        for key in self._COMMON_FIELDS:
            item[key] = response.meta[key]
        # Autohome records have no separate update timestamp; mirror create_time.
        item['update_time'] = response.meta['create_time']

        # .get() routes malformed payloads (missing 'returncode') to the
        # empty-result fallback instead of raising KeyError.
        if _res_json.get('returncode') == 0:
            result = _res_json['result']
            URL = result['webUrl']
            title = result['summary']
            # Bug fix: the loop variable was misspelled ('scene'), which
            # raised NameError on every successful response.
            customer_voice_list = [__scene['feeling'] for __scene in result['sceneList']]
        else:
            URL = ''
            customer_voice_list = []
            title = ''

        item['customer_voice'] = self.clean_sentences(customer_voice_list)
        item['title'] = self.clean_sentences(title)
        item['URL'] = URL

        return item

    def parse_detailPage_cuzu_dongchedi(self, response):
        """Repackage response.meta into a CuZuItem.

        All fields were scraped upstream; the request itself was only a
        placeholder fetch, so nothing is read from the response body.
        """
        item = CuZuItem()
        for key in self._COMMON_FIELDS:
            item[key] = response.meta[key]
        for key in ('update_time', 'customer_voice', 'URL'):
            item[key] = response.meta[key]
        item['title'] = ''  # dongchedi records carry no title

        return item

    def clean_sentences(self, ori_sentence):
        """Normalize scraped text: join lists, drop control chars and quotes.

        Returns '' for anything that is neither str nor list.  The replace
        chain is order-sensitive (e.g. double-space removal runs before the
        curly quotes become spaces) — keep it as-is.
        """
        if isinstance(ori_sentence, str):
            ...
        elif isinstance(ori_sentence, list):
            ori_sentence = ' '.join(ori_sentence)
        else:
            return ''
        ori_sentence = ori_sentence. \
            replace('\r', ''). \
            replace('\n', ''). \
            replace('\t', ''). \
            replace('"', ''). \
            replace("'", ""). \
            replace('  ', ""). \
            replace('“', ' '). \
            replace('”', ' ')
        return ori_sentence

