# -*- coding: utf-8 -*-

from docx.shared import Inches,Pt
from docx.oxml.ns import qn
from docx import Document
from lxml import etree
from time import sleep
from settings import *
import requests
import json
import re

# A shared buffer for scraped shop data (href -> name).
shops = {}

class my_dianping_crawler_commonmethod():
    """Shared helpers for the crawler: page fetching and woff-font decoding."""

    def getdata(self, url, headers):
        """GET `url` with `headers` and return the response body text.

        Redirects are disabled so anti-crawler 302 responses surface
        as-is instead of being followed.
        """
        response = requests.get(url=url, headers=headers, allow_redirects=False)
        return response.text

    def cleardata(self):
        """Return self so callers can chain ``cleardata().woff_fonts(...)``.

        BUG FIX: the original defined ``woff_fonts`` as a nested function
        and implicitly returned None, so the chained call in my_consumer
        raised AttributeError. ``woff_fonts`` is now a real method (below)
        and this returns the instance to keep the call-site syntax working.
        """
        return self

    def woff_fonts(self, wait_for_mach, fonts=None):
        """Map encoded glyph codes back to their plain characters.

        wait_for_mach: iterable of encoded codes (e.g. 'unie123').
        fonts: optional {plain_char: code} mapping; defaults to the global
            `standered_fonts` loaded from settings (backward compatible).
        Returns the matched plain characters, in input order.
        """
        if fonts is None:
            fonts = standered_fonts
        matched = []
        for code in wait_for_mach:
            for plain, glyph in fonts.items():
                if glyph == code:
                    matched.append(plain)
        return matched

    def _piplins(self):
        # Placeholder for a future pipeline stage.
        pass

# Fetch pages and parse them: cities -> cuisines -> businesses.
class my_producer(my_dianping_crawler_commonmethod):
    """Producer side of the crawler: walks dianping.com from the city
    index down to individual businesses, storing them in the global
    `shops` buffer."""

    def cities_parse(self):
        """Scrape the 'more cities' panel, then the full city index.

        Builds a list of [url, name] pairs and forwards it to
        _cuisine_parse.
        """
        base_url = 'https://www.dianping.com/'
        headers = parameter().Headers_Main()
        # Retry until the hidden city-list panel appears in the response
        # (anti-crawler pages occasionally come back without it).
        while True:
            html = self.getdata(base_url, headers)
            tree = etree.HTML(html)
            ref1 = tree.xpath("//div[@class='city-list J-city-list Hide']/a/@href")
            sleep(1)
            if ref1:
                break
        # Fetch the full city index page.
        sleep(2)
        more_url = 'https://' + ref1[0].split('//')[1]
        html = self.getdata(more_url, headers=parameter().Headers_Main())
        tree = etree.HTML(html)
        ref2 = tree.xpath('//div[@class="findHeight"]//a/@href')
        name2 = tree.xpath('//div[@class="findHeight"]//a/text()')
        # BUG FIX: the original iterated range(3000) and relied on an
        # IndexError to stop; zip() pairs hrefs with names safely.
        cities = [['https://' + href.split('//')[1], city]
                  for href, city in zip(ref2, name2)]
        self._cuisine_parse(cities)

    # Scrape every cuisine for the first city.
    def _cuisine_parse(self, url):
        """Fetch the first city's page and collect cuisine links.

        `url` is the [[city_url, city_name], ...] list from cities_parse.
        """
        sleep(2)
        html = self.getdata(url[0][0], headers=parameter().Headers_Main())
        tree = etree.HTML(html)
        ref = tree.xpath('//div[@class="sec-items"]//a/@href')
        name = tree.xpath('//div[@class="sec-items"]//a/text()')
        cuisine = [[ref, name]]
        print(cuisine)
        self._business_parse(cuisine)

    # Scrape every business for the first cuisine.
    def _business_parse(self, url):
        """Fetch the first cuisine's listing page and record each shop
        (href -> name) in the global `shops` buffer."""
        global shops
        while True:
            sleep(1)
            html = self.getdata(url[0][0][0], headers=parameter().Headers_son())
            tree = etree.HTML(html)
            ref = tree.xpath('//*[@class="tit"]//a//@href')
            name = tree.xpath('//*[@class="tit"]//a//text()')
            # BUG FIX: the original did `shops[ref] = name` with `ref` a
            # list, which raises TypeError (lists are unhashable). Store
            # one entry per shop instead.
            shops.update(dict(zip(ref, name)))
            print(ref, name)
            # getdata always returns the body text, so one successful
            # fetch ends the loop.
            if html:
                break

# Decryption: recover the obfuscated review text from a saved JSON payload.
class my_consumer():
    """Decodes a Dianping review whose characters are obfuscated with a
    custom woff font (the ``<svgmtsi>`` / ``&#x...;`` scheme).

    NOTE(review): this entire body runs at class-definition time (i.e. on
    import) and reads a hard-coded local file — it is effectively a script.
    """
    parse_data = []
    wait_char = []
    final_data = []
    # Open a pre-saved JSON payload directly [captured by tracing the
    # front-end's JSON response].
    with open(r'E:\note\spider\项目\ECommerceCrawlers\My_Project\My_DianpingCrawler\code2.html', 'r',
              encoding='utf-8') as  f:
        data = f.read()
        dict_data = json.loads(data)
    userId = dict_data["reviewAllDOList"][0]['reviewDataVO']['reviewData']["userId"]
    star = dict_data["reviewAllDOList"][0]['reviewDataVO']['reviewData']["star"]
    review = dict_data["reviewAllDOList"][0]['reviewDataVO']['reviewData']['reviewBody']
    # Strip the <svgmtsi> wrapper tags and full-width commas from the body.
    re_delete_review = re.sub('</svgmtsi>', '', re.sub('<svgmtsi class="review">', '', review)).replace('，', '')
    # This could also be done with str.replace, substituting 'uni' directly.
    # Rewrite the '&#x' escape prefix char-by-char into 'uni' (&->u, #->n,
    # x->i). NOTE(review): this also rewrites any literal '&', '#' or 'x'
    # occurring elsewhere in the text — confirm that is intended.
    for i in re_delete_review:
        if i == '&':
            i = 'u'
            parse_data.append(i)
        elif i == '#':
            i = 'n'
            parse_data.append(i)
        elif i == 'x':
            i = 'i'
            parse_data.append(i)
        else:
            parse_data.append(i)

    for its in parse_data:
        # Separate Chinese from other characters: anything that is not
        # 3 bytes in UTF-8 is treated as part of an encoded glyph and
        # buffered.
        if len(bytes(its, encoding='utf-8')) != 3:
            wait_char.append(its)
        else:
            # On a Chinese character: first decode and insert the buffered
            # encoded characters, then insert the current character.
            if len(wait_char) != 0:
                # NOTE(review): cleardata() returns None as written in
                # my_dianping_crawler_commonmethod, so this chained call
                # raises AttributeError — confirm the intended chaining.
                match = my_dianping_crawler_commonmethod().cleardata().woff_fonts(''.join(wait_char).split(';'))
                for i in match:
                    final_data.append(i)
                    # Clear the buffer on every pass so each lookup only
                    # receives the newest encoded characters.
                    wait_char.clear()
            # Buffer empty (or just flushed): append the current character.
            final_data.append(its)
    # NOTE(review): encoded characters still in wait_char after the loop
    # are never flushed into final_data — confirm that is acceptable.