# -*- coding: utf-8 -*-

from bs4 import BeautifulSoup
import bs4
from fake_useragent import UserAgent
import requests
import re



def get_CSS_URL(htmlContent):
    """Extract the s3plus-hosted stylesheet URL from a page's raw HTML.

    Raises IndexError if no matching <link> tag is present.
    """
    matches = re.findall('<link rel="stylesheet" type="text/css" href="//s3plus(.*?)">', htmlContent, re.S)
    return 'http://s3plus' + matches[0]

def get_CSS_Content(CSSUrl, CSS_collection):
    """Return the CSS body for CSSUrl, using CSS_collection (MongoDB) as a cache.

    Serves the cached document when present; otherwise downloads the file,
    stores it in the collection, and returns it. Returns None (after logging)
    when the download fails.
    """
    ua = UserAgent(verify_ssl=False)  # skip SSL verification for the UA list
    headers = {
        'User-Agent': ua.random,  # random UA to reduce blocking
        'Accept-Language': 'zh-CN,zh;q=0.9'
    }
    cached = CSS_collection.find_one({'url': CSSUrl})
    if cached:
        return cached['content']
    try:
        CSSContent = requests.get(CSSUrl, headers=headers).text
    except requests.RequestException:
        # Narrowed from a bare except: only network/HTTP failures are expected here.
        print("============CSS文件内容获取失败=============")
    else:
        CSS_collection.insert_one({'url': CSSUrl, 'content': CSSContent})
        return CSSContent

def get_SVG_URL(CSSContent):
    """Extract every SVG background-image URL from the CSS text.

    Returns a list of absolute (http:) URLs; empty list when CSSContent is
    None (upstream fetch failed) or nothing matches.
    """
    # NOTE(review): the leading [class^=".*?"] is a regex character CLASS
    # (matches a single char from that set), not a CSS attribute selector —
    # it happens to match the real stylesheets, so it is kept to preserve behavior.
    pattern = '[class^=".*?"].*?background-image: url[(](.*?)[)];background-repeat'
    SVG_list_url = list()
    try:
        result = re.findall(pattern, CSSContent, re.S)
    except TypeError:
        # Narrowed from a bare except: findall raises TypeError on None input.
        print("============CSS文件内容为空=============")
    else:
        for url in result:
            SVG_list_url.append("http:" + url)

    return SVG_list_url

def get_SVG_Content(SVG_list_url, SVG_collection):
    """Fetch every SVG sprite, caching bodies in SVG_collection (MongoDB).

    Returns a dict mapping url -> SVG text. URLs whose download fails are
    logged and skipped rather than aborting the whole batch.
    """
    SVG_dic = dict()
    ua = UserAgent(verify_ssl=False)  # skip SSL verification for the UA list
    headers = {
        'User-Agent': ua.random,  # random UA to reduce blocking
        'Accept-Language': 'zh-CN,zh;q=0.9'
    }
    for url in SVG_list_url:
        cached = SVG_collection.find_one({'url': url})
        if cached:
            SVG_dic[url] = cached['content']
            continue
        try:
            SVGContent = requests.get(url, headers=headers).text
        except requests.RequestException:
            # Narrowed from a bare except: only network/HTTP failures are expected here.
            print("============获取SVG内容出错=============")
        else:
            SVG_collection.insert_one({'url': url, 'content': SVGContent})
            SVG_dic[url] = SVGContent
    return SVG_dic


# 获取坐标信息
# Resolve which SVG sprite holds the glyphs for a given class prefix
def get_Word_SVG_URL(prefix, CSSContent, SVG_dic):
    """Look up the SVG sprite URL for glyph classes starting with *prefix*.

    SVG_dic is unused here but kept for call-site symmetry.
    Returns the absolute URL, or None (after logging) when no rule matches.
    """
    found = re.findall(
        r'\[class.*?="' + prefix + r'.*?"\].*?background-image: url[(](.*?)[)];background-repeat',
        CSSContent, re.S)
    if not found:
        print("============未获取到坐标信息=============")
        return None
    return 'http:' + found[0]



def get_Word_Point(className, CSSContent, char_width=14, y_offset=23):
    """Resolve a glyph class name to its (x, y) position in the SVG sprite.

    The CSS rule '<class>{background:-X.0px -Y.0px...}' gives pixel offsets;
    x is converted to a character index (each glyph is char_width px wide)
    and y is shifted by y_offset to line up with the SVG <text> rows.
    char_width / y_offset default to the historical hard-coded 14 / 23.

    Raises IndexError when className has no matching rule.
    """
    # re.escape guards against regex metacharacters in the class name;
    # the dots before '0px' are now escaped (the original '.' matched any char,
    # which could mis-split offsets without a decimal point).
    point = re.findall(
        re.escape(className) + r'{background:-(.*?)\.0px.*?-(.*?)\.0px',
        CSSContent, re.S)
    x = int(int(point[0][0]) / char_width)
    y = int(point[0][1]) + y_offset
    return x, y

# 提取SVG字体 x、y格式
# Decode a glyph from the plain '<text x="0" y="...">' SVG layout
def get_Word_Content(SVGUrl, x, y, SVG_dic):
    """Pick the character at column *x* from the SVG text row at height *y*.

    Falls back to the <textPath>-based parser when the sprite has no plain
    <text> row at that height.
    """
    svg_body = SVG_dic[SVGUrl]
    rows = re.findall('<text x="0" y="' + str(y) + '">(.*?)</text>', svg_body, re.S)
    if not rows:
        return get_Word_Content_B(SVGUrl, x, y, SVG_dic)
    return rows[0][x]

# 提取SVG字体 <textPath xlink:href="#12" textLength="364">
# Decode a glyph from the '<textPath xlink:href="#id" textLength="...">' SVG layout
def get_Word_Content_B(SVGUrl, x, y, SVG_dic):
    """Pick the character at column *x* for sprites that use <textPath> rows.

    Each <path id=.. d="M0 <height> H600"> defines a row baseline; the row's
    characters live in the <textPath> whose 1-based document order matches
    the path id. Maps height y -> path id -> row text, then indexes column x.

    Raises KeyError when no path matches height y.
    """
    SVGContent = SVG_dic[SVGUrl]
    soup = BeautifulSoup(SVGContent, 'lxml')
    # Height -> row id, taken from each path's "M0 <height> H600" data.
    key = dict()
    for path in soup.find_all(name='path'):
        index = re.findall('M0 (.*?) H600', str(path['d']))
        key[int(index[0])] = int(path['id'])

    # 1-based row number -> row text.
    # BUG FIX: the original used datas.index(data), which returns the FIRST
    # occurrence — wrong when two rows hold identical text (and O(n^2)).
    datas = re.findall('<textPath xlink:href=".*?" textLength=".*?">(.*?)</textPath>', SVGContent, re.S)
    value = dict()
    for row_no, data in enumerate(datas, 1):
        value[row_no] = data

    return value[key[y]][x]

# 获取用户名称
# Extract the reviewer's display name
def get_User_Name(comment_div):
    """Return the reviewer's name from the 'dper-info' block of a review div."""
    info_div = comment_div.find_all(name='div', attrs={'class': 'dper-info'})[0]
    return info_div.a.string.strip()

# 获取评分
# Extract the per-aspect rating labels
def get_score(comment_div):
    """Return the rating label strings found inside the 'score' span,
    with embedded newlines and surrounding whitespace removed."""
    score_spans = comment_div.find(name='span', attrs={'class': 'score'}).find_all('span')
    return [span.text.replace("\n", "").strip() for span in score_spans]

# 喜欢的食物
# Recommended dishes
def get_Food_List(comment_div):
    """Return the reviewer's recommended dishes as a list of strings.

    Returns an empty list when the review has no 'review-recommend' section.
    """
    food = list()
    try:
        like_food_div = comment_div.find_all(name='div', attrs={'class':'review-recommend'})[0]
        food_a_list = like_food_div.find_all(name='a')
    except IndexError:
        # Narrowed from a bare except: [0] on an empty result is the only
        # expected failure (section absent).
        pass
    else:
        for a in food_a_list:
            food.append(a.string)
    return food

# 获取发布时间
# Extract the publish time of the review
def get_Comment_Time(comment_div):
    """Return the review's publish-time text, stripped of surrounding whitespace."""
    time_tag = comment_div.find_all(name='span', attrs={'class': 'time'})[0]
    return time_tag.string.strip()

# 获取点赞、回复数量
# Extract like / reply counters
def get_Zan_Reponse_Num(comment_div):
    """Return (likes, replies) for a review.

    Counts come from <em class="col-exp"> tags; the [1] index keeps the
    second character of each tag's string (presumably the digit after a
    leading bracket — verify against live markup). Missing tags count as 0.
    """
    counters = comment_div.find_all(name='em', attrs={'class': 'col-exp'})
    if not counters:
        return 0, 0
    likes = counters[0].string[1]
    replies = counters[1].string[1] if len(counters) > 1 else 0
    return likes, replies

# 解析评论内容
# Reassemble the review text, decoding obfuscated glyphs
def get_Comment_Content(comment_div,  CSSContent, SVG_dic):
    """Return the review's plain text with <svgmtsi> glyphs decoded.

    Dianping hides some characters behind <svgmtsi class=".."> tags whose
    class maps (via the CSS) to x/y coordinates inside an SVG sprite.
    Plain text nodes are kept as-is; each glyph tag is resolved through
    get_Word_SVG_URL / get_Word_Point / get_Word_Content using SVG_dic.
    """
    # Prefer the full folded text block; fall back to the visible one.
    try:
        comment = comment_div.find_all(name='div', attrs={'class':'review-words Hide'})[0]
    except IndexError:
        # Narrowed from a bare except: [0] on an empty result is the only
        # expected failure (no "Hide" variant in this review).
        comment = comment_div.find_all(name='div', attrs={'class':'review-words'})[0]

    comment_content = list()
    for tag in comment:
        if isinstance(tag, bs4.element.Tag):
            if tag.name == 'svgmtsi':
                className = str(tag['class'][0])
                prefix = className[0:2]
                # Which SVG sprite holds this glyph family
                SVGUrl = get_Word_SVG_URL(prefix, CSSContent, SVG_dic)
                # Column / row of the glyph inside the sprite
                x, y = get_Word_Point(className, CSSContent)
                # Decode the actual character
                word = get_Word_Content(SVGUrl, x, y, SVG_dic)
                comment_content.append(word)
        elif isinstance(tag, bs4.element.NavigableString):
            comment_content.append(str(tag))
    return "".join(comment_content).strip()
