import datetime
import requests
import re
from urllib import parse
import xmltodict
from flask import render_template
import lxml
from lxml import etree


def weixin(catgory, page):
    """
    Scrape the Sogou Weixin pcindex listing for one category page.

    :param catgory: category identifier used in the Sogou pcindex URL
                    (misspelled name kept for caller compatibility)
    :param page: page identifier inserted into the URL path
    :return: list of dicts with keys title / url / summary / img /
             author / datetime (UTC "%Y-%m-%d %H:%M:%S" string)
    """
    url = 'http://weixin.sogou.com/pcindex/pc/{}/{}.html'.format(catgory, page)
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.72 Safari/537.36"
    }
    # timeout so a stalled upstream server cannot hang the request forever
    html = requests.get(url, headers=headers, timeout=10).content.decode('utf-8')

    dom_tree = etree.HTML(html)

    titles = dom_tree.xpath("//h3/a/text()")
    urls = dom_tree.xpath("//h3/a/@href")
    imgs = dom_tree.xpath("//ul/li/div[@class=\"img-box\"]/a//@src")
    summaries = dom_tree.xpath("//ul/li/div[@class=\"txt-box\"]/p/text()")
    authors = dom_tree.xpath("//div[@class=\"s-p\"]/a/text()")
    create_time = dom_tree.xpath('//div[@class="s-p"]/span/@t')

    new_list = list()
    for title, url, img, summary, author, create_t in zip(titles, urls, imgs, summaries, authors, create_time):
        # the @src value is a Sogou image-proxy URL; the real image URL
        # follows the "&url=" query parameter
        img = parse.unquote(img)
        img = "https:" + img
        matches = re.findall(r"""&url=(.*)""", img)
        if not matches:
            # skip entries whose image URL lacks the expected proxy format
            # (the original code raised IndexError here)
            continue
        img = matches[0]
        # keep only the query-string part of the article URL so it can be
        # routed through the local /detail handler
        url = url.replace("http://mp.weixin.qq.com/s?", "")
        url = url.replace("https://mp.weixin.qq.com/s?", "")

        # format the unix timestamp from the @t attribute as a UTC string;
        # fromtimestamp(tz=utc) replaces the deprecated utcfromtimestamp
        # and produces the same strftime output
        date_array = datetime.datetime.fromtimestamp(int(create_t), tz=datetime.timezone.utc)
        styletime = date_array.strftime("%Y-%m-%d %H:%M:%S")

        new_list.append({
            "title": title,
            "url": url,
            "summary": summary,
            "img": img,
            "author": author,
            "datetime": styletime
        })

    return new_list


def news_detail(url):
    """
    Fetch a WeChat (mp.weixin.qq.com) article page and rewrite its HTML
    so it renders correctly when served locally.

    :param url: full article URL
    :return: rewritten HTML string
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.72 Safari/537.36"
    }
    # timeout so a stalled upstream server cannot hang the request forever
    html = requests.get(url, headers=headers, timeout=10).content.decode('utf-8', errors='ignore')
    # WeChat lazy-loads images via data-src; rename so they display directly
    html = html.replace("data-src", "src")

    # hide the official-account meta information
    html = html.replace("""<div id="meta_content" class="rich_media_meta_list">""",
                        """<div id="meta_content" class="rich_media_meta_list" style="display:none">""")

    # hide the QR code
    html = html.replace('<div class="qr_code_pc">', '<div class="qr_code_pc" style="display:none">')

    # disable the weixinbridge.com bad-js reporting snippet
    html = html.replace(r"""if(p&&p.badjs_rate&&(d=p.badjs_rate),w&&Math.random()<d){
u=u.replace(/uin\:(.)*\|biz\:(.)*\|mid\:(.)*\|idx\:(.)*\|sn\:(.)*\|/,"");
var E=new Image,S="https://badjs.weixinbridge.com/badjs?id="+w+"&level=4&from="+encodeURIComponent(location.host)+"&msg="+encodeURIComponent(u);
E.src=S.slice(0,1024);""", "")
    # rewrite in-article WeChat links so they resolve through the local
    # /detail route instead of going straight to mp.weixin.qq.com
    html = html.replace('<a href="http://mp.weixin.qq.com/s?', '<a href="/detail?data=')
    html = html.replace('<a href="https://mp.weixin.qq.com/s?', '<a href="/detail?data=')

    return html


def search(words='哈哈'):
    """
    Search Sogou Weixin for *words*. Still a work in progress (TODO):
    currently only extracts the raw <ul class="news-list"> markup and
    returns a placeholder value.

    :param words: query string (URL-quoted before being sent)
    :return: placeholder string "23333"
    """
    # quote the query so non-ASCII words form a valid URL
    url = 'http://weixin.sogou.com/weixin?type=2&s_from=input&query={}&ie=utf8&_sug_=n&_sug_type_='.format(
        parse.quote(words))
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.72 Safari/537.36"
    }
    # bug fix: headers were built but never passed to requests.get,
    # so the anti-bot User-Agent had no effect; also add a timeout
    html = requests.get(url, headers=headers, timeout=10).content.decode('utf-8')

    # re.S so '.' crosses newlines -- the list markup spans multiple lines,
    # otherwise findall can never match
    result_blocks = re.findall(r"""<ul class="news-list">(.*?)</ul>""", html, re.S)
    print(result_blocks)

    return "23333"


def category():
    """Placeholder for category listing support (not implemented yet)."""
    pass


if __name__ == '__main__':
    # Manual smoke test: run the (still-TODO) search with its default query.
    search()
