# -*- coding: utf-8 -*-
import requests
from urllib.parse import urlencode
import time, re
from lxml import etree
from bs4 import BeautifulSoup
import pymysql
import hashlib, oss2
from io import BytesIO
from PIL import Image


def get_parameter(html):
    """Extract article metadata from a Sogou WeChat search-result page.

    Args:
        html: raw HTML text of one search-result page.

    Yields:
        dict with keys ``title``, ``publisher``, ``detail_url`` and
        ``pub_time`` (local time formatted as ``%Y-%m-%d %H:%M:%S``).
    """
    eroot = etree.HTML(html)
    titles = eroot.xpath('//li/div[@class="txt-box"]/h3/a')
    publishers = eroot.xpath('//div[@class="s-p"]/a/text()')
    detail_urls = eroot.xpath('//div[@class="txt-box"]//h3/a/@data-share')
    # Publish times are embedded in inline JS as timeConvert('<unix-ts>').
    # Raw string: the original non-raw "\d" is an invalid escape sequence
    # (SyntaxWarning on modern CPython).
    pattern = re.compile(r".*?timeConvert..(\d+)...</s.*?", re.S)
    pub_times = pattern.findall(html)
    # zip() stops at the shortest sequence, so a partially scraped page
    # simply yields fewer records instead of raising.
    for title, publisher, detail_url, pub_time in zip(titles, publishers, detail_urls, pub_times):
        yield {
            "title": title.xpath('string(.)'),
            "publisher": publisher,
            "detail_url": detail_url,
            "pub_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(pub_time))),
        }


def get_index():
    """Crawl five pages of Sogou WeChat search results for '今日六合'.

    For every article found, fetch the detail page, extract the plain-text
    body and image URLs, and hand everything to :func:`crearteweibo` for
    persistence. All failures on a single article are contained there.
    """
    base_url = "https://weixin.sogou.com/weixin?"
    # The Referer mimics the plain page-1 search URL so Sogou accepts the
    # paginated requests that follow.
    referer = base_url + urlencode({'query': '今日六合', 'type': 1})
    list_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36",
        'Referer': referer,
    }
    detail_headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
    }
    for page in range(1, 6):
        url = base_url + urlencode({
            'query': '今日六合',
            'type': 2,
            'page': page,
            'tsn': 3,
            'wxid': 'oIWsFt6MQ6FlhX38WIIUbgun_Abw',
            'usip': '今日六合',
        })
        print('========================================')
        print(url)
        print('========================================')
        list_resp = requests.get(url, headers=list_headers)
        time.sleep(10)  # throttle: Sogou bans aggressive crawlers
        list_resp.encoding = 'utf-8'
        html = list_resp.text
        if not html:
            continue
        for parameter in get_parameter(html):
            print(parameter["title"])
            print(parameter["publisher"])
            print(parameter["detail_url"])
            print(parameter["pub_time"])
            print('----------------------------------')
            # Separate name for the detail response: the original shadowed
            # `response`/`html`, which made the flow hard to follow.
            detail_resp = requests.get(parameter["detail_url"], headers=detail_headers)
            time.sleep(3)
            detail_resp.encoding = 'utf-8'
            article = etree.HTML(detail_resp.text)
            # Anti-bot or deleted-article pages have no js_content div; the
            # original [0] raised IndexError and killed the whole crawl.
            nodes = article.xpath('//div[@id="js_content"]')
            if not nodes:
                continue
            article_content = nodes[0].xpath('string(.)')
            print(article_content)
            print('``````````````````````````')
            soup = BeautifulSoup(detail_resp.text, 'lxml')
            imgs = [img.get('data-src') for img in soup.find_all(name='img')
                    if img.get('data-src') is not None]
            print(imgs)
            crearteweibo(parameter["detail_url"], parameter["title"],
                         parameter["pub_time"], article_content, imgs)


def mysql_server():
    """Open and return a fresh connection to the ``app_wzjc`` database on Aliyun RDS."""
    # NOTE(review): credentials are hard-coded in source; consider moving
    # them to environment variables or a config file.
    return pymysql.connect(
        host='rm-bp1i4s7mgs401rj2n684.mysql.rds.aliyuncs.com',
        port=3306,
        user='app_wzjc',
        passwd='9uu_gHFAmYWRis27',
        db='app_wzjc',
        charset='utf8mb4',
    )


def crearteweibo(url, title, data_time, content, imgs):
    """Persist one scraped article: upload its large images to Aliyun OSS,
    then insert the article row and per-image rows into MySQL.

    Args:
        url: article detail URL.
        title: article title.
        data_time: publish time string ("%Y-%m-%d %H:%M:%S").
        content: plain-text article body.
        imgs: list of image URLs scraped from the article page.

    Errors are logged and swallowed so one bad article never stops the crawl.
    """
    try:
        img_list = []
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        }
        # NOTE: the original guard `if imgs is not []` compared identity and
        # was always True; iterating directly handles the empty case anyway.
        for img_url in imgs:
            try:
                resp = requests.get(img_url, headers=headers)
                time.sleep(3)
                im = Image.open(BytesIO(resp.content))
                w, h = im.size
                # Keep only real content images (>=200px on either side);
                # smaller ones are icons/thumbnails.
                if w >= 200 or h >= 200:
                    # WeChat image URLs look like .../mmbiz_<fmt>/...;
                    # <fmt> doubles as the file extension.
                    ext = img_url.split("/mmbiz_")[1].split('/')[0]
                    img_url_md5 = hashlib.md5(str(img_url).encode()).hexdigest()
                    img_key = 'yuqing/10-63/' + img_url_md5 + '.' + ext
                    img_urls = 'https://yuwoyg.oss-cn-hangzhou.aliyuncs.com/' + img_key
                    # NOTE(review): root AccessKey hard-coded here is high risk;
                    # create and use a RAM account instead (ram.console.aliyun.com).
                    auth = oss2.Auth('LTAIhIK9hoKRcMOy', 'Llx7NO8k6VkHgMvMws7SguFSKhwydI')
                    # Endpoint is the Hangzhou internal one; adjust per region.
                    bucket = oss2.Bucket(auth, 'http://oss-cn-hangzhou-internal.aliyuncs.com', 'yuwoyg')
                    # Upload the downloaded bytes (the original passed the
                    # already-consumed Response object).
                    bucket.put_object(img_key, resp.content)
                    print('ossossossossossossossoss')
                    img_list.append(img_urls)
            except Exception as e:
                print('oss:' + str(e))
        img_status = 1 if img_list else 0
        # De-duplication key: md5 over publish time + title + site id (63).
        data_md5 = hashlib.md5((str(data_time) + str(title) + str(63)).encode()).hexdigest()
        print('=================================')
        db = mysql_server()
        try:
            cursor = db.cursor()
            db.ping(reconnect=True)
            # Parameterized query: the original interpolated scraped text
            # straight into SQL, which breaks on quotes and is injectable.
            sql1 = ("INSERT INTO recovery_data (title,url,pubtime,content,site_id,"
                    "group_id,img_status,type,url_md5) "
                    "VALUES (%s,%s,%s,%s,%s,%s,%s,3,%s)")
            cursor.execute(sql1, (title, url, data_time, content, 63, 10, img_status, data_md5))
            print(sql1)
            data_id = cursor.lastrowid  # id of the row just inserted
            db.commit()
            # (original `if img_list is not []` was also always True)
            for im_url in img_list:
                try:
                    sql2 = ("INSERT IGNORE INTO recovery_data_img "
                            "(data_id,img_url,group_id) VALUES (%s,%s,%s)")
                    db.ping(reconnect=True)
                    cursor.execute(sql2, (data_id, im_url, 10))
                    print(sql2)
                    db.commit()
                except Exception as e:
                    print(e)
        finally:
            # Always release the connection, even when an insert fails.
            db.close()
    except Exception as e:
        print('crearteweibo')
        print(e)


if __name__ == '__main__':
    # Crawl forever: one full pass over the search pages, print the elapsed
    # time, then sleep an hour before the next pass.
    while True:
        started = time.time()
        get_index()
        print('用时:' + str(time.time() - started))
        time.sleep(3600)
