# -*- coding: utf-8 -*-
import datetime
import random
import re
import requests
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import json
# from public.conifig import APPID, CROPID, ENTERPRISEID, REQUEST_HOST
from public.conifig import dby_url, account, accountKey, num, city, connect
from public.Logs.log import log1
from public.utils import save_data


class SouGouWeiXinSpider:
    """Scrape the newest WeChat official-account article for each account name
    via Sogou's WeChat search (https://weixin.sogou.com/) and persist it."""

    def __init__(self, cursor):
        # Sogou WeChat search landing page.
        self.url = "https://weixin.sogou.com/"
        # DB cursor handed to save_data() for persisting scraped rows.
        self.cursor = cursor

    def get_proxy(self, wait):
        """Fetch one proxy from the provider and verify it can reach the target.

        :param wait: seconds to sleep before the request (crude rate limiting)
        :return: "ip:port" string on success, None on any failure
        """
        params = {
            "account": account,
            "accountKey": accountKey,
            "num": num,
            "cityId": city
        }
        try:
            time.sleep(wait)
            # timeout added: a dead proxy provider must not hang the spider.
            resp_js = requests.get(dby_url, params=params, timeout=10)
            resp = json.loads(resp_js.text)
            if resp["code"] == "200":
                pro_ip = resp["data"][0]["ip"]
                pro_port = resp["data"][0]["port"]
                ip = '{}:{}'.format(pro_ip, pro_port)
                proxies = {
                    "http": "http://" + ip,
                }
                # Validate the proxy by fetching the target site through it.
                resp = requests.get(self.url, proxies=proxies, verify=False, timeout=10)
                if resp.status_code == 200:
                    return ip
                return None
            return None  # provider answered with a non-200 business code
        except Exception as e:
            log1.error("获取代理IP异常: {}".format(e))
            return None

    def user_agent(self):
        """Build a randomized desktop Chrome User-Agent string.

        Randomizing the UA reduces the chance of being fingerprinted/blocked.
        :return: a "Mozilla/5.0 ... Chrome/x.0.y.z Safari/537.36" string
        """
        first_num = random.randint(55, 62)
        third_num = random.randint(0, 3200)
        fourth_num = random.randint(0, 140)
        os_type = [
            '(Windows NT 6.1; WOW64)', '(Windows NT 10.0; WOW64)', '(X11; Linux x86_64)',
            '(Macintosh; Intel Mac OS X 10_12_6)'
        ]
        chrome_version = 'Chrome/{}.0.{}.{}'.format(first_num, third_num, fourth_num)
        ua = ' '.join(['Mozilla/5.0', random.choice(os_type), 'AppleWebKit/537.36',
                       '(KHTML, like Gecko)', chrome_version, 'Safari/537.36'])
        return ua

    def options(self, ip):
        """Build headless ChromeOptions.

        :param ip: proxy "ip:port"; kept for interface compatibility — the
            proxy switch is currently disabled (callers pass 0).
        """
        chromeOptions = webdriver.ChromeOptions()
        # chromeOptions.add_argument(('--proxy-server=http://' + ip))
        chromeOptions.add_argument('user-agent=' + self.user_agent())
        chromeOptions.add_argument('--no-sandbox')
        chromeOptions.add_argument('--disable-gpu')
        # Hide the "Chrome is being controlled by automated software" banner.
        chromeOptions.add_experimental_option('excludeSwitches', ['enable-automation'])
        # Skip image loading for speed; pages are only parsed for text/meta.
        chromeOptions.add_argument('blink-settings=imagesEnabled=false')
        chromeOptions.add_argument("--headless")
        return chromeOptions

    def request_url(self, chromeOptions):
        """Start a Chrome instance with the given options and open the search page."""
        driver = webdriver.Chrome(options=chromeOptions)
        driver.get(self.url)
        return driver

    def web_driver(self, driver, name):
        """Search Sogou WeChat for `name` and open its newest article.

        :param driver: a live WebDriver positioned on the search page
        :param name: official-account name to search for
        :return: the article URL, or "" when no article exists or the search failed
        """
        url = ""
        try:
            query_input = WebDriverWait(driver, 5).until(
                lambda drv: drv.find_element(By.XPATH, '//*[@id="query"]'))
            query_input.clear()
            query_input.send_keys(name)
            driver.find_element(By.XPATH, '//div[@class="header-box"]//input[@class="swz2"]').click()
            time.sleep(1)
            try:
                WebDriverWait(driver, 5).until(
                    EC.element_to_be_clickable((By.XPATH, '//*[@id="sogou_vr_11002301_box_0"]//a[@uigs="account_article_0"]'))).click()
                time.sleep(3)
                # The article opens in a new tab; switch to it to read its URL.
                driver.switch_to.window(driver.window_handles[1])
                url = driver.current_url
            except Exception:
                # was a bare `except:` — narrowed so Ctrl-C still interrupts
                log1.info("{}无最新文章".format(name))
        except Exception as e:
            log1.error("{}爬取异常: {}".format(name, e))
        return url

    def start_requests(self, row, wait):
        """Scrape every account row and persist its newest article.

        :param row: iterable of (name, article_type) tuples from the DB
        :param wait: seconds passed through to get_proxy (proxy use disabled)
        """
        # Proxy acquisition (self.get_proxy(wait)) is intentionally disabled.
        for net in row:
            chromeOptions = self.options(0)
            driver = self.request_url(chromeOptions)
            domain = net[0]
            category_id = net[1]
            # Convention: source_name_id is the article type prefixed with "9".
            classify = int("9" + str(net[1]))
            source_name_id = classify
            articleUrl = self.web_driver(driver, domain)
            if articleUrl == "":
                # BUG FIX: the browser was leaked on every miss — quit it
                # before moving on to the next account.
                driver.quit()
                continue
            self.parse_data(driver, articleUrl, category_id, source_name_id)

    def parse_data(self, driver, articleUrl, category_id, source_name_id):
        """Parse the loaded WeChat article page and persist one row.

        Extracts the body from the #js_content div and metadata from the
        <meta property="og:*"> tags, then hands a tuple to save_data().
        """
        try:
            pages = driver.page_source
        finally:
            # Always release the browser, even if reading the page fails
            # (previously it was only quit on the success path).
            driver.quit()
        # Raw string: `\s` in a plain literal is an invalid escape sequence.
        html = re.search(r'<div class="rich_media_content " id="js_content" style="visibility: visible;">(.*?)</div>(\s+)<script nonce="" type="text/javascript">', pages, flags=re.DOTALL)
        title = re.search('<meta property="og:title" content="(.*)">', pages)  # 标题
        image = re.search('<meta property="og:image" content="(.*)">', pages)  # image
        description = re.search('<meta property="og:description" content="(.*)">', pages)
        site_name = re.search('<meta property="og:site_name" content="(.*)">', pages)
        author = re.search('<meta property="og:article:author" content="(.*)">', pages)
        articleTitle = title.group(1) if title else ""
        articleImage = image.group(1) if image else ""
        articleDescription = description.group(1) if description else ""
        siteName = site_name.group(1) if site_name else ""
        articleAuthor = author.group(1) if author else ""
        # Single quotes normalized to double quotes before storage —
        # presumably to keep the content safe for SQL embedding; verify.
        content = html.group(1).strip().replace("'", '"') if html else ""
        sub_title = articleDescription
        author_name = articleAuthor
        source_name = siteName
        source_id = ""
        article_date = "0000-00-00 00:00"  # publish time is not extracted
        datas = (articleTitle, source_name, source_id, articleUrl, article_date,
                 content, source_name_id, category_id, sub_title, author_name, articleImage)
        save_data(self.cursor, datas)
        print(articleTitle)

def wxArtiveMain():
    """Entry point: load the WeChat account list (web_type = 0) from the DB
    and scrape the newest article for each account, logging total runtime."""
    start = time.time()
    log1.info("\n ------------------------------微信公众号开始爬取--------------------------------")
    sql = """
            select name, article_type from tax_article_from where web_type = 0
        """
    cursor = connect.cursor()
    try:
        cursor.execute(sql)
        names = cursor.fetchall()
        sg = SouGouWeiXinSpider(cursor)
        sg.start_requests(names, 1)
    finally:
        # BUG FIX: the cursor leaked if the query or the spider raised.
        cursor.close()
    end = time.time()
    log1.info("微信公众号抓取完毕, 共耗时:{}s".format(end - start))


# Run the spider directly when this module is invoked as a script.
if __name__ == '__main__':
    wxArtiveMain()