#!/usr/bin/python
# -*-coding:utf-8-*-
# pip install ddddocr 图片识别
"""中国农业科学院_科研进展"""
import math
import time
import re
import requests
import json
from bs4 import BeautifulSoup
from datetime import datetime
from time import sleep
import common
import db
import pymongo
import sys
import random
from dotenv import load_dotenv, find_dotenv
import os
import asyncio
from pyppeteer import launch
import numpy as np
import pandas as pd
import os

# Shared database handle used by the scraper functions below.
mysqldb = db.DbManager()

# Load environment variables from a .env file (verbose=True logs what was read).
load_dotenv(verbose=True)
# Today's date string from the project's common helper
# (presumably 'YYYY-MM-DD' given the replace below — confirm in common module).
today_time = common.TodayTime()
# Same date with '-' replaced by '.' (e.g. '2024.01.31').
today_time_new = today_time.replace('-', '.')
# Login credentials and target URL pulled from the environment.
# NOTE(review): 'passworld' looks like a typo for 'password'; it is unused in
# this file — kept as-is in case other modules reference it.
username = os.getenv('LOGIN_ADMIN')
passworld = os.getenv('LOGIN_PWD')
url = os.getenv('URL')

# Serialize the logged-in page's cookies.
async def get_cookie(page):
    """Return the page's cookies serialized as a single ``name=value;...`` string.

    page: a pyppeteer Page — anything with an async ``cookies()`` method
    returning a list of dicts carrying 'name' and 'value' keys.
    """
    cookies_list = await page.cookies()
    # Build the string in one pass with join instead of quadratic '+=' in a loop.
    cookies = ''.join(
        '{0}={1};'.format(cookie.get('name'), cookie.get('value'))
        for cookie in cookies_list
    )
    print(cookies)  # debug: show the serialized cookie string
    return cookies


async def login(org_ids):
    """Scrape source, author and article HTML for each link and update the DB.

    org_ids: rows from ``report_caas`` — dicts with at least a 'link' key
    (shape assumed from ``table_select_many(field="link")`` — confirm in db module).

    Side effects: launches a headless Chromium, navigates to each link, and
    updates the matching ``report_caas`` row (sets is_run=1) when it exists.
    """
    browser = await launch(headless=True, args=['--disable-infobars', '--no-sandbox'])
    # Ensure the headless browser is always shut down, even on a scrape error.
    # (The original called exit() here, which killed the process before the
    # close calls could ever run and leaked the Chromium instance.)
    try:
        page = await browser.newPage()
        await page.setUserAgent(
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36 LBBROWSER")
        await page.setViewport(viewport={'width': 1536, 'height': 768})
        # Hide the webdriver flag so the site does not detect automation.
        await page.evaluateOnNewDocument('() =>{ Object.defineProperties(navigator,'
                                         '{ webdriver:{ get: () => false } }) }')
        for i, org in enumerate(org_ids):
            link = org['link']
            print(i)
            print(link)
            await page.goto(link)

            # Article source, e.g. the institute name, from the first author span.
            from_str = await page.querySelectorAllEval(
                'body > article.subPage > div.subPage_con02 > div > div > div.pageArticle_con > div.articleAuthor > span:nth-child(1) > strong',
                'nodes=>nodes.map(node=>node.innerText)')
            author_arr = await page.querySelectorAllEval(
                'body > article.subPage > div.subPage_con02 > div > div > div.pageArticle_con > div.articleAuthor > span:nth-child(2)',
                'nodes=>nodes.map(node=>node.innerText)')
            # Span text looks like "作者：NAME"; keep the part after the full-width colon.
            author = author_arr[0].split('：', 1)[1]
            # Full article body as HTML.
            info = await page.querySelectorAllEval(
                'body > article.subPage > div.subPage_con02 > div > div > div.pageArticle_con > div.article.art',
                'nodes=>nodes.map(node=>node.innerHTML)')
            info_list = {
                'from_str': from_str[0],
                'author': author,
                'info': info[0]
            }
            find_info = mysqldb.table_select_one(table="report_caas", field="id",
                                                 where={"link": link})
            # Only update rows that already exist for this link; new links are
            # intentionally not inserted here.
            if find_info:
                info_list['is_run'] = 1
                mysqldb.table_update(table="report_caas", data=info_list,
                                     where={"link": link})
        await page.close()
    finally:
        await browser.close()


class Egas:
    """Driver: load up to 1000 unprocessed article links and scrape them.

    Instantiating the class runs the whole pipeline (select pending rows,
    then scrape each one via ``login``).
    """

    def __init__(self):
        self.url = url
        # Rows with is_run=0 have not been scraped yet.
        self.org_ids = mysqldb.table_select_many(table="report_caas", field="link",
                                                 where="is_run=0", limit=1000)
        self.main()

    def main(self):
        """Run the async scraper to completion."""
        # asyncio.run creates and tears down its own event loop; calling
        # get_event_loop() from synchronous code is deprecated since 3.10.
        asyncio.run(login(self.org_ids))

Egas()
