# This module looks up the image src for each news title in content[] and returns an imagesrc[] list.
# --- Standard library ---
import datetime
import re
import time
from urllib import parse
from urllib import request

# --- Third-party ---
import pandas
import pandas as pd
import pymysql
import requests
import xlwt
from bs4 import BeautifulSoup
from rouge import Rouge
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service

# --- Local ---
import to_sql  # database write helper


from warnings import simplefilter
# Silence FutureWarning noise (e.g. from pandas) in console output.
simplefilter(action='ignore', category=FutureWarning)
# Shared ROUGE scorer instance — not used in this chunk; presumably used
# by other functions in the module (verify against the rest of the file).
rouge=Rouge()


# Open a browser invisibly (headless)
def open_Explor():
    """Start a headless Chrome browser and return its driver.

    Returns:
        A ``selenium.webdriver.Chrome`` instance ready for page loads.
        The caller is responsible for calling ``driver.quit()``.
    """
    chrome_options = Options()
    chrome_options.add_argument('--headless')     # no visible window (Chrome's replacement for PhantomJS)
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument('--no-sandbox')   # required when running as root

    # Selenium 4 removed the deprecated ``chrome_options=`` and
    # ``executable_path=`` keyword arguments; pass ``options=`` and a
    # ``Service`` wrapping the chromedriver path instead.
    driver = webdriver.Chrome(
        options=chrome_options,
        service=Service('/usr/bin/chromedriver'),
    )

    return driver

# Main function
def get_image(key, type = 0, n = 1):
    """Search Baidu Images for *key* and return the first result's image URL.

    Args:
        key: Search keyword (may be Chinese; it is URL-quoted before use).
        type: Index into the size-filter list — 0 = any size,
            1 = 1920x1080 desktop wallpaper, 2 = 2560x1440 phone wallpaper.
        n: Number of images to download — currently unused; kept for
            backward compatibility with existing callers.

    Returns:
        The image URL string, or a fixed placeholder URL when no result
        node is found on the page.
    """
    # Size filters appended to the search URL.
    size = ["",                        # any size
        "&width=1920&height=1080",     # desktop wallpaper
        "&width=2560&height=1440",     # phone wallpaper
    ]
    # URLs cannot contain raw Chinese characters, so quote the keyword.
    key_word = parse.quote(str(key))
    # Assemble the search start-page address.
    url = "https://image.baidu.com/search/index?tn=baiduimage&word=" + key_word + size[type]

    # Open the headless browser and visit the start page.
    driver = open_Explor()
    try:
        driver.get(url)
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        node = soup.find('img', attrs={"class": "main_img img-hover"})
    finally:
        # Always release the browser process, even if page load/parsing fails.
        driver.quit()

    if node is None:
        # Fallback placeholder image.  NOTE: the original URL used
        # HTML-escaped "&amp;" entities, which corrupted the query string;
        # the separators must be plain "&".
        return "https://img2.baidu.com/it/u=2677059737,2685115333&fm=253&fmt=auto&app=120&f=JPEG?w=500&h=500"
    else:
        return node["data-imgurl"]

# Receive a list of titles, return a list of image URLs
def process_content_image(list):
    """Look up an image URL for every news title and return them in order.

    Args:
        list: Iterable of news-title strings.  NOTE(review): the parameter
            name shadows the builtin ``list``; it is kept unchanged for
            backward compatibility with keyword-argument callers.

    Returns:
        A list of image-URL strings, one per title, in input order.
    """
    list_image = []
    # enumerate replaces the original hand-maintained index counter.
    for i, content in enumerate(list):
        print('\n')
        print(i)
        print(content)
        image_url = get_image(key=content)
        print(image_url)
        print('\n')
        list_image.append(image_url)
    return list_image
