from bs4 import BeautifulSoup

import requests
import csv
import bs4
import time
import mysql.connector

from selenium import webdriver
from selenium.webdriver.chrome.options import Options

# Scrape price quotes from cnhnb.com (Huinong) listing pages, e.g.:
# https://www.cnhnb.com/hangqing/cdlist-2003656-0-0-0-0-1/
chrome_path ='/usr/bin/chromedriver'
# chrome_path = 'C:\Program Files\chrome\chromedriver.exe'
chrome_options = Options()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
# Database connection settings (mysql.connector config dicts).
# NOTE(review): credentials are hard-coded in source; move them to environment
# variables or a secrets store before sharing/committing this file.
config_168 = {'host': '127.0.0.1',  # default is 127.0.0.1
          'user': 'root', 'password': 'Tnq39/*riqJcC', 'port': 3306,  # 3306 is the MySQL default
          'database': 'ccii_redesign', 'charset': 'utf8','auth_plugin':'mysql_native_password'  # utf8 is the default charset
          }


config1 = {'host': 'rm-2zefd6473rz062234.mysql.rds.aliyuncs.com',  # default is 127.0.0.1
          'user': 'prod_ccii', 'password': 'cI1546_wodesecerts', 'port': 3306,  # 3306 is the MySQL default
          'database': 'ccii_prod', 'charset': 'utf8', 'auth_plugin':'mysql_native_password'  # utf8 is the default charset
          }


# `config` is the one actually used by find_name_List()/save() below.
config = {'host': 'rm-2zefd6473rz062234.mysql.rds.aliyuncs.com',  # default is 127.0.0.1
          'user': 'prod_python', 'password': '939_58J6kAW)P&^', 'port': 3306,  # 3306 is the MySQL default
          'database': 'ccii_prod', 'charset': 'utf8'  # utf8 is the default charset
          }



# Query the market names already stored in the database for the given day.
def find_name_List(price_date):
    """Return market_name rows already saved for *price_date* (type 4004).

    price_date: date string compared against date_format(price_date, "%Y-%m-%d").
    Returns a list of single-column tuples from fetchall(); an empty list when
    the connection or the query fails.
    """
    try:
        # Build the session from the module-level `config` dict.
        cnn = mysql.connector.connect(**config)
    except mysql.connector.Error as e:
        print('数据库链接失败！', str(e))
        # Bug fix: the original fell through with cnn == '' and crashed on
        # ''.cursor(...); without a connection there is nothing to query.
        return []
    print("连接数据库sucessfully!")

    # Bug fix: `data` was unbound (NameError at the return) when the query raised.
    data = []
    cursor = cnn.cursor(buffered=True)
    try:
        stmt = 'select market_name from shop_price_daily_info where type=4004 and date_format(price_date, "%Y-%m-%d") = %s '
        cursor.execute(stmt, [price_date])
        data = cursor.fetchall()
    except mysql.connector.Error as e:
        print('查询数据报错！', str(e))
    finally:
        # Always release the cursor and the connection.
        cursor.close()
        cnn.close()

    return data

# Parse the rendered listing HTML into quote rows.
def get_contents(rurl):
    """Extract quote rows from the page HTML string *rurl*.

    Returns a list of [date, product_name, price, place] string lists.
    Prices on the page are quoted in 元/斤 (yuan per 500 g) and are doubled
    to convert to 元/KG.
    """
    rs_data = list()
    try:
        soup = BeautifulSoup(rurl, 'lxml')
        trs = soup.find_all('li', class_="market-list-item")
    except Exception as e:
        print(f'解析数据异常 {e}')
        return rs_data

    for item in trs:
        # Robustness fix: the original try wrapped the whole loop, so one
        # malformed <li> discarded every remaining row on the page. Parse
        # each item independently and skip only the broken ones.
        try:
            date = item.find_all("span", class_="time")[0].string
            productName = item.find_all("span", class_="product")[0].string
            price = item.find_all("span", class_="price")[0].string
            price = price.replace("元/斤", "")
            # Convert the price to 元/KG (1 斤 = 0.5 kg).
            price = float(price) * 2
            place = item.find_all("span", class_="place")[0].string
            data = [str(date), str(productName), str(price), str(place)]
            print(data)
            rs_data.append(data)
        except Exception as e:
            print(f'解析数据异常 {e}')

    return rs_data


# Render the page in a browser so JS-populated content is present in the HTML.
def open_chrome(url):
    """Open *url* in headless Chrome and return the rendered page source."""
    browser = webdriver.Chrome(executable_path=chrome_path, options=chrome_options)
    try:
        browser.get(url)
        return browser.page_source
    finally:
        # Bug fix: the driver was never shut down (quit/close were commented
        # out), leaking a chromedriver + Chrome process per call.
        browser.quit()


def main(req_url):
    """Scrape one listing page, drop rows whose market already exists for the
    scraped date, and save the remainder to the database."""
    print("开始爬取数据" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    print(req_url)
    rs = open_chrome(req_url)
    data = get_contents(rs)

    # Bug fix: data[0][0] raised IndexError when the scrape returned nothing
    # (empty page, parse failure, site change).
    if not data:
        print("没有获取到数据，不保存！")
        print("爬取数据完成！" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        return

    # All rows on one page share the same quote date (row layout: [date, name, price, market]).
    price_date = data[0][0]
    db_market = find_name_List(price_date)
    db_market_names = [row[0] for row in db_market]
    print(db_market_names)

    # Keep only rows whose market is not already stored for this date
    # (builds a new list instead of remove()-ing from the one being kept).
    fresh = []
    for item in data:
        if item[3] in db_market_names:
            print("{} -->在数据库已存在，移除该条记录不保存。".format(item[3]))
        else:
            print("不存在--执行保存")
            fresh.append(item)

    if fresh:
        save(fresh)
    else:
        print("没有获取到数据，不保存！")
    print("爬取数据完成！" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

# Persist scraped quote rows into shop_price_daily_info.
def save(data):
    """Bulk-insert rows into shop_price_daily_info with type 4004 (巴沙鱼/basa).

    data: iterable of 4-item sequences (price_date, name, price, market_name)
    matching the placeholders in the INSERT statement.
    """
    try:
        cnn = mysql.connector.connect(**config)
    except mysql.connector.Error as e:
        print('数据库链接失败！', str(e))
        # Bug fix: the original fell through with cnn == '' and crashed on
        # ''.cursor(...); without a connection nothing can be inserted.
        return
    print("sucessfully!")

    cursor = cnn.cursor(buffered=True)
    try:
        # executemany() inserts every row in one call — cheaper than a
        # per-row execute() loop.
        stmt = 'insert into shop_price_daily_info(price_date, name, price, market_name, type, created_date, last_modified_date, version, copy_data) ' \
                      'values (%s, %s, %s, %s, 4004, NOW(), NOW(), 0, 0)'
        cursor.executemany(stmt, data)
        cnn.commit()
        print("插入数据成功！")
    except mysql.connector.Error as e:
        print('插入数据报错！', str(e))
    finally:
        # Always release the cursor and the connection.
        cursor.close()
        cnn.close()

# Listing page 1
url_1 = "https://www.cnhnb.com/hangqing/cdlist-2003656-0-0-0-0-1/"
# Listing page 2
url_2 = "https://www.cnhnb.com/hangqing/cdlist-2003656-0-0-0-0-2/"

# Script entry point: scrape and persist both listing pages.
if __name__ == '__main__':
    main(url_1)
    main(url_2)
