from bs4 import BeautifulSoup

import requests
import csv
import bs4
import time
from datetime import datetime
import mysql.connector

import json
import os
import sys
import io
import xlrd
from xlrd import xldate_as_tuple

from selenium import webdriver
from selenium.webdriver.chrome.options import Options

'''
越南巴沙鱼FOB出厂价格
巴沙鱼价格网址：
http://seafood.vasep.com.vn/51/statistics/pangasius.htm
网站访问很慢

单位： 越南盾/千克
获取价格大的值
'''
# Chromedriver binary location (Linux path; Windows path kept below for reference).
chrome_path ='/usr/bin/chromedriver'
# chrome_path = 'C:\Program Files\chrome\chromedriver.exe'
chrome_options = Options()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
# Database connection settings.
# NOTE(review): credentials are hard-coded in source — they should be moved to
# environment variables or a secrets store and rotated.
config_168 = {'host': '127.0.0.1',  # default 127.0.0.1
          'user': 'root', 'password': 'Tnq39/*riqJcC', 'port': 3306,  # default is 3306
          'database': 'ccii_redesign', 'charset': 'utf8','auth_plugin':'mysql_native_password'  # default is utf8
          }

config1 = {'host': 'rm-2zefd6473rz062234.mysql.rds.aliyuncs.com',  # default 127.0.0.1
          'user': 'prod_ccii', 'password': 'cI1546_wodesecerts', 'port': 3306,  # default is 3306
          'database': 'ccii_prod', 'charset': 'utf8', 'auth_plugin':'mysql_native_password'  # default is utf8
          }

# `config` is the one actually used by get_rate()/save() below.
config = {'host': 'rm-2zefd6473rz062234.mysql.rds.aliyuncs.com',  # default 127.0.0.1
          'user': 'prod_python', 'password': '939_58J6kAW)P&^', 'port': 3306,  # default is 3306
          'database': 'ccii_prod', 'charset': 'utf8'  # default is utf8
          }



# Request headers mimicking a desktop Chrome browser.
# NOTE(review): the Cookie is a captured session value — presumably stale;
# verify it is still needed if requests start failing.
header = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    "Cookie": "ASP.NET_SessionId=njl4ybuvhwfysr45ngqztl45; _ga=GA1.3.30078231.1605059355; _gid=GA1.3.1065798023.1605583549; __atuvc=14%7C46%2C1%7C47",
    "Host": "seafood.vasep.com.vn",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36",
}

# Directory holding marker files that record which weekly reports were scraped.
path = "/opt/excel_price/"

# 处理成sql插入数据
def handle_sql_data(data):

    for item in data:
        # data[0] 日期  data[1]  价格 越南盾/Kg
        rate = get_rate(item[0], "VND")
        # 汇率2
        item.append(rate)
        # 人民币/千克
        fob_price = format(float(item[1]) / float(rate) + float(4.9), ".2f")

        # CIF价格（元/KG）=FOB出厂价格(元/KG)+海运费（1000*7.1/25000  元/KG）+海运保险费（FOB出厂价格*0.05% 元/KG）
        cif_price = format(float(fob_price) + float(1000 * 7.1 / 25000) + float(fob_price) * 0.0005, ".2f")
        # cif_price 3
        item.append(cif_price)
        # 换算fob_price 4
        item.append(fob_price)
        # 国家英文 5
        item.append("VN")
        # 国家中文 6
        item.append("越南")
        # 币种 7
        item.append("VND")
        # 单位 8
        item.append("越南盾/千克")
        print(item)

    return data


# 爬取资源
def get_contents(in_data):

    rs_data = []

    try:

        soup = BeautifulSoup(in_data, 'lxml')
        divs = soup.find_all("div", class_="khungNoiDung")
        # print(divs[0])
        ahtml = divs[0].find_all("a", class_= "items")
        # print(ahtml[0])
        ahref = ahtml[0].get("href")

        result = requests.get(ahref, timeout=60)
        result.raise_for_status()
        # print(result.content) font-size:10.0pt;font-family:"Arial","sans-serif";mso-bidi-font-weight:  #   bold
        page_content = BeautifulSoup(result.content, 'lxml')
        date_div = page_content.find_all("div", class_="titleCategory")
        file_name = date_div[0].text
        print(file_name)

        if not check_exists(file_name):
            return

        date_arr = handle_date(date_div[0].text)
        span = page_content.find_all("span", style='font-size:10.0pt;font-family:"Arial","sans-serif";mso-bidi-font-weight:\n  bold')
        # 20,500 - 21,800
        priceStr = span[0].text
        priceStr = priceStr.replace(" ", "")
        priceStr = priceStr.replace(",", "")
        prices = priceStr.split("-")
        print(prices)
        for item in date_arr:
            data = list()
            data.append(item)
            data.append(prices[1])
            rs_data.append(data)

    except Exception as e:
        print(f'解析数据异常 {e}')

    return rs_data

# 校验文本是否存在
def check_exists(file_name):
    flag = False

    if not os.path.exists(path):
        os.makedirs(path)

    path_full_name = path + file_name + ".txt"
    print(path_full_name)
    if os.path.exists(path_full_name):
        print("网站未更新数据无需爬取")
        return flag
    else:
        try:
            f = open(path_full_name, "a+")
            f.write(file_name)
            flag = True
        except Exception as e:
            print("校验文件异常")
        finally:
            f.close()

    return flag

def main(req_url):
    """Fetch the listing page, scrape any new prices and persist them.

    Args:
        req_url: URL of the pangasius statistics listing page.
    """
    print(" 开始爬取数据" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    print(req_url)
    rs = requests.get(req_url, timeout=120)
    data = get_contents(rs.content)
    # Only enrich and save when the scrape produced new rows
    # (the original checked `if data:` twice in a row).
    if data:
        handle_sql_data(data)
        save(data)
    print("爬取数据完成！" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))


# 查询数据库得到汇率
def get_rate(priceDate, currencyType):
    cnn = ''
    rate = 0
    try:
        cnn = mysql.connector.connect(**config)  # connect方法加载config的配置进行数据库的连接，完成后用一个变量进行接收
    except mysql.connector.Error as e:
        print('数据库链接失败！', str(e))
    else:  # try没有异常的时候才会执行
        print("sucessfully!")

    # 插入数据库
    cursor = cnn.cursor(buffered=True)  # 获取插入的标记位
    try:

        # 第三种：可以一次插入多条，效率比一条条插高,用的方法是executemany 猪牛羊 1 2 3
        stmt = "select rate_value from alarm_rate_cny where worm_date like \"%" + priceDate + "%\"" + "and rate_en_name = \'"  + (currencyType + "CNY\'")
        # print(stmt)
        # for item in data:
        #     print(item)
        cursor.execute(stmt)
        rate = cursor.fetchone()[0]
        cnn.commit()
        print(rate)
        print("查询数据成功！")

    except mysql.connector.Error as e:
        print('查询数据报错！', str(e))
    finally:  # 无论如何都会执行下面的语句
        cursor.close()  # 关闭标记位
        cnn.close()  # 关闭数据库链接

    return rate

# 保存到数据库
def save(data):
    cnn = ''
    try:
        cnn = mysql.connector.connect(**config)  # connect方法加载config的配置进行数据库的连接，完成后用一个变量进行接收
    except mysql.connector.Error as e:
        print('数据库链接失败！', str(e))
    else:  # try没有异常的时候才会执行
        print("sucessfully!")

    # 插入数据库
    cursor = cnn.cursor(buffered=True)  # 获取插入的标记位
    try:

        # 第三种：可以一次插入多条，效率比一条条插高,用的方法是executemany 猪牛羊 1 2 3
        stmt = 'insert into alarm_intl_fish_price(name, price_date, price, rate, price_cif, price_fob, country_en, country_cn, currency_type, unit, create_date, del_flag) ' \
               'values ("' + str('越南巴沙鱼') + '", %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW(), 0)'
        # for item in data:
        #     print(item)
        # cursor.execute(stmt, item)
        cursor.executemany(stmt, data)
        cnn.commit()
        print("插入数据成功！")

    except mysql.connector.Error as e:
        print('插入数据报错！', str(e))
    finally:  # 无论如何都会执行下面的语句
        cursor.close()  # 关闭标记位
        cnn.close()  # 关闭数据库链接

# 处理日期
def handle_date(str_):
    date_arr = []
    # str_ = "The prices of pangasius, 9th – 15th October 2020"
    strs = str_.split(",")
    print(strs[1])
    arr = strs[1].split(" ")
    # ['', '9th', '–', '15th', 'October', '2020']
    print(arr)
    start = arr[1].replace("th", "")
    end = arr[3].replace("th", "")

    for day in range(int(start), int(end) + 1):
        date_str = arr[5] + "-" + arr[4] + "-" + str(day)
        date_str = time.strftime('%Y-%m-%d', time.strptime(date_str, '%Y-%B-%d'))
        date_arr.append(date_str)
    print(date_arr)
    return date_arr



url = "http://seafood.vasep.com.vn/51/statistics/pangasius.htm"

if __name__ == '__main__':
    main(url)







