#coding=utf-8

from bs4 import BeautifulSoup
import requests
import csv
import bs4
import time
import mysql.connector
import datetime
from random import choice

import json


from selenium import webdriver
from selenium.webdriver.chrome.options import Options

# chrome_path = 'C:\Program Files\chrome\chromedriver.exe'
chrome_path ='/usr/bin/chromedriver'
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
# chrome_options.add_argument('--disable-dev-shm-usage')
# Database connection settings.
# NOTE(review): credentials (including what look like production RDS hosts
# and passwords below) are hardcoded in source — consider moving them to
# environment variables or a config file outside version control.
config = {'host': '127.0.0.1',  # default is 127.0.0.1
          'user': 'root', 'password': '654321', 'port': 3306,  # 3306 is the MySQL default
          'database': 'worm_data', 'charset': 'utf8'  # utf8 is the default charset
          }

config1 = {'host': 'localhost',  # default is 127.0.0.1
          'user': 'root', 'password': '123456', 'port': 3306,  # 3306 is the MySQL default
          'database': 'shopxxb2b2c', 'charset': 'utf8'  # utf8 is the default charset
          }

# Test server
config2 = {'host': 'rm-2zefd6473rz062234.mysql.rds.aliyuncs.com',  # default is 127.0.0.1
          'user': 'prod_ccii', 'password': 'cI1546_wodesecerts', 'port': 3306,  # 3306 is the MySQL default
          'database': 'ccii_prod', 'charset': 'utf8'  # utf8 is the default charset
          }

# Active config used by save() below.
config3 = {'host': 'rm-2zefd6473rz062234.mysql.rds.aliyuncs.com',  # default is 127.0.0.1
          'user': 'prod_python', 'password': '939_58J6kAW)P&^', 'port': 3306,  # 3306 is the MySQL default
          'database': 'ccii_prod', 'charset': 'utf8'  # utf8 is the default charset
          }


# HTTP headers mimicking a browser session against nc.mofcom.gov.cn
# (the Cookie/Referer values were captured from a real browser session).
header = {
    "Accept": "application/json, text/javascript, */*; q=0.01",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    "Connection": "keep-alive",
    "Content-Length": "206",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "Cookie": "vjuids=311a20aa4.16c3c9ae31c.0.d336aa33739c5; vjlast=1564384879.1564384879.30; _pk_ref.134.0d5f=%5B%22%22%2C%22%22%2C1606977373%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DCodGZw2HlzS9Ycba2PltBA4Tdo4WbC67mrnqV10Cof8M_dCVu8f6H4Ny04RSTaBs%26wd%3D%26eqid%3Dcf58c5330005b137000000025fbb1260%22%5D; _pk_id.134.0d5f=2e479bfdb551ba87.1599009038.10.1606977373.1606096355.; insert_cookie=28314071",
    "Host": "nc.mofcom.gov.cn",
    "Origin": "http://nc.mofcom.gov.cn",
    "Referer": "http://nc.mofcom.gov.cn/jghq/priceList?craftName=&pIndex=&eudName=&queryDateType=4&timeRange=2020-12-21%20~%202020-12-21",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
    "X-Requested-With": "XMLHttpRequest",
}

# National Agricultural Products Public Information Service Platform — upgraded scraper covering 150+ markets (some markets have no price data)


# Build SQL parameter rows from API records.
def handleSql(price_date, in_data):
    """Convert API price records into parameter tuples for SQL insertion.

    Parameters:
        price_date: date string for the records (e.g. '2020-12-21').
        in_data: list of dicts with keys CRAFT_NAME, AG_PRICE, EUD_NAME;
                 may be None or empty.

    Returns:
        List of (price_date, craft_name, price, eud_name) string tuples.
        Always a list (possibly empty) so callers can safely .extend() it —
        the original returned None for empty input, which crashed
        sql_data.extend(...) in work().
    """
    sql_rows = []
    if not in_data:
        return sql_rows
    try:
        for record in in_data:
            row = (str(price_date), str(record['CRAFT_NAME']),
                   str(record['AG_PRICE']), str(record['EUD_NAME']))
            print(row)
            sql_rows.append(row)
    except KeyError as err:
        # A malformed record is missing an expected key: log it and return
        # whatever rows were collected so far. (The original caught IOError,
        # which dict indexing never raises, so errors propagated uncaught.)
        print(err)
    return sql_rows

# Remove duplicate elements while preserving first-seen order.
def del_repeatnum(s):
    """Return a new list containing each element of `s` once, in order.

    Uses list membership (not a set) so unhashable elements work too.
    """
    unique = []
    for item in s:
        if item not in unique:
            unique.append(item)
    return unique


# Persist scraped price rows to the database.
def save(data, mealType, pName):
    """Insert scraped price rows into shop_price_daily_info via config3.

    Parameters:
        data: list of (price_date, name, price, market_name) tuples,
              as produced by handleSql().
        mealType: numeric product type code stored in the `type` column.
        pName: province name stored in the province_name column.
    """
    try:
        cnn = mysql.connector.connect(**config3)  # connect using config3 settings
    except mysql.connector.Error as e:
        print('数据库链接失败！', str(e))
        # BUG FIX: the original fell through after a failed connection and
        # then called .cursor() on the placeholder string '', raising an
        # AttributeError. Bail out instead.
        return
    else:  # only runs when the connection succeeded
        print("sucessfully!")

    cursor = cnn.cursor(buffered=True)
    try:
        # Fully parameterized statement: the original interpolated pName and
        # mealType directly into the SQL text, which breaks on quotes in the
        # province name and is SQL-injection-prone. executemany inserts all
        # rows in one round trip.
        stmt = ('insert into shop_price_daily_info'
                '(price_date, name, price, province_name, market_name, type,'
                ' created_date, last_modified_date, version, copy_data)'
                ' values (%s, %s, %s, %s, %s, %s, NOW(), NOW(), 0, 0)')
        rows = [(d[0], d[1], d[2], str(pName), d[3], mealType) for d in data]
        cursor.executemany(stmt, rows)
        cnn.commit()
        print("插入数据成功！")
    except mysql.connector.Error as e:
        print('插入数据报错！', str(e))
    finally:  # always release the cursor and connection
        cursor.close()
        cnn.close()

# Iterate over every province and persist one product's daily prices.
def work(type, craft_index, product_name, num):
    """Fetch and store price data for one product across all provinces.

    Parameters:
        type: numeric product type code written to the DB `type` column
              (e.g. 1 = pork, 3 = mutton).
        craft_index: site-specific product index sent as "craftIndex".
        product_name: product name sent as "craftName".
        num: how many days back the queried date lies (at least 1).
    """
    url = "http://nc.mofcom.gov.cn/jghq/priceList"
    price_data = getdate(num)  # loop-invariant: same date for every province

    for code, province in zip(provice_code, provice_name):
        params = {
            "craftIndex": craft_index,
            "craftName": product_name,
            "pIndex": code,
            "eudName": "全" + province + "市场",
            "queryDateType": "4",
            "timeRange": price_data + "~" + price_data,
            "pageNo": "1"
        }
        rs_data = getData(url, params)
        json_data = json.loads(rs_data)
        first_page_data = json_data['result']
        print(json_data['result'])
        page_no = json_data['totalPages']

        sql_data = []
        if page_no > 1:
            for current_page in range(1, page_no + 1):
                # BUG FIX: original wrote `params[pageNo]` — `pageNo` is an
                # undefined name (NameError); the dict key is the string.
                params["pageNo"] = current_page
                page_rs_data = getData(url, params)
                time.sleep(1)  # throttle between page fetches
                # BUG FIX: pass the 'result' list to handleSql, not the whole
                # JSON dict (the single-page branch already does this).
                page_rows = handleSql(price_data, json.loads(page_rs_data)['result'])
                if page_rows:  # guard: handleSql may return None on empty input
                    sql_data.extend(page_rows)
        else:
            sql_data = handleSql(price_data, first_page_data)

        if sql_data:
            save(sql_data, type, province)

        time.sleep(1)  # throttle between provinces

    print("全部省市数据抓取完成" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))


def getData(page_url, page_params):
    """POST `page_params` to `page_url` with the module-level browser
    headers and return the raw response body as text."""
    response = requests.post(page_url, page_params, headers=header)
    return response.text


# Pork (type 1)
# pork_url   = "http://nc.mofcom.gov.cn/channel/jghq2017/price_list.shtml?par_craft_index=13079&craft_index=13233&par_p_index="
pork_url   = "http://nc.mofcom.gov.cn/jghq/priceList"

# Beef
# beef_url = "http://nc.mofcom.gov.cn/channel/jghq2017/price_list.shtml?par_craft_index=13079&craft_index=13235&par_p_index=&p_index=&startTime=2019-01-04&endTime=2019-04-04&page="
beef_url   = "http://nc.mofcom.gov.cn/channel/jghq2017/price_list.shtml?par_craft_index=13079&craft_index=13235&par_p_index="
# Mutton
mutton_url = "http://nc.mofcom.gov.cn/channel/jghq2017/price_list.shtml?par_craft_index=13079&craft_index=13237&par_p_index="
# Live grass carp
fish_url_4001 = "http://nc.mofcom.gov.cn/channel/jghq2017/price_list.shtml?par_craft_index=13080&craft_index=8754711&par_p_index="

# Scallop (URL prefix)
pictinid_url_7001 = "http://nc.mofcom.gov.cn/channel/jghq2017/price_list.shtml?par_craft_index=13080&craft_index=13165&par_p_index="



# Province names — must stay aligned index-by-index with provice_code below.
provice_name = ['安徽', '北京','福建', '甘肃', '广东', '广西壮族自治区', '贵州省', '海南省', '河北省' , '河南省', '黑龙江省','湖北省',
                 '湖南省', '吉林省','江苏省', '江西省', '辽宁省', '内蒙古自治区','宁夏回族自治区','青海省', '山东省',
                 '山西省', '陕西省', '上海市', '四川省', '天津市', '西藏自治区','新疆建设兵团', '新疆维吾尔自治区', '云南省',
                 '浙江省', '重庆市']
# Province codes (parallel to provice_name above)
provice_code = ['34', '11', '35', '62', '44', '45','52', '46', '13', '41', '23','42',
                '43', '22','32', '36', '21', '15','64','63','37',
                '14', '61', '31', '51', '12', '54','99', '65', '53',
                '33', '50']


# Return the date N days before today; beforeOfDay=1 means yesterday.
def getdate(beforeOfDay):
    """Return the calendar date `beforeOfDay` days in the past as 'YYYY-MM-DD'."""
    target = datetime.datetime.now() - datetime.timedelta(days=beforeOfDay)
    return target.strftime('%Y-%m-%d')

# url_06 ="&p_index=&startTime=2019-07-09&endTime=2019-07-23&page="
url_07 ="&p_index=&startTime=2019-09-20&endTime=2019-09-25&page="

# Pork
pork_type = 1
# Beef
beef_type = 2
# Mutton
mutton_type = 3
# Live grass carp
fish_type = 4001

shrimp_type = 5001

# Fetch data from `num` days ago; must be at least 1.
num= 1
# work(pork_type, 13233, "猪肉(白条猪)", num)
# work(beef_type, 13235, "牛肉", num)
work(mutton_type, 13237, "羊肉", num)
work(fish_type, 8754711, "活草鱼", num)













