#
# 从惠农网(https://www.cnhnb.com/)获取价格信息
# 该网站有防护措施，仅获取第一页数据（15条）
#

import requests
import re
import math
import random
import string
import base64
import hashlib
import time
from bs4 import BeautifulSoup
import CapturerDb

def waitSeconds(second):
    """Block the current thread for *second* seconds (readability wrapper)."""
    time.sleep(second)

def generateRandomString(length):
    """Return a random string of *length* characters drawn from [a-zA-Z0-9]."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=length))

def generateRandomCookies():
    """Build a pseudo-fingerprint cookie value: base64("<unix_ts>,<md5(unix_ts)>")."""
    timestamp = str(int(time.time()))
    digest = hashlib.md5(timestamp.encode('utf-8')).hexdigest()
    token = '%s,%s' % (timestamp, digest)
    return base64.b64encode(token.encode('utf-8')).decode('utf-8')

def parsePrices(priceName, soup, price_info_list):
    """Extract price rows from a listing page and append them to price_info_list.

    Each '.market-list-item' element's text is expected to be four
    space-separated fields: "<date> <variety> <place> <price+unit>",
    e.g. "07-01 黄大豆 山东 5.2元/斤".

    priceName: product name to record for every row.
    soup: BeautifulSoup document of the listing page.
    price_info_list: mutated in place; each appended row is
        [site, trade-type, url, place, date, priceName, variety, price, unit].
    """
    url = 'https://www.cnhnb.com/hangqing/'  # loop-invariant source link
    for item in soup.find_all(class_='market-list-item'):
        parts = item.text.strip().split(' ')
        # Bug fix: skip rows without the four expected fields instead of
        # raising IndexError on malformed or anti-bot markup.
        if len(parts) < 4:
            continue
        timeText = parts[0]
        varietyText = parts[1]
        placeText = parts[2]
        priceText = parts[3]
        unitIndex = priceText.find('元')
        if unitIndex < 0:
            # Bug fix: find() returning -1 used to chop the last character
            # off the price and report it as the unit; keep the raw price
            # text and record an empty unit instead.
            priceUnit = ''
        else:
            priceUnit = priceText[unitIndex:]
            priceText = priceText[:unitIndex]
        price_info_list.append(['惠农网', '交易', url, placeText, timeText, priceName, varietyText, priceText, priceUnit])

# Fetch prices by category code; codes are assigned by the source website.
def getPrices(codes, priceName, price_info_list):
    """Fetch the first page (15 rows) of price listings for *codes* from cnhnb.com.

    codes: numeric category code string assigned by the source site.
    priceName: human-readable product name used in log messages and output rows.
    price_info_list: list that parsed rows are appended to (mutated in place).
    """
    print('开始获取惠农网' + priceName + '数据...')

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
        'Host': 'www.cnhnb.com',
        'Referer': 'https://www.cnhnb.com/'
    }

    # Fresh pseudo-fingerprint cookies per request so each fetch looks like a
    # new visitor to the site's protection layer.
    cookies = {'fingerId': generateRandomCookies(), 'deviceId': generateRandomString(15)}
    url = f'https://www.cnhnb.com/hangqing/cdlist-{codes}-0-0-0-0-1/'

    waitSeconds(10)  # throttle: the site blocks rapid successive requests
    response = requests.get(url, headers=headers, cookies=cookies)
    if response.status_code != 200:
        print('获取惠农网' + priceName + '信息失败!')
        return

    soup = BeautifulSoup(response.text, 'html.parser')

    # Bug fix: the anti-bot challenge page can come back with HTTP 200 but
    # without the pagination element; soup.find() then returns None and
    # `totalSpan.next` raised AttributeError. Treat that as a failed fetch.
    totalSpan = soup.find(class_='eye-pagination__total')
    totalMatch = re.search(r"\d+", totalSpan.next) if totalSpan is not None else None
    if totalMatch is None:
        print('获取惠农网' + priceName + '信息失败!')
        return

    # Page count is informational only while pagination is disabled (15 rows/page).
    totalRecord = int(totalMatch.group())
    pageCount = math.ceil(totalRecord / 15)

    # Parse the first (and only fetched) page of price rows.
    parsePrices(priceName, soup, price_info_list)

    # NOTE: fetching pages 2..pageCount is intentionally disabled because of the
    # site's protection measures. To re-enable, loop over
    # f'https://www.cnhnb.com/hangqing/cdlist-{codes}-0-0-0-0-{page}/' with fresh
    # cookies and a delay per page, passing price_info_list to parsePrices.
    print('获取惠农网' + priceName + '信息完成!')

#测试
#getPrices('2002027', '大豆', [])