#
# Capture public-opinion (舆情) items for the agriculture ministry feed.
# NOTE(review): the original header cited www.agri.cn, but every URL below
# actually targets the Sichuan Provincial Department of Agriculture and
# Rural Affairs (http://nynct.sc.gov.cn/) — header corrected to match.
# The site has anti-scraping protection, so only the first page of each
# listing (about 15 items) is fetched.
#

import sys
import io
import requests
import re
import math
import random
import string
import base64
import hashlib
import time

from datetime import datetime
from bs4 import BeautifulSoup
import CapturerDb


def waitSeconds(second):
    """Block the current thread for *second* seconds (thin wrapper over time.sleep)."""
    time.sleep(second)

def generateRandomString(length):
    """Return a random string of *length* characters drawn from
    ASCII letters (both cases) and digits, sampled with replacement."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=length))

def generateRandomCookies():
    """Build a cookie token: base64("&lt;unix_timestamp&gt;,&lt;md5(timestamp)&gt;").

    The current time (whole seconds) is hashed with MD5 and the pair is
    joined with a comma before being base64-encoded as UTF-8 text.
    """
    stamp = str(int(time.time()))
    digest = hashlib.md5(stamp.encode('utf-8')).hexdigest()
    token = '%s,%s' % (stamp, digest)
    return base64.b64encode(token.encode('utf-8')).decode('utf-8')

def parsePopularFeelings(aList, category):
    """Persist each anchor in *aList* as one popular-feelings record.

    For every `<a>` element: its text becomes the title, and its href —
    minus the first character — is appended to the site root to form the
    full URL (assumes hrefs start with '.'; TODO confirm against the live
    markup).  The capture date is today's local date.  Records in the
    price category ('价格行情') are marked negative ('负面'); all others
    positive ('正面').
    """
    for anchor in aList:
        linkTitle = anchor.text
        relativePath = anchor['href'][1:]
        fullUrl = 'http://nynct.sc.gov.cn/' + relativePath
        captureDate = time.strftime("%Y-%m-%d", time.localtime())
        sentiment = '负面' if category == '价格行情' else '正面'
        CapturerDb.updatePopularFeelings(
            '四川省农业农村厅', fullUrl, category, captureDate, sentiment, '', linkTitle)

#获取舆情信息
def getPopularFeelings():
    """Fetch the first listing page of the Sichuan agriculture-department
    news site and store every linked article as a popular-feelings record.

    Prints a success message on HTTP 200, a failure message otherwise.
    Network errors and timeouts propagate to the caller (as in the
    original requests usage).
    """
    print('开始获取四川省农业厅舆情信息...')

    url = 'http://nynct.sc.gov.cn/nynct/c100623/news.shtml'

    # timeout added so a stalled server cannot hang the capture run forever
    response = requests.get(url, timeout=30)
    response.encoding = 'utf-8'
    if response.status_code != 200:
        print('获取四川省农业厅舆情信息失败!')
        return

    soup = BeautifulSoup(response.text, 'html.parser')

    #获取农业要闻 — headline containers, paired positionally with categories
    _parseSections(soup.find_all(id='headLine'),
                   ['农业要闻', '农业要闻', '农业要闻', '价格行情', '', ''])

    #获取热点推荐 — article-list containers
    _parseSections(soup.find_all(class_='artcleList'),
                   ['工作动态', '时事要闻', '行业动态', '市县动态', '', ''])

    print('获取四川省农业厅舆情信息完成!')


def _parseSections(divs, categories):
    """Feed each container's anchors to parsePopularFeelings.

    Containers are paired with categories positionally; zip() stops at the
    shorter sequence, so extra containers are skipped instead of raising
    IndexError (the original `categories[divIndex]` lookup could overflow).
    """
    for div, category in zip(divs, categories):
        parsePopularFeelings(div.find_all('a'), category)

# Manual test entry point:
#getPopularFeelings()