from bs4 import BeautifulSoup
import sys
sys.path.append('../kernel')
sys.path.append('../model')
import spider_main
import urllib.parse
import chardet
import re
import cg_task #任务列表
import cg_info #插入信息列表
import urllib.request #请求类
import time
class CgController(object):
    """Crawls a government-procurement listing page and stores newly
    discovered article links via the cg_info model."""

    def __init__(self):
        # Cumulative count of already-crawled links seen during get_data;
        # once it reaches 3 the page is considered stale and crawling stops.
        self.exists_count = 0

    def get_data(self, params):
        """Fetch the listing page and insert every unseen article link.

        params: dict with keys
            'web_url' - URL of the listing page to fetch
            'id'      - task id stored as 't_id' on each inserted row
            'c_id'    - category id stored as 'c_id' on each inserted row
        """
        i_model = cg_info.InfoModel()
        # Spoof a browser User-Agent to get past trivial anti-crawler checks.
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
        req = urllib.request.Request(url=params['web_url'], headers=headers)
        # Close the HTTP connection deterministically (original leaked it).
        with urllib.request.urlopen(req) as resp:
            html = resp.read()
        codedetect = chardet.detect(html)['encoding']  # detect the page encoding
        html_obj = BeautifulSoup(html.decode(codedetect), 'html.parser')
        # Locate the listing table; bail out if the page layout changed.
        html_list = html_obj.find('table', attrs={'class': 'aa'})
        if html_list is None:
            print('listing table not found: ' + params['web_url'])
            return

        for a_tag in html_list.find_all('a'):
            onclick = a_tag.get('onclick')
            if not onclick:
                continue  # anchor without the expected onclick handler
            # The article id is embedded as a quoted token in the onclick JS.
            ma = re.search(r"'[a-zA-Z0-9]*'", onclick)
            if ma is None:
                continue  # onclick does not contain a quoted id
            xcurl = "www.whgp.gov.cn/whzf/front/pubmsg/one.do?id=" + ma.group().strip("'")
            if i_model.exists(xcurl):
                # Count duplicate hits; stop once the page is clearly old.
                self.exists_count += 1
                if self.exists_count >= 3:
                    print('1页面已经爬取过，停止程序！')
                    break
                continue
            save_data = {
                'url': xcurl,
                'title': a_tag.string,
                'creation_time': int(time.time()),
                't_id': params['id'],
                'c_id': params['c_id'],
            }
            i_model.add_info(save_data)
            print(xcurl)

if __name__ == '__main__':
    # Load crawl task #24 and run the controller against it.
    task = cg_task.TaskModel().get_task_by_id(24)

    controller = CgController()
    controller.get_data(task)