from bs4 import BeautifulSoup
import sys
import chardet
# Relative module paths when running on Windows (development)
sys.path.append('../kernel')
sys.path.append('../model')
# Absolute module paths when deployed on Linux (uncomment as needed)
# sys.path.append('/home/python/caigou/kernel')
# sys.path.append('/home/python/caigou/model')
import urllib.parse
import cg_task #任务列表
import cg_info #插入信息列表
import cg_task_log #log日志
import urllib.request #请求类
import ssl #用于解决https请求协议
import json
import time
import requests
# https://www.cnblogs.com/zy6103/p/6943557.html  (reference: dynamic module loading)

class GetinfoController(object):
    """Crawler controller.

    Fetches pending tasks, downloads each task's page through a local
    fetch service, parses links using the task's "trait" (feature-code)
    config, and persists new links to the database.
    """

    def __init__(self):
        # Count of duplicate (already-stored) URLs seen on the current page;
        # crawling of a page stops once it reaches 3 (see save_info).
        self.exists_count = 0
        self.t_mode = cg_task.TaskModel()          # pending-task queue
        self.i_model = cg_info.InfoModel()         # extracted-link storage
        self.l_model = cg_task_log.TasklogModel()  # crawl error log

    def index(self):
        """Run every pending task: fetch, parse, save; log fetch errors."""
        task_data = self.t_mode.get_task()
        if not task_data:
            print('没有任务可执行')
            return

        for task_info in task_data:
            # Host part of the task URL, used later to build absolute links.
            host_name = urllib.parse.urlparse(task_info['web_url']).hostname
            # Parameters shared by the fetch / parse / save steps.
            params = {
                "creation_time": int(time.time()),
                "host_name": host_name,
                "t_id": task_info['id'],
                "c_id": task_info['c_id'],
                "web_url": task_info['web_url'],
                "trait": task_info['trait'],
                "web_http": task_info['web_http'],
            }
            try:
                html_data = self.get_data(params)
            # Catch Exception, not BaseException: the latter would also
            # swallow KeyboardInterrupt/SystemExit and make the crawler
            # impossible to stop cleanly.
            except Exception as e:
                error_info = '获取数据错误:%s' % e
                print(error_info, params)
                params['info'] = error_info
                self.l_model.insert_log(params)
                continue

            data_list = self.trait_decode(params, html_data)
            if data_list is None:
                print('解析数据失败')
                continue
            # Persist the extracted links.
            self.save_info(data_list, params)
            # Be polite to the target site between tasks.
            time.sleep(2)

    def test_get(self, task_info):
        """Fetch and parse a task without writing to the database (for testing)."""
        html_data = self.get_data(task_info)
        # Decode the page using the task's feature code.
        return self.trait_decode(task_info, html_data)

    def get_data(self, param):
        """POST the task URL to the local fetch service; return raw HTML bytes."""
        self.url = "http://127.0.0.1:9123"
        payload = {"url": param['web_url']}
        # 30s timeout so a stuck fetch service cannot hang the crawler.
        return requests.post(self.url, data=payload, timeout=30).content

    def trait_decode(self, param, html_data):
        """Parse *html_data* using the task's trait (feature-code) config.

        Returns a list of {'url': ..., 'title': ...} dicts, or None when
        the task has no trait configured.
        """
        if param['trait'] == '':
            # No feature code configured -- nothing to parse with.
            return None
        if not isinstance(param['trait'], str):
            trait = param['trait']
        else:
            # JSON string -> dict (must use double quotes as delimiters).
            trait = json.loads(param['trait'])

        # form_model selects the candidate HTML tags.
        form_model = trait['form_model']
        html_obj = BeautifulSoup(html_data, 'html.parser')
        html_list = html_obj.find_all(form_model['tag'], attrs=form_model['attrs'])
        if 'select_children' in form_model:
            # Optionally descend into a child tag of each match.
            html_list = [x for j in html_list
                         for x in j.find_all(form_model['select_children']['tag'])]
        print(len(html_list))

        # data_model maps tag attributes/content onto url/title fields.
        data_model = trait['data_model']
        data_list = []
        for html_val in html_list:
            data = {'url': html_val[data_model['url']]}
            if data_model['title'] == 'title':
                data['title'] = html_val[data_model['title']]
            elif data_model['title'] == 'string':
                data['title'] = html_val.string
            data_list.append(data)
        return data_list

    @staticmethod
    def _strip_scheme(url):
        """Remove a leading 'http://' or 'https://' prefix from *url*.

        Replaces the old str.lstrip('https://') calls, which strip a
        *character set* (h, t, p, s, :, /) rather than the prefix and
        mangled URLs (e.g. 'https://host/x' -> 'ost/x').
        """
        for prefix in ('https://', 'http://'):
            if url.startswith(prefix):
                return url[len(prefix):]
        return url

    def save_info(self, data, params):
        """Store parsed links, skipping (and counting) already-stored URLs.

        Once 3 duplicates are seen on one page, the page is assumed to
        have been crawled before and the loop stops.
        """
        for d_val in data:
            # A hostname in the link means it is already an absolute URL.
            is_host_name = urllib.parse.urlparse(d_val['url']).hostname
            if is_host_name == params['host_name']:
                # Same-host absolute URL: drop the scheme; it is re-added
                # from the task's protocol (web_http) when displayed.
                url = self._strip_scheme(d_val['url'])
            else:
                # Relative link: join it onto the task's host.
                url = params['host_name'] + '/' + d_val['url'].lstrip('/')
            if self.i_model.exists(url):
                # Count duplicates; 3 on one page => page already crawled.
                self.exists_count += 1
                if self.exists_count >= 3:
                    print('页面已经爬取过，停止程序！')
                    break
                continue
            save_data = {
                'url': url,
                'title': d_val['title'],
                'creation_time': params['creation_time'],
                't_id': params['t_id'],
                'c_id': params['c_id'],
            }
            self.i_model.add_info(save_data)  # write to the database
            print(url)
        # Reset the duplicate counter for the next page.
        self.exists_count = 0

# Script entry point: run one crawl pass over all pending tasks.
if __name__ == '__main__':
    controller = GetinfoController()
    controller.index()