# coding=utf-8
from os import pardir
import pdb
from bs4 import BeautifulSoup
import io
import sys
from difflib import Differ
import requests
import utils
from urllib import parse
from urllib.parse import urlparse
import re
import json
from html.parser import HTMLParser
from mysqlAction import mysqlAction
from redisAction import redisAction
# 规避urllib3 Unverified HTTPS request is being made to host
import urllib3
from urllib3.exceptions import InsecureRequestWarning
urllib3.disable_warnings(InsecureRequestWarning)

#改变标准输出的默认编码
sys.stdout=io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')

#引入配置参数
from config import global_config

# Browser identity (User-Agent) the crawler presents when visiting target sites
headers={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36'}

# Scheme values that mark a link as invalid
illegal_scheme_words = ['javascript','#this']

# href values that mark a link address as invalid
illegal_href_words = ['###','javascript:;','javascript:void(0);','javascript:history.go(-1);','#this']

# Content-Type values treated as HTML responses
# NOTE(review): not referenced anywhere in this chunk — possibly used elsewhere or dead.
is_html = ['text/html','text/html;charset=UTF-8','text/html; charset=utf-8','text/plain','text/plain;charset=UTF-8','text/plain; charset=UTF-8','text/html; charset=gb2312','text/html;charset=gb2312','text/html;charset=GBK', 'text/html; charset=GBK']

# Regex of keywords identifying a detail-page URL
detail_url_keywords = r'.*?(Detail|Notice|infoPage).*?'

# Minimum threshold for the number of rows per list page
page_size = 5

# Database (MySQL) instance
db_obj = mysqlAction('mysql')

# Redis instance
redis_obj = redisAction()

"""
splash抓取页面的html
"""

def splashCatchHtml(url):
    connect_url_get_iframe = global_config.getRaw('splashConfig', 'connect_url_get_iframe')
    splash_url = connect_url_get_iframe+'&url='+url
    html_repsonse = requests.get(splash_url, verify=False)
    html_content = ''
    child_frames = ''
    try:
        html = json.loads(html_repsonse.text)
        html_content = html['html']
        child_frames = html['childFrames']
    except:
        print('json格式错误!')
    return {'html':html_content, 'iframe':child_frames}

"""
splash抓取页面的html
"""

def splashGetHtml(url, headers, data, proxys):

    connect_url_html = global_config.getRaw('splashConfig', 'connect_url_html')
    splash_url = connect_url_html+'&url='+url
    html_repsonse = requests.get(splash_url, verify=False, headers=headers, data=data, proxies=proxys)
    return html_repsonse.text


"""
处理html的页面
"""

def doGetHtml(url, headers, data, proxies):
    try:
        r = requests.get(url=url,headers=headers,data=data, verify=False, proxies=proxies)
        #return r.text.decode('UTF-8','ignore')
        # 输出前设置编码
        r.encoding = "utf-8"
        content = r.text if r.status_code == 200 else ''
        content_type = r.headers['Content-Type'] if r.headers['Content-Type'] else ''
        return {'content':content, 'content_type':content_type}
    except Exception as e:
        return {'content':str(e), 'content_type':'none'}

"""
构造IP代理
"""
def getRedisProxies():
    
    proxies={}
    proxies_info = redis_obj.get('proxies_info')
    if not proxies_info:
        proxies_res = utils.doGetRquest('http://pandavip.xiongmaodaili.com/xiongmao-web/apiPlus/vgl?secret=1452fd1bdd39e2c2d41916f5048f56ba&orderNo=VGL20210623102821cPNTN4H2&count=1&isTxt=0&proxyType=1&validTime=0&removal=0', headers, {}, {})
        try:
            proxies_list = json.loads(proxies_res['content'])
            proxies_info = proxies_list['obj'][0]
            if proxies_info:
                proxies_info = json.dumps(proxies_info)
                set_res = redis_obj.set('proxies_info', proxies_info, 300)
        except Exception as e:
            error = str(e)
            print(f'{error}')
    proxies_url = ''
    if proxies_info:
        proxies_info = json.loads(proxies_info)
        proxies_url = proxies_info['ip']+':'+proxies_info['port']
    # 自定义代理信息
    if proxies_url:
        proxies = {
            "http":"http://%(proxy)s/" % {'proxy':proxies_url},
            "https":"https://%(proxy)s/" % {'proxy':proxies_url},
        }

    return proxies


"""
根据a标签的href相识度获取列表链接
excel_url 要爬取的地址
headers 自定义header头信息
proxys  自定义代理信息
method 请求类型 get post
"""

def getListLinks(excel_url, headers, proxies, method):
    data = {}
    detail_url_list_set = {}
    url_parse_rsult = urlparse(excel_url)
    js_drawing_befor = doGetHtml(excel_url, headers=headers, data=data, proxies=proxies)
    content_type = js_drawing_befor['content_type']
    filtered_normal_response = utils.filterTags(js_drawing_befor['content'])
    if content_type.find('html') > 0 or content_type.find('plain') > 0:
        iframe_detail_url_list = []
        iframe_detail_url_relation_titles = {}
        if method == 'post':
            js_drawing_after_content = splashCatchHtml(excel_url)
            # 如果存在iframe获取iframe的内容筛除详情链接地址
            js_drawing_after_iframes = js_drawing_after_content['iframe']
            iframe_url = ''
            if js_drawing_after_iframes:
                iframe_url = js_drawing_after_iframes[0]['requestedUrl']
            if iframe_url:
                iframe_url_parse_result = urlparse(iframe_url)
                iframe_scheme = iframe_url_parse_result.scheme
                iframe_netloc = iframe_url_parse_result.netloc
                if iframe_scheme == '':
                    if iframe_netloc:
                        iframe_url = url_parse_rsult.scheme+'://'+iframe_url
                    else:
                        iframe_url = url_parse_rsult.scheme+'://'+url_parse_rsult.netloc+iframe_url     
                iframe_html = doGetHtml(iframe_url, headers=headers, data=data, proxies=proxies) 
                if iframe_html['content']:
                    filter_iframe_html = utils.filterTags(iframe_html['content'])
                    iframe_soup = BeautifulSoup(filter_iframe_html)
                    iframe_a_list = iframe_soup.findAll('a',href=True)
                    iframe_detail_url_list_result = analysisHrefGetDetaillinks(iframe_url, iframe_a_list)
                    iframe_detail_url_list = iframe_detail_url_list_result['links']
                    iframe_detail_url_relation_titles = iframe_detail_url_list_result['links_relation_title']
            ## 当前页面处理分析a标签链接
            js_drawing_after_html = js_drawing_after_content['html']        
            filtered_splash_html = utils.filterTags(js_drawing_after_html)
            # 获取不同的内容
            different_content = utils.getDifferentContent(filtered_splash_html, filtered_normal_response)
        else:
            different_content = filtered_normal_response
        # TODO 两次获取页面内容相差不大的需要在做处理 比如列表数据不是用js渲染的
        # if len(different_content) <= 0:
        #     different_content = filtered_splash_html
        print(different_content)
        if len(different_content) > 0:
            try:
                soup = BeautifulSoup(different_content, 'lxml')
                # 获取不同内容的a标签及href
                a_list = soup.findAll('a',href=True)
                detail_url_list_result = analysisHrefGetDetaillinks(excel_url, a_list)
                detail_url_list = detail_url_list_result['links']
                detail_url_relation_titles = detail_url_list_result['links_relation_title']
                # 合并数组
                detail_url_list.extend(iframe_detail_url_list)
                # 详情链接地址和标题关联的字典合并
                detail_links_relation_titles = dict(iframe_detail_url_relation_titles,**detail_url_relation_titles)
                print(detail_links_relation_titles)
                pdb.set_trace()
                detail_url_total = len(detail_url_list)
                if detail_url_total > 0:
                    # 去重
                    detail_url_list_set = set(detail_url_list)
                    for detail_url in detail_url_list_set:
                        #记录数据库中
                        detail_title = detail_links_relation_titles[detail_url]
                        insert_data = dict({'site_url': excel_url, 'detail_url': detail_url, 'detail_title':detail_title, 'catch_result': '', 'catch_status': '1'})
                        db_obj.insert(table='catch_detial_url_list', data=insert_data)
                    print(f'{excel_url}：该地址匹配到详情链接数为{detail_url_total}!')      
                else:
                    insert_data = dict({'site_url': excel_url, 'detail_url': '', 'detail_title':'', 'catch_result': json.dumps(different_content, ensure_ascii=True), 'catch_status': '2'})
                    db_obj.insert(table='catch_detial_url_list', data=insert_data)
                    print(f'{excel_url}：该地址没有匹配到详情链接！')
            except Exception as e:
                print(123)
                print(e.args)
                print(str(e))
                print(repr(e))
                insert_data = dict({'site_url': excel_url, 'detail_url': '', 'detail_title':'', 'catch_result': json.dumps(different_content, ensure_ascii=True), 'catch_status': '2'})
                db_obj.insert(table='catch_detial_url_list', data=insert_data)
                print(f'{excel_url}：该地址抓取内容非html格式！')
        else:
            insert_data = dict({'site_url': excel_url, 'detail_url': '', 'detail_title':'', 'catch_result': json.dumps(different_content, ensure_ascii=True), 'catch_status': '2'})
            db_obj.insert(table='catch_detial_url_list', data=insert_data)
            print(f'{excel_url}：页面抓取内容为空！')
    else:
        #catch_result = HTMLParser().unescape(js_drawing_befor)
        catch_result = json.dumps(filtered_normal_response, ensure_ascii=True)
        insert_data = dict({'site_url': excel_url, 'detail_url': '', 'detail_title':'', 'catch_result': catch_result, 'catch_status': '2'})
        db_obj.insert(table='catch_detial_url_list', data=insert_data)
        print(f'{excel_url}：该地址抓取内容非html格式！')       
    # print("detialLinks:")
    # print(detail_url_list_set)

"""
根据目标网站的a标签的集合获取列表详情的链接地址
site_url 目标网站的链接地址
a_list 目标网站a标签的数据集合
"""
def analysisHrefGetDetaillinks(site_url, a_list):
    url_parse_rsult = urlparse(site_url)
    a_href_paths = []
    a_title_relation_href_dict = {}
    for a_info in a_list:
        if len(a_info.text) <= 5:
            continue
        a_href = a_info['href'].strip('..').strip()
        a_url_parse = urlparse(a_href)
        a_path = a_url_parse.path.strip('..').strip()
        a_scheme = a_url_parse.scheme
        if a_path and (a_href not in illegal_href_words) and (a_scheme not in illegal_scheme_words):
            #a_path_split_res = a_path.split('/')
            a_href_paths.append(a_path)
    detail_url_list = utils.getDetailUrlStr(a_href_paths, page_size, 0)
    detail_link_list = []
    for a_detail in a_list:
        if len(a_detail.text) <= 5:
            continue
        a_href = a_detail['href'].strip('..').strip()
        a_url_parse = urlparse(a_href)
        a_path = a_url_parse.path.strip('..').strip()
        a_scheme = a_url_parse.scheme
        if a_path and (a_href not in illegal_href_words) and (a_scheme not in illegal_scheme_words):
            if detail_url_list:
                for detail in detail_url_list:
                    if a_href.find(detail) >= 0:
                        a_title = utils.filterTitle(a_detail.text)
                        if a_scheme:
                            absolute_url = a_href
                        else:
                            if a_url_parse.netloc:
                                absolute_url = url_parse_rsult.scheme+':'+a_href
                            else:
                                # 兼容a_href不带/的问题
                                absolute_url = url_parse_rsult.scheme+'://'+url_parse_rsult.netloc+'/'+a_href.lstrip('/') 
                                if a_path.find('/') <= 0:
                                    last_slanting_bar_postion = url_parse_rsult.path.rindex('/')
                                    a_url_path = url_parse_rsult.path[:last_slanting_bar_postion]
                                    absolute_url = url_parse_rsult.scheme+'://'+url_parse_rsult.netloc+a_url_path+'/'+a_href.lstrip('/')
                        a_title_relation_href_dict[absolute_url]=a_title
                        detail_link_list.append(absolute_url)
    
    return {'links':detail_link_list,'links_relation_title':a_title_relation_href_dict}

"""
根据详情页链接的正则规则获取列表链接
excel_url 要爬取的地址
headers 自定义header头信息
proxys  自定义代理信息
"""
def getDetailList(excel_url, headers, proxys):
    # 查找翻页关键词
    urlParseRsult = urlparse(excel_url)
    hrefRsult = []
    data = {}

    js_drawing_befor = doGetHtml(excel_url, headers=headers, data=data, proxies=proxys)
    filtered_normal_response = utils.filterTags(js_drawing_befor['content'])
    js_drawing_after = splashCatchHtml(excel_url)
    filtered_splash_html = utils.filterTags(js_drawing_after)

    # 获取不同的内容
    different_content = utils.getDifferentContent(filtered_splash_html, filtered_normal_response)

    soup = BeautifulSoup(different_content)
    #pageNodes = soup.find_all("a",string = re.compile(next_page_words),href=True)
    pageNodes = soup.find_all("a",href=True)
    if pageNodes:
        ## 标记站点是否抓取到所要匹配的详情地址
        is_need_detail_url = 0
        detail_url = ''
        for pageNode in pageNodes:
            hrefRsult = urlparse(pageNode['href'])
            a_href_str = str(pageNode['href'])
            if re.compile(detail_url_keywords, re.I).match(a_href_str): 
                is_need_detail_url = 1
                if hrefRsult.netloc:
                    detail_url = a_href_str    
                else:
                    detail_url = urlParseRsult.scheme+'://'+urlParseRsult.netloc+a_href_str.strip('..').strip()
                #记录数据库中
                insert_data = dict({'site_url': excel_url, 'detail_url': detail_url, 'catch_result': '', 'catch_status': '1'})
                db_obj.insert(table='catch_detial_url_list', data=insert_data)
        #如果没有匹配到所需要详情链接地址则记录日志
        if is_need_detail_url == 0:
            insert_data = dict({'site_url': excel_url, 'detail_url': detail_url, 'catch_result': different_content, 'catch_status': '2'})
            db_obj.insert(table='catch_detial_url_list', data=insert_data)


"""
 分析详情页内容
 excel_url 要爬取的地址
 headers 自定义header头信息
 proxys  自定义代理信息
 target_title 目标详情标题
"""

def analysisPageInfo(excel_url, headers, proxys, target_title):
    data = {}
    page_detail_response = splashGetHtml(excel_url, headers, data, proxys)
    page_detail_content = utils.filterTags(page_detail_response)
    soup = BeautifulSoup(page_detail_content)
    targets = soup.find_all(re.compile("h1|h2|h3|h4|h5|h6", re.I),string=target_title)
    detail_content = ''
    if targets:
        for target in targets:
            target_parents = target.parents
            if target_parents:
                target_parents_list = {}
                i = 1
                for parent in target_parents:
                    target_parents_list[i] = parent
                    i += 1
                target_parents_list_len = len(target_parents_list)    
                if target_parents_list_len > 0:
                    stop_index = target_parents_list_len//2
                    for index,target_parent in target_parents_list.items():
                        if index == stop_index:
                            detail_content = target_parent
                            break
            if len(detail_content) > 0:
                break
        status = 2
        if detail_content:
            insert_detail_content = detail_content
            status = 1
        else:
            insert_detail_content = page_detail_content     
        # 记录到数据库中
        insert_data = dict({'url': excel_url, 'detail_content': insert_detail_content, 'status': status})
        db_obj.insert(table='catch_page_detail', data=insert_data)
    else:
        # 记录到数据库中
        insert_data = dict({'url': excel_url, 'detail_content': page_detail_content, 'status': 2})
        db_obj.insert(table='catch_page_detail', data=insert_data)    
