# coding=utf-8

import base64
import encodings
import io
import json
import os
import pdb
import re
import sys
from difflib import Differ
from urllib.parse import quote, urlparse
from warnings import simplefilter

import jieba
import requests
from bs4 import BeautifulSoup
from requests.api import head

import utils
from mysqlAction import mysqlAction

#Force stdout to UTF-8 so scraped Chinese text prints correctly
sys.stdout=io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')
#Pull in the configuration parameters
from config import global_config

#Content-Type values that mark an ajax (JSON) response
content_type = {'application/json;charset=UTF-8','application/json; charset=GBK'}

#Content-Type values accepted from replayed interface responses
interface_content_type = {'application/json;charset=UTF-8','application/json; charset=GBK'}

#Suffixes used to filter out css, js, images and plain html assets
filter_list = {'.css','.js', '.png','.gif','.html'}

#Encodings an ajax request parameter may use
param_encode = {'base64'}

#Extensions treated as page suffixes when rebuilding path rules
regExt = "(html|shtml)"

#Separators used to split URL paths into pieces
stop_words = ['/','.','_',' ']

#Browser identity the crawler presents to target sites
headers={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36'}

#Shared database instance
db_obj = mysqlAction('mysql')

"""
post请求
"""

def doPostRquest(url, data, headers):
    """Send a POST request and return its body and Content-Type.

    Args:
        url: target URL.
        data: payload passed straight to requests.post.
        headers: dict of request headers (e.g. User-Agent).

    Returns:
        dict with keys 'content' (response body when HTTP 200, else '')
        and 'content_type' (value of the Content-Type response header,
        '' when absent). On any request error both values are ''.
    """
    try:
        r = requests.post(url, data=data, headers=headers, verify=False)
        # Only trust the body when the server answered 200.
        content = r.text if r.status_code == 200 else ''
        # .get avoids a KeyError when the server omits Content-Type.
        content_type = r.headers.get('Content-Type', '')
        return {'content': content, 'content_type': content_type}
    except Exception:
        # Callers index the result with ['content'], so returning the
        # exception string (as the old code did) would crash them with
        # a TypeError — return an empty result of the same shape.
        return {'content': '', 'content_type': ''}

"""
处理html的页面
"""

def doGetHtml(url, headers, data):
    """Send a GET request and return its body and Content-Type.

    Args:
        url: target URL.
        headers: dict of request headers.
        data: optional body forwarded to requests.get (None for plain GET).

    Returns:
        dict with keys 'content' (response body when HTTP 200, else '')
        and 'content_type' (Content-Type response header, '' when absent).
        On any request error both values are ''.
    """
    try:
        r = requests.get(url=url, headers=headers, data=data, verify=False)
        content = r.text if r.status_code == 200 else ''
        # .get avoids a KeyError when the server omits Content-Type.
        content_type = r.headers.get('Content-Type', '')
        return {'content': content, 'content_type': content_type}
    except Exception as e:
        print(repr(e))
        # Keep the result shape consistent: the old code returned the
        # string "someting wrong!", which crashed getDefferentHtml when
        # it indexed the result with ['content'].
        return {'content': '', 'content_type': ''}


"""
splash抓取页面的har和html
"""

def splashCatchHarAndHtml(url):
    """Render *url* through Splash and return its decoded JSON result.

    The Splash endpoint is expected to answer a JSON object containing
    'har' (network log) and 'html' (rendered page) keys — see analyse().
    """
    # NOTE(review): this reads config key 'connect_url_json' while the
    # sibling splashCatchHtml reads 'connect_url_har' — the two keys look
    # swapped relative to the variable names; confirm against the config.
    connect_url_har = global_config.getRaw('splashConfig', 'connect_url_json')
    # URL-encode the target so '&'/'?' inside it cannot corrupt the
    # Splash query string.
    splash_url = connect_url_har + '&url=' + quote(url, safe='')
    html_har_response = requests.get(splash_url, verify=False)

    return json.loads(html_har_response.text)


"""
splash抓取页面的html
"""

def splashCatchHtml(url):
    """Render *url* through Splash and return the resulting HTML as text."""
    # NOTE(review): reads config key 'connect_url_har' while
    # splashCatchHarAndHtml reads 'connect_url_json' — the keys look
    # swapped relative to the variable names; confirm against the config.
    connect_url_html = global_config.getRaw('splashConfig', 'connect_url_har')
    # URL-encode the target so '&'/'?' inside it cannot corrupt the
    # Splash query string.
    splash_url = connect_url_html + '&url=' + quote(url, safe='')

    html_response = requests.get(splash_url, verify=False)

    return html_response.text

"""
执行分析内容
"""
def analyse(url):
    """Render *url* through Splash and analyse its ajax traffic.

    Walks every entry of the HAR network log recorded while rendering the
    page, skips static assets, replays the remaining requests and — for
    POST endpoints answering a recognised JSON content type — diffs the
    rendered HTML against the plain HTML to locate the detail links fed
    by the endpoint (see doAnalyse).
    """
    splash_response = splashCatchHarAndHtml(url)
    # HAR network log captured by Splash.
    splash_har = splash_response['har']
    # Page HTML after JavaScript rendering.
    splash_html = splash_response['html']
    for har_key in splash_har:
        for info in splash_har[har_key]['entries']:
            response_url = str(info['response']['url'])
            # Drop css/js/image/plain-html assets.
            if any(response_url.find(suffix) > 0 for suffix in filter_list):
                continue
            request_info = info['request']
            response_headers = info['response']['headers']
            # Rebuild a header dict usable by the requests library.
            new_headers = utils.structureHeader(request_info['headers'])
            # HAR stores headers as a list of {'name':…, 'value':…} dicts;
            # pick out the response Content-Type (may be absent).
            response_content_type = ''
            for res_header in response_headers or []:
                if res_header['name'] == 'Content-Type':
                    response_content_type = res_header['value']
            # A response is analysable when its type is a known ajax JSON
            # type; when the HAR recorded no headers at all we proceed
            # optimistically (same as the original three-way branching).
            known_type = (not response_headers) or (response_content_type in content_type)
            if request_info['method'] == 'POST':
                # Replay the ajax POST with its original (decoded) payload.
                post_param = utils.postDataDecode(request_info['postData'])
                post_res = doPostRquest(response_url, post_param, new_headers)
                if not known_type:
                    # Unknown content type: replayed for inspection only
                    # (the original left a pdb.set_trace() here — removed,
                    # it halted the whole run).
                    continue
                interface_content = post_res['content']
                interface_content_type = post_res['content_type']
                # HTML present only after JS rendering — where the ajax
                # data ends up in the page.
                different_content = getDefferentHtml(url, splash_html)
                doAnalyse(interface_content, interface_content_type,
                          different_content, request_info['method'],
                          response_url, url, post_param)
            elif known_type:
                # GET endpoints are fetched but not analysed yet; the
                # unknown-type GET case stays disabled as in the original.
                doGetHtml(response_url, new_headers, data=None)
                
"""
处理接口数据进行对比
interface_content 接口返回的数据
interface_content_type 接口返回数据的格式
different_content 对比之后的html
request_method 接口请求的方式
response_url 接口请求地址
url 目标地址
post_param 请求接口的参数
"""
def doAnalyse(interface_content, interface_conent_type, different_content, request_method, response_url, url, post_param):
    """Match interface records against the JS-only HTML and record detail-link rules.

    Args:
        interface_content: raw body returned by the ajax endpoint.
        interface_conent_type: Content-Type of that body. NOTE: the
            misspelled name is load-bearing — renaming it would shadow
            the module-level set ``interface_content_type`` used below.
        different_content: dict from getDefferentHtml() holding the HTML
            that only appears after JS rendering.
        request_method: HTTP method the endpoint was called with.
        response_url: the ajax endpoint URL.
        url: the target list page being analysed.
        post_param: parameters that were posted to the endpoint.
    """
    # Only recognised JSON-ish responses can be decoded below.
    if interface_conent_type not in interface_content_type:
        return
    diff_html = different_content['different_content']
    content = json.loads(interface_content)
    # NOTE(review): assumes the endpoint answers
    # {'result': {'records': [{'title': ...}, ...]}} — confirm per site.
    target_content_list = content['result']['records']
    if not target_content_list:
        return
    soup = BeautifulSoup(diff_html)
    for a_info in soup.findAll("a", href=True):
        info_str = str(a_info)
        for target_content in target_content_list:
            # A record title embedded inside an <a> tag marks a detail link.
            if info_str.find(str(target_content['title'])) > 0:
                url_parse_list = urlparse(response_url)
                # Absolute detail URL built from the endpoint's scheme + host.
                detail_url = url_parse_list.scheme + "://" + url_parse_list.netloc + a_info['href']
                # NOTE(review): computed but stored as '' below — the rule
                # extraction looks unfinished; confirm before persisting.
                rule_detail_url = extractRegRuls(a_info['href'])
                insert_data = dict({'target_url': url, 'detail_url': detail_url, 'detail_url_rule': '', 'list_link': response_url, 'list_link_content_type': interface_conent_type, 'list_link_request_type': request_method, 'list_link_param': post_param, 'detail_url_param': '', 'detail_url_rule_param': '', 'status': 0, 'analysis_log': '成功'})
                db_obj.insert(table='analysis_detail_url_rule', data=insert_data)

"""    
# 输入关键字和数据格式组提取生成正则匹配规则
# 优先提取固定关键字
# 识别可变数值
"""

def extractRegRuls(links):
    """Derive a regex-like path rule from one or more links.

    Splits each link's path into pieces, groups the pieces by position,
    and emits a literal for positions that never vary or a generated
    pattern (utils.getRegFromUrlPiece) for positions that do.

    Args:
        links: a single URL string or an iterable of URL strings. The
            original iterated the argument directly, so a bare string
            (as passed by doAnalyse) was processed character by
            character — a single string is now wrapped in a list.

    Returns:
        The concatenated '/'-joined rule string.
    """
    if isinstance(links, str):
        links = [links]

    # Split each link's path into pieces on the configured separators.
    split_paths = [utils.spliUrlIntoPiece(urlparse(link).path, stop_words=stop_words)
                   for link in links]

    # Collect the distinct pieces seen at each path position.
    pieces_by_pos = {}
    for split_path in split_paths:
        for pos, piece in enumerate(split_path):
            bucket = pieces_by_pos.setdefault(str(pos), [])
            if piece not in bucket:
                bucket.append(piece)

    reg_rule = ''
    for pos in pieces_by_pos:
        for url_piece in pieces_by_pos[pos]:
            if len(pieces_by_pos[pos]) > 1:
                # Variable position: generate a pattern for the piece.
                reg_piece = '/' + utils.getRegFromUrlPiece(url_piece)
            else:
                # File extensions (html/shtml) get their dot re-attached.
                if re.match(regExt, url_piece):
                    url_piece = '.' + url_piece
                reg_piece = '/' + url_piece
        # NOTE(review): only the last piece of each position survives —
        # behaviour preserved from the original; confirm it is intended.
        reg_rule = reg_rule + reg_piece

    return reg_rule

"""
返回不同部分的html内容
"""

def getDefferentHtml(url, splash_html):
    """Return the HTML that only exists after JavaScript rendering.

    Fetches *url* without JavaScript, strips tags from both versions and
    keeps the lines present in the rendered page but absent from the
    plain one.

    Returns:
        dict with 'different_content' (joined JS-only lines, '' when the
        pages match) and 'content_type' of the plain response.
    """
    rendered = utils.filterTags(splash_html)
    # Plain (non-rendered) fetch of the same page.
    plain_response = doGetHtml(url, headers, {})
    plain = utils.filterTags(plain_response['content'])

    differ = Differ()
    comparison = differ.compare(rendered.splitlines(), plain.splitlines(False))
    # '- ' lines exist only in the rendered version; strip the diff marker.
    js_only_lines = [line[2:] for line in comparison if line.startswith("-")]

    return {
        'different_content': ''.join(js_only_lines),
        'content_type': plain_response['content_type'] or '',
    }


"""
mysql数据库操作类
"""
#db_obj = mysqlAction('mysql')

#print(db_obj.query("select * from analysis_detail_url_rule limit 2"))

"""
get请求获取目标地址的html的文档
"""
#res = doGetHtml('https://www.ruc.edu.cn/loadnoticelist?tid=1&lid=1')
#parser = BeautifulSoup(res)
#print(parser.body.div)



"""
ajax请求获取目标地址内容进行详情链接地址解析
"""

#http://m.news18a.com/search/
#http://www.sccin.com/InvestmentInfo/ZhaoBiao/Default.aspx?type=PDSY

#ajax的post请求splash渲染的抓取不到接口地址，get请求的能抓取到
#http://www.ccgp-guangxi.gov.cn/reformColumn/ZcyAnnouncement10016/index.html
#https://www.cqggzy.com/jyxx/jyxx-page.html
#http://www.ccgp-shaanxi.gov.cn/notice/list.do?noticetype=3&province=province



#GET-style requests whose results come back as html
#excel_url = 'https://changd.ccgp-hunan.gov.cn/f/m/noticechannel/c_2'

#Triggers the site's anti-crawler protection
#http://zbtb.gd.gov.cn/login
excel_url = 'http://www.yngp.com/bulletin.do?method=toMessagesMore'
excel_url = 'https://www.cqggzy.com/jyxx/jyxx-page.html'
#NOTE(review): only the last assignment below wins; the URLs above are overridden.
excel_url = 'http://www.qhggzyjy.gov.cn/ggzy/jyxx/001001/001001001/secondPage.html'
#excel_url = 'http://ggzy.huaian.gov.cn/EpointWeb/showinfo/jyxxsearch.aspx'
analyse(excel_url)

#Lists embedded via iframe cannot be retrieved
excel_url = 'http://ggzy.huaian.gov.cn/EpointWeb/showinfo/jyxxsearch.aspx'

#excel_url = 'http://www.yngp.com/bulletin.do?method=toMessagesMore'

#excel_url = 'http://zbtb.gd.gov.cn/login'

excel_url = 'https://www.cqggzy.com/jyxx/jyxx-page.html'
#getDefferentHtml(excel_url)

       


