import requests
# json is imported to turn raw response payloads into human-readable JSON
import json
import re
from bs4 import BeautifulSoup
from urllib import parse
from urllib.parse import urlparse
import numpy as np
import pandas as pd
import jieba

from sklearn.cluster import KMeans

 
# Temporary FIFO queue of URLs still waiting to be crawled
tmplist = []
# Every URL collected so far
urlList = []
# Crawl-depth bookkeeping dictionary (keyed per URL)
depthDict = {}
# Validates that a string is a plain http(s) domain URL
patternDomain = re.compile(
    r'^(http://|https://)(([a-zA-Z]{1})|([a-zA-Z]{1}[a-zA-Z]{1})|'
    r'([a-zA-Z]{1}[0-9]{1})|([0-9]{1}[a-zA-Z]{1})|'
    r'([a-zA-Z0-9][-_.a-zA-Z0-9]{0,61}[a-zA-Z0-9]))\.'
    r'([a-zA-Z]{2,13}|[a-zA-Z0-9-]{2,30}.[a-zA-Z]{2,3})\/*(\/?)$'
)
# Tag names treated as candidate parent/container nodes
parent_node_words = ["p","div","table","ul"]
# Pattern for locating "next page" style link text (Chinese + English variants)
next_page_words = "[后一页|下一页|下页|next page]"

# Separator characters dropped when splitting a URL path into tokens
stop_words = ['/','.','_',' ']

# URL schemes considered invalid
illegal_scheme_words = ['javascript']

# href values considered invalid links
illegal_href_words = ['###','javascript:;','javacript:;']

# Keywords hinting that a page is a listing page
listKeyWords = "[后一页|下一页|下页|next page]"

# Heading tags that suggest a detail-page title
title_node_words = ['h1','h2','h3','h4','h5','h6']

# Tags that suggest detail-page content
content_node_words = ['div','p','a']
# Supported data types (currently an unused placeholder)
dataTypes = ['int','string']

# Regex recognizing a UUID (8-4-4-4-12 word characters)
regUUID = "(\\w{8}(-\\w{4}){3}-\\w{12}?)"

# Regex recognizing an MD5-like hex string
regMd5 = "[a-fA-F0-9]{16,32}"

# Regex recognizing common page-file extensions
# regExt = "[shtml|html|php|jsp|asp|aspx|do|go]"
regExt = "(html|shtml)"

def getRegFromUrlPiece(value):
    """Map one URL-path token to a regex fragment matching tokens of the
    same shape.

    Recognition order: special string classes first (UUID, MD5-like hex),
    then pure numbers, then page-file extensions; any other string falls
    back to a fixed-length non-whitespace match.

    Returns the regex fragment (str), or False when *value* is not a str.
    """
    if re.match(regUUID, value):
        return regUUID
    if re.match(regMd5, value):
        return regMd5
    if is_number(value):
        # Fix: raw strings so '\d'/'\S' are literal regex syntax instead of
        # (deprecated) invalid string escape sequences.
        return r'\d{' + str(len(value)) + r'}'
    if re.match(regExt, value):
        # Fix: escape the dot so the fragment matches a literal '.' before
        # the extension rather than any character.
        return r'\.' + value
    if isinstance(value, str):
        return r'\S{' + str(len(value)) + r'}'
    return False

# Split a URL path into analyzable pieces
def spliUrlIntoPiece(pathStr):
    """Tokenize *pathStr* with jieba (precise mode) and drop separator
    tokens listed in the module-level ``stop_words``."""
    tokens = jieba.lcut(pathStr, cut_all=False)
    return [tok for tok in tokens if tok not in stop_words]

# Build a regex matching rule from a group of example links:
# fixed path segments are kept verbatim, variable segments are generalized.
def extractRegRuls(links):
    """Derive one regex that matches the URL paths of all *links*.

    Each link's path is tokenized via spliUrlIntoPiece; tokens are grouped
    by their position index. A position with a single distinct token stays
    literal; a position with several distinct tokens is generalized through
    getRegFromUrlPiece.

    NOTE(review): when a position holds several distinct tokens, only the
    fragment computed for the LAST token survives (`regRule` is extended
    outside the inner loop); with an empty *links* list, `regPiece` would be
    unbound — presumably callers always pass a non-empty group. Confirm
    before reusing elsewhere.
    """
    regRule = ''
    splitPaths = []
    extractRuleFromPiece = {}
    for link in links:
        parsedUrl = urlparse(link)
        path = parsedUrl.path
#         splitPaths.append(getDataFormat(path))
        splitPaths.append(spliUrlIntoPiece(path))

    
    # Group tokens by their position index across all tokenized paths.
    for splitPath in splitPaths:
        i = 0
        while(i < len(splitPath)):
            if (str(i) not in extractRuleFromPiece):
                extractRuleFromPiece[str(i)] = []
                
            if (splitPath[i] not in extractRuleFromPiece[str(i)]):
               extractRuleFromPiece[str(i)].append(splitPath[i]) 
            i = i +1
        
    # Emit one '/'-prefixed fragment per position.
    for e in extractRuleFromPiece:
        for urlpiece in extractRuleFromPiece[e]:
            if(len(extractRuleFromPiece[e])>1):
                regPiece = '/' + getRegFromUrlPiece(urlpiece)
            else:
                if(re.match(regExt,urlpiece)):
                    urlpiece = '.' + urlpiece
                regPiece = '/' + urlpiece
        regRule = regRule + regPiece
    
    return regRule

# Decide whether a page looks like a detail page; used to check whether a
# group of candidate links forms a recognizable listing.
def is_detail_page(urlDetail):
    """Fetch *urlDetail* and heuristically decide whether it is a detail page.

    A page counts as a detail page when it has at least one heading tag and
    contains grouped sibling content nodes whose combined text length
    exceeds 100 characters.

    NOTE(review): performs a live HTTP request (verify=False) and relies on
    the module globals `headers`, `title_node_words`, `content_node_words`
    and `parent_node_words`.
    """
    responseDetail=requests.get(url=urlDetail,headers=headers,verify=False)
    soupDetail = BeautifulSoup(responseDetail.content)
    titles = soupDetail.findAll(title_node_words)
#     print('title counts:%d'%(len(titles)))
    if(len(titles) > 0):
        nodeCounts = []
        listNodes = []
        sortedList = {}
        
        # Scan leaf-like content nodes carrying substantial text (>100 chars).
        for c in soupDetail.findAll(content_node_words):
            
            if((len(c.find_all(content_node_words)) < 1) and (len(c.text) > 100)):
                nodeCount = {}
                c_parent = c.find_parent(parent_node_words)
                if(c_parent != None):
                    b = c_parent.find_all(content_node_words)
                    # Compare against nodes seen earlier: same parent and same
                    # sibling count means a repeated (list-like) structure.
                    for nodeCount in nodeCounts:
                        if (str(nodeCount['parent_count']) not in sortedList) and (nodeCount['parent_count'] > 1):
                            sortedList[str(nodeCount['parent_count'])] = []
                        if((nodeCount['parent_count'] == len(b)) and (nodeCount['parentNode'] == c.find_parent(parent_node_words)) and (len(b) > 1)):
                            listNodes.append(c)
                            # TODO: the scheme value needs separate validation here
                            if c.text not in sortedList[str(nodeCount['parent_count'])]:
                                sortedList[str(nodeCount['parent_count'])].append(c.text);

                    # NOTE(review): the inner `for` rebinds `nodeCount`, so when
                    # nodeCounts is non-empty these writes mutate the LAST stored
                    # dict, not the fresh `{}` created above — looks unintended;
                    # confirm before relying on this behavior.
                    nodeCount['parent_count'] = len(b)
                    nodeCount['parentNode'] = c.find_parent(parent_node_words)
                    nodeCounts.append(nodeCount)
        # Accept the page once any grouped content exceeds 100 chars in total.
        for t in sortedList:
            contentLength = 0
            for content in sortedList[t]:
                contentLength = contentLength + len(content)
                if(contentLength>100):
                    return True
    else:
        return False
 
    return False

def is_number(s):
    """Return True if *s* represents a number.

    Accepts anything parseable by float() (e.g. '10', '3.14', '1e3') or a
    single Unicode numeric character (e.g. '½', '三').

    Fix: the first branch now also catches TypeError, so non-string inputs
    such as None return False instead of raising.
    """
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        pass

    try:
        import unicodedata
        unicodedata.numeric(s)
        return True
    except (TypeError, ValueError):
        pass

    return False
# Browser identity (User-Agent) presented by the crawler
headers={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36'}
 
# Target URL to crawl
url='http://www.cau.edu.cn/col/col10353/index.html'
# url='http://ggzyjy.abazhou.gov.cn/ggzyjyzx/c106701/common_list_2.shtml'
# url='http://www.zmzb.com/cms/channel/ywgg1hw/index.htm'

# Currently not crawlable
# url='http://119.164.253.173:8080/jngp2016/site/listInfo2.jsp?colid=121'
# url='http://ggzy.yn.gov.cn/jyxx/jsgcZbgg'
# Regex computed incorrectly
# url='http://www.sxggzyjy.cn/jydt/001001/001001001/001001001001/2.html'
# no list 
# url='http://www.ccgp-anhui.gov.cn/cmsNewsController/getCgggNewsList.do?pageNum=2&numPerPage=20&title=&buyer_name=&agent_name=&proj_code=&bid_type=&type=&dist_code=&pubDateStart=&pubDateEnd=&pProviceCode=&areacode_city=&areacode_dist=&channelCode=cggg&three='


# LocationParseError: Failed to parse: 'www.gzebpubservice.cn..', label empty or too long
# url='http://www.gzebpubservice.cn/fjzbgg/index.htm'

 
# Request parameters sent to the server (none at the moment)
data={
    }

urlParseRsult = urlparse(url)
parsedUrls = []
 
# Fetch the target page (runs at import time)
response=requests.get(url=url,headers=headers,data=data)
soup = BeautifulSoup(response.content)
# Accumulators for the anchor-grouping pass below
nodeCounts = []
listNodes = []
pageNodes = []
# sortedNodes = p
sortedList = {}
comfirmedLinks = []
    
# Scan every anchor on the page, grouping links that share the same parent
# container and sibling count — such groups are listing candidates.
for a in soup.findAll('a',href=True):
#     if re.findall('sharejs', a['href']):
#     print("Found the URL:", a['href'])
    nodeCount = {}
    parseRsult = urlparse(a['href'])
    arr = {}
    arr = {'scheme':parseRsult.scheme,'netloc': parseRsult.netloc,'path': parseRsult.path,'params': parseRsult.params,'query': parseRsult.query}
    # Relative link: inherit the netloc of the page being crawled
    if(len(parseRsult.netloc)<=0 and parseRsult.scheme not in illegal_scheme_words):
        arr['netloc'] = urlParseRsult.netloc
    if((parseRsult.scheme not in illegal_scheme_words) and (a['href'] not in illegal_href_words)):
        parsedUrls.append(arr)
        
        a_parent = a.find_parent(parent_node_words)
        if(a_parent != None):
            b = a_parent.find_all("a")
        # NOTE(review): when a_parent is None, `b` keeps its value from a prior
        # iteration (NameError on the very first) — confirm all anchors have a
        # parent among parent_node_words.
        for nodeCount in nodeCounts:
            if (str(nodeCount['parent_count']) not in sortedList) and (nodeCount['parent_count'] > 1):
                sortedList[str(nodeCount['parent_count'])] = []
            if((nodeCount['parent_count'] == len(b)) and (nodeCount['parentNode'] == a.find_parent(parent_node_words)) and (len(b) > 1)):
                listNodes.append(a)
                # TODO: the scheme value needs separate validation here
                if ((urlParseRsult.scheme+'://'+arr['netloc']+arr['path']+arr['params'] not in sortedList[str(nodeCount['parent_count'])]) and (len(arr['path'])>1)):
                    sortedList[str(nodeCount['parent_count'])].append(urlParseRsult.scheme+'://'+arr['netloc']+arr['path']+arr['params']);
        
        # NOTE(review): the inner `for` rebinds `nodeCount`, so when nodeCounts
        # is non-empty these writes mutate the LAST stored dict rather than the
        # fresh `{}` created at the top of this iteration.
        nodeCount['parent_count'] = len(b)
        nodeCount['parentNode'] = a.find_parent(parent_node_words)
        nodeCounts.append(nodeCount)
    
    
# Probe each candidate group: if any member looks like a detail page,
# accept the whole group as the confirmed listing links.
for groupKey in sortedList:
    for candidateLink in sortedList[groupKey]:
        if is_detail_page(candidateLink):
            comfirmedLinks = sortedList[groupKey]
            break


print("comfirmedLinks:")
print(comfirmedLinks)


nodeCounts = []
listNodes = []
sortedList = {}
# Pagination-node analysis.
# TODO: still under development — collect all pagination nodes, find the
# min/max page numbers and the current page, and detect the pagination
# mechanism (plain link / js function call / ajax request).
pageNodes = soup.find_all("a",string = re.compile(next_page_words))
maxPage = 0
minPage = 0
currentPage = 0
paginationType = 'link' # link，function，ajax
for pageNode in pageNodes:
    # Fix: the original loop reused `a`/`arr`/`b` left over from the earlier
    # anchor loop; parse this pagination node's own href instead.
    pageParse = urlparse(pageNode.get('href', ''))
    pageArr = {'scheme': pageParse.scheme, 'netloc': pageParse.netloc,
               'path': pageParse.path, 'params': pageParse.params,
               'query': pageParse.query}
    # Relative link: inherit the netloc of the page being crawled
    if len(pageParse.netloc) <= 0 and pageParse.scheme not in illegal_scheme_words:
        pageArr['netloc'] = urlParseRsult.netloc
    p_parent = pageNode.find_parent(parent_node_words)
    if p_parent is None:
        # Fix: previously `b` kept a stale value when no parent was found.
        continue
    b = p_parent.find_all(content_node_words)
    for nodeCount in nodeCounts:
        if (str(nodeCount['parent_count']) not in sortedList) and (nodeCount['parent_count'] > 1):
            sortedList[str(nodeCount['parent_count'])] = []
        if ((nodeCount['parent_count'] == len(b))
                and (nodeCount['parentNode'] == p_parent)
                and (len(b) > 1)):
            listNodes.append(pageNode)
            # TODO: the scheme value needs separate validation here
            # Fix: the original concatenation was missing '://' after the scheme.
            pageLink = urlParseRsult.scheme + '://' + pageArr['netloc'] + pageArr['path'] + pageArr['params']
            if (pageLink not in sortedList[str(nodeCount['parent_count'])]) and (len(pageArr['path']) > 1):
                sortedList[str(nodeCount['parent_count'])].append(pageLink)

    # Fix: store a fresh dict per node — the original mutated whatever dict
    # `nodeCount` happened to reference after the inner loop.
    nodeCount = {'parent_count': len(b), 'parentNode': p_parent}
    nodeCounts.append(nodeCount)

# Report when no listing group was confirmed.
if not comfirmedLinks:
    print('no list detected')

# print(pageNodes)


# Derive the generalized regex rule from the confirmed detail-page links.
regRule = extractRegRuls(comfirmedLinks)

print("regRule:")
print(regRule)
# print("/ggzyjyzx/c106701/\d{6}/\s{16,32}.shtml'")
