import requests
from lxml import etree
import re
import time
import os
# from util.log_handler import LogHandler

# Return a path's base file name without its extension.
def get_filename(path):
    """Return the base name of *path* with its final extension stripped.

    Uses ``os.path.splitext`` so a dotted name like ``archive.tar.gz``
    yields ``archive.tar`` (the original ``split('.')[0]`` truncated at
    the first dot).

    Args:
        path: file path, absolute or relative.

    Returns:
        The file name component without the trailing extension.
    """
    return os.path.splitext(os.path.basename(path))[0]

# Name (without extension) of the currently executing script.
def get_execute_filename():
    """Return this script's file name with its extension removed."""
    base_name = os.path.basename(__file__)
    return base_name.split('.')[0]

# log = LogHandler(get_execute_filename())

base_url = 'http://www.cma.gov.cn/kppd/'
'''
# 气象视界
    http://www.cma.gov.cn/kppd/kppdqxsj/kppdtqqh/ # 天气气候
    http://www.cma.gov.cn/kppd/kppdqxsj/kppdqxgc/ # 观测和预报
    http://www.cma.gov.cn/kppd/kppdqxsj/kppdqhbh/ # 气候变化
    http://www.cma.gov.cn/kppd/kppdqxsj/kppdrgyxtq/ # 人工影响天气
    http://www.cma.gov.cn/kppd/kppdqxsj/kppdldfh/ # 雷电防护
    http://www.cma.gov.cn/kppd/kppdqxsj/kppdhwsm/ # 气象观测
# 名士观点
http://www.cma.gov.cn/kppd/kppdmsgd/
# 气象与人
    http://www.cma.gov.cn/kppd/kppdqxyr/kppdnyqx/ # 农业气候
    http://www.cma.gov.cn/kppd/kppdqxyr/kppdshqx/ # 生活气象
    http://www.cma.gov.cn/kppd/kppdqxyr/kppdtyqx/ # 体育气象
    http://www.cma.gov.cn/kppd/kppdqxyr/kppdjtqx/ # 交通气象
    http://www.cma.gov.cn/kppd/kppdqxyr/kppdjsqx/ # 生态气象
    http://www.cma.gov.cn/kppd/kppdqxyr/kppdxyqx/ # 校园气象
# 科技之光
http://www.cma.gov.cn/kppd/kppdkjzg/
# 奇文轶事
http://www.cma.gov.cn/kppd/kppdqxwq/kppdqwys/ 
# 科普动态
http://www.cma.gov.cn/kppd/kppdkpdt/ 
'''
# Section paths relative to base_url; each entry carries a `{}` placeholder
# for the page file name (index.html, index_1.html, ...).
# NOTE: no entry may start with '/': base_url already ends with '/', so a
# leading slash (as the original 'kppdqxsj/kppdqxgc' entry had) produced a
# malformed '.../kppd//...' double-slash URL.
list_path = [
            'kppdqxsj/kppdhwsm/{}', 'kppdmsgd/{}', 'kppdqxyr/kppdnyqx/{}', 'kppdqxyr/kppdshqx/{}', 'kppdqxyr/kppdtyqx/{}',
            'kppdqxsj/kppdtqqh/{}', 'kppdqxsj/kppdqxgc/{}', 'kppdqxsj/kppdqhbh/{}', 'kppdqxsj/kppdrgyxtq/{}', 'kppdqxsj/kppdldfh/{}',
            'kppdqxyr/kppdjtqx/{}', 'kppdqxyr/kppdjsqx/{}', 'kppdqxyr/kppdxyqx/{}', 'kppdkjzg/{}', 'kppdqxwq/kppdqwys/{}', 'kppdkpdt/{}'
]

# Desktop browser User-Agent to avoid trivial bot blocking.
headers = {
    'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
}

# Get the maximum page count of a listing section.
def get_max_number(url):
    """Return the number of listing pages for a section index URL.

    The page count is not in the static DOM — it is emitted by a
    ``createPageHTML(N, ...)`` JavaScript call — so it is extracted with
    a regex rather than XPath.

    Args:
        url: the section's ``index.html`` URL.

    Returns:
        The page count as an ``int``.

    Raises:
        ValueError: if the ``createPageHTML`` marker is absent (instead
            of the opaque ``AttributeError`` the unguarded ``.group``
            call used to raise).
    """
    response = requests.get(url=url, headers=headers, timeout=30)
    response.encoding = 'utf8'
    # Raw string avoids invalid-escape-sequence warnings for \( and \d.
    match = re.search(r'createPageHTML\((\d{1,3})', response.text)
    if match is None:
        raise ValueError('page-count marker not found in ' + url)
    return int(match.group(1))

def save_url(url, title):
    """Append one ``url,title`` row to ``url_list.csv``.

    Written with an explicit UTF-8 encoding so Chinese titles are stored
    correctly regardless of the platform's default locale encoding.

    Args:
        url: article URL.
        title: article title (anchor text).
    """
    with open('url_list.csv', 'a', encoding='utf-8') as f:
        content = url + ',' + title + '\n'
        f.write(content)

def save_info(info):
    """Append *info* verbatim to ``info.csv``.

    Uses an explicit UTF-8 encoding so the Chinese progress strings the
    crawler builds are written correctly on any platform.
    """
    with open('info.csv', 'a', encoding='utf-8') as f:
        f.write(info)

# Get the list of article URLs (and their titles) from one listing page.
def get_list_url(url):
    """Return ``(list_url, list_title)`` for one listing page.

    Bug fix: the original applied the same href-capturing pattern twice,
    so ``list_title`` was just a second copy of the URL list.  A single
    regex with two groups now captures each link together with its
    anchor text, which also guarantees the two lists stay aligned.

    Args:
        url: full URL of the listing page.

    Returns:
        Tuple of (hrefs, anchor texts), index-aligned.
    """
    response = requests.get(url=url, headers=headers, timeout=30)
    response.encoding = 'utf8'
    time.sleep(2)  # be polite between listing requests
    # Capture (href, text) pairs in one pass; [^>]* skips extra attributes.
    pairs = re.findall(r'<a href="(.*?\.html)"[^>]*>(.*?)</a>', response.text)
    list_url = [href for href, _ in pairs]
    list_title = [text for _, text in pairs]
    return (list_url, list_title)

# Fetch one article (currently only records its URL; content extraction
# below is disabled).
def get_article(url, title):
    """Record the article link; the body-scraping code is commented out.

    Args:
        url: absolute URL of the article page.
        title: anchor text of the article link (would become the file
            name in ``save_article`` if scraping were re-enabled).
    """
    # print('获取文章:', url)
    save_url(url, title)
    # response = requests.get(url=url, headers=headers)
    # response.encoding='utf8'
    # html = etree.HTML(response.text)
    # # The article body comes in one of 3 layouts
    # list_article = html.xpath('//*[@id="content"]/div[@class="content1"]/div[@class="newsDetails"]/div[@class="con"]/div[@class="TRS_Editor"]/p/text()')
    # if not list_article:
    #     list_article = html.xpath('//div[@class="TRS_Editor"]/p/text()')
    #     if not list_article:
    #         list_article = html.xpath('//div[@class="TRS_Editor"]/text()')
    # # An article is split across multiple <p> tags; join their text
    # article = ''.join(list_article)
    # if not article:
    #     log.error("文章内容为空: %s,%s"%(url, title))
    # save_article(title, article)

# Create the folder if it does not exist yet.
def makedir(path):
    """Create *path* (including parent directories) if missing.

    ``exist_ok=True`` removes the check-then-create race of the original
    ``os.path.exists`` guard: a directory created by another process in
    between no longer raises.
    """
    os.makedirs(path, exist_ok=True)

# Save an article as a .txt file under app/qixiangkepu/articles.
def save_article(title, content):
    """Write *content* to ``<cwd>/app/qixiangkepu/articles/<title>.txt``.

    The output directory is created on demand, and the file is written
    as UTF-8 so Chinese text survives regardless of the platform's
    default encoding.

    Args:
        title: article title, used as the file name stem.
        content: full article text.
    """
    now_path = os.getcwd()
    relative_path = 'app/qixiangkepu/articles'
    path = os.path.join(now_path, relative_path)
    os.makedirs(path, exist_ok=True)
    file_path = os.path.join(path, title + '.txt')
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(content)

# Crawl every section: walk each listing page and print (or, when
# re-enabled, save via get_article) the resolved article URL and title.
#
# Bug fix: the original reassigned `url` inside the innermost loop when
# resolving root-relative links, clobbering the section URL template so
# every later `url.format(...)` on the same page built a broken address.
# A dedicated `section_url` name keeps the template intact, and the two
# nearly identical num==0 / num!=0 branches are merged.
for path_name in list_path:
    section_url = base_url + path_name
    max_num = get_max_number(section_url.format('index.html'))
    # save_info('最大页数:' + str(max_num))
    for num in range(max_num):
        # Page 0 is index.html; later pages are index_1.html, index_2.html, ...
        page_name = 'index.html' if num == 0 else 'index_' + str(num) + '.html'
        list_url, list_title = get_list_url(section_url.format(page_name))
        for link, title in zip(list_url, list_title):
            if link.startswith('http'):
                # Type 2: already an absolute URL (e.g. /2011xzt/... hosts)
                # get_article(link, title)
                print(link, title)
            elif link.startswith('/'):
                # Type 3: root-relative link on www.cma.gov.cn
                full_url = 'http://www.cma.gov.cn' + link
                # get_article(full_url, title)
                print(full_url, title)
            else:
                # Type 1: relative to the section directory
                # get_article(section_url.format(link), title)
                print(section_url.format(link), title)

# with open('qixiangkepu.html', 'wb') as f:
#     f.write(response.content)