import requests
from lxml import etree
from selenium import webdriver
import time
from selenium.webdriver.chrome.options import Options
from util.log_handler import LogHandler
import re
import os

def get_filename(path):
	"""Return the base filename of *path*, truncated at the first dot.

	E.g. '/a/b/report.tar.gz' -> 'report'. A leading-dot name like
	'.bashrc' yields '' (same as the historical behavior).
	"""
	base = os.path.basename(path)
	stem, _, _ = base.partition('.')
	return stem

def get_execute_filename():
	"""Return the currently executing script's filename without extension."""
	script_path = __file__
	return get_filename(script_path)

# Module-level logger named after this script's own filename.
log = LogHandler(get_execute_filename())

def makedir(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses exist_ok=True instead of a separate exists() check, which was
    race-prone (TOCTOU): the directory could appear between the check
    and the makedirs() call and raise FileExistsError.
    """
    os.makedirs(path, exist_ok=True)

def read_url_and_title(path):
    """Read all "title,url" lines from the CSV file at *path*.

    Returns the raw lines (trailing newlines included), one per article.

    Fixes: the original opened the file without ever closing it (resource
    leak) and relied on the platform-default encoding, which breaks on
    the Chinese titles in the CSV under non-UTF-8 locales.
    """
    with open(path, 'r', encoding='utf-8') as f:
        return f.readlines()

def save_article(title, article):
    """Save one article body as ./article/<title>.txt.

    title   -- article title; used verbatim as the output filename stem
    article -- full article text to write

    Increments the module-global counter `num` and prints progress.

    Fixes: the output file is now written with explicit UTF-8 so the
    Chinese article text saves correctly regardless of the system locale;
    the hand-rolled split('/')[:-1] + join directory computation is
    replaced by os.path.dirname, which yields the same './article' path.
    """
    global num
    num += 1

    file_path = './article/' + title + '.txt'
    # Ensure the destination directory exists before writing.
    makedir(os.path.dirname(file_path))
    print(num, title, file_path)
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(article)

# Candidate XPaths for the article body, ordered most-specific first.
# The site mixes many page templates, so each pattern is probed in turn
# and the first non-empty match wins. This replaces the original
# 13-level nested-if pyramid while preserving the exact probe order.
_ARTICLE_XPATHS = [
    '//div[@class="news_text"]/div[@class="TRS_Editor"]/p',
    '//div[@class="news_text"]/p',
    '//*[@id="content"]/div[@class="content1"]/div[@class="newsDetails"]/div[@class="con"]/div[@class="TRS_Editor"]/p',
    '//div[@class="TRS_Editor"]',
    '//div[@class="TRS_Editor"]//p',
    '//div[@class="news_text"]/p/span',
    '//div[@class="news_text"]//div/p/span',
    '//div[@class="con"]//p',
    '//div[@class="con"]//div',
    '//div[@class="con"]//p/span',
    '//p[@align="justify"]',
    '//div[@class="con"]',
    '//div[@class="Custom_UnionStyle"]',
]

def selenuim_get_article(driver, title, url):
    """Open an article page with Selenium, extract its text, and save it.

    driver -- live selenium WebDriver instance
    title  -- article title, used as the output filename
    url    -- article page URL

    Logs an error (but still saves an empty file) when no known template
    matches the page.
    """
    driver.get(url)
    print('休息0.1秒')
    # Brief pause to let the page render before querying elements.
    time.sleep(0.1)
    list_article = []
    for xpath in _ARTICLE_XPATHS:
        list_article = driver.find_elements_by_xpath(xpath)
        if list_article:
            break
    # join() avoids quadratic string concatenation over many paragraphs.
    content = ''.join(element.text for element in list_article)
    if not content:
        log.error("文章内容为空: %s,%s"%(url, title))
    save_article(title, content)

# --- Script entry: crawl every article listed in url_and_title.csv ---
ops = Options()
# ops.add_argument('--proxy-server=http://112.87.69.76:9999')
driver = webdriver.Chrome(executable_path='/home/ubuntu/桌面/ljz/dianping/chromedriver',chrome_options=ops)
headers = {
    'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
}

# Warm up the session on the portal front page first.
driver.get('http://www.cma.gov.cn/kppd/')
# Counter of crawled articles; incremented inside save_article().
num = 0
list_content = read_url_and_title('url_and_title.csv')
for content in list_content:
    # Fix: strip only a trailing newline instead of blindly chopping the
    # last character ([:-1]), which corrupted the URL whenever the final
    # line of the CSV had no trailing newline.
    line = content.rstrip('\n')
    title = line.split(',')[0]
    url = line.split(',')[-1]
    selenuim_get_article(driver, title, url)

    # Articles that yielded no content:
    # http://www.cma.gov.cn/kppd/kppdmsgd/201405/t20140519_246700.html