#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import requests
from bs4 import BeautifulSoup
import os.path
from crawler import config

# Root directory that downloaded images are saved under (from project config).
path = config.generalConfig.output_path
# Desktop-browser User-Agent so the site serves normal pages to the crawler.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}


def file_extension(path):
    """Return the file extension of *path*, including the leading dot.

    Returns '' when the path has no extension.
    """
    _, ext = os.path.splitext(path)
    return ext


def mkdir(path):
    """Create directory *path* (and any missing parents) if it does not exist.

    Leading/trailing whitespace and a trailing backslash are stripped from
    *path* first.

    Returns:
        True when the directory was created, False when it already existed.
    """
    # Strip surrounding whitespace and a trailing '\' separator.
    path = path.strip()
    path = path.rstrip("\\")

    if not os.path.exists(path):
        os.makedirs(path)
        # Bug fix: the original used Python-2 style bare `print` statements
        # (`print` on its own line followed by the message expression), which
        # output nothing under Python 3.
        print(path + ' 创建成功')
        return True
    else:
        # Directory already exists; report and skip creation.
        print(path + ' 目录已存在')
        return False


def path_manage(path):
    """Return the portion of *path* before the first '|' separator.

    Bug fix: the original used `path[:path.find('|')]`; when no '|' was
    present, `find` returned -1 and the last character was silently
    dropped.  With `partition`, the whole string is returned unchanged
    in that case.
    """
    head, _, _ = path.partition('|')
    return head


def date_manage(tmp_date):
    """Extract the date token from a '发表于: <date> <time>' string.

    Strips the '发表于: ' prefix and returns the text up to the first
    space.

    Bug fix: the original used `find(" ")` + slicing; when no space was
    present, `find` returned -1 and the last character was silently
    dropped.  `partition` returns the whole remainder in that case.
    """
    tmp_date = tmp_date.replace('发表于: ', '')
    return tmp_date.partition(" ")[0]


def path_clean(path):
    """Return *path* with every '?' removed (illegal in Windows file names)."""
    return path.replace('?', '')


def get_title(url):
    """Fetch the article page at *url* and return '<date><title>'.

    The page is decoded as GBK.  The title comes from the 'title' div
    (newlines removed); the date is the text after '时间：' in the 'info'
    div, with all spaces stripped.
    """
    response = requests.get(url, headers=headers)
    response.encoding = 'gbk'
    soup = BeautifulSoup(response.text, 'lxml')

    raw_title = soup.find('div', {'class': 'title'}).text
    raw_title = raw_title.replace("\n", "").replace("\r", "")

    info_text = soup.find('div', {'class': 'info'}).text
    posted = info_text[info_text.find("时间：") + 3:]
    posted = posted.replace(" ", "")

    return posted + raw_title


def get_links(url):
    """Return *url* followed by the URLs of the article's other pages.

    Pagination anchors are read from the 'cupage' div; relative hrefs are
    resolved against the directory part of *url*.  The last anchor is
    skipped (NOTE(review): presumably a duplicate or "next" link — the
    original did the same; confirm against the site's pager markup).
    """
    response = requests.get(url, headers=headers)
    response.encoding = 'gbk'
    soup = BeautifulSoup(response.text, 'lxml')

    base = url[:url.rfind("/")]
    anchors = soup.find('div', {'class': 'cupage'}).select('a')

    return [url] + [base + '/' + a.get('href') for a in anchors[:-1]]


def img_download(links, title):
    """Download every image referenced by the article pages in *links*.

    Each page is fetched (GBK-decoded) and parsed; every <img> inside the
    'content' div is collected, then saved under ``path + title + '/'`` as
    001.<ext>, 002.<ext>, ...  Image src values are resolved against the
    site root.
    """
    site_root = 'http://www.cecet.cn'
    download_links = []
    for link in links:
        page = requests.get(link, headers=headers)
        page.encoding = 'gbk'
        soup = BeautifulSoup(page.text, 'lxml')
        content = soup.find('div', {'class': 'content'})
        for image in content.find_all('img'):
            download_links.append(site_root + image.get('src'))

    # Nothing to save: keep the original behaviour of not creating a directory.
    if not download_links:
        return

    # Loop-invariant hoist: the target directory only depends on *title*,
    # so clean and create it once instead of re-running mkdir per image.
    tmp_dir = path_clean(path + title + "/")
    tmp_dir = tmp_dir.replace("<", "").replace(">", "")
    print(tmp_dir)
    mkdir(tmp_dir)

    for i, item in enumerate(download_links, start=1):
        # Send the same browser User-Agent as every other request in this
        # module (the original omitted headers only here).
        res = requests.get(item, headers=headers)
        print(item)
        with open(tmp_dir + str(i).zfill(3) + file_extension(item), 'wb') as f:
            f.write(res.content)


def read_urls(feed_file=None):
    """Read whitespace-separated URLs from the feed file.

    Args:
        feed_file: path of the feed file to read; defaults to the
            project-configured ``config.generalConfig.feed_file``
            (backward compatible with the old no-argument call).

    Returns:
        List of URL strings.

    Bug fix: the original opened the file without ever closing it; a
    ``with`` block now guarantees the handle is released.
    """
    if feed_file is None:
        feed_file = config.generalConfig.feed_file
    with open(feed_file) as fh:
        # str.split() with no argument splits on any whitespace run,
        # exactly like the original join-then-split.
        urls = fh.read().split()
    print(len(urls))
    return urls


def main():
    """Crawl every feed URL: resolve its title and page links, then
    download all images for it."""
    for url in read_urls():
        print(url)
        title = get_title(url)
        page_links = get_links(url)
        img_download(page_links, title)
    print('下载完成')


# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()

