#!/usr/bin/python
# encoding: utf-8
import requests
import threading
from lxml import etree
import sys
import os
import datetime
import re
import random
import time

# Python 2 shim: force the interpreter's default string encoding to UTF-8 so
# implicit str<->unicode conversions of the scraped Chinese text don't raise
# UnicodeDecodeError.  `reload` (as a builtin) and `sys.setdefaultencoding`
# exist only on Python 2; guard them so the script still imports cleanly on
# Python 3, where UTF-8 is already the default.
if sys.version_info[0] == 2:
    reload(sys)
    sys.setdefaultencoding('utf-8')




def getNewUrlList(zhi):
    """Crawl laravelacademy.org index pages 1..zhi-1 and collect post links.

    :param zhi: exclusive upper bound on the index page number to fetch
    :return: dict with a single key 'name' mapping to a list of
             [title, url] pairs, one per article found
    """
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36'}
    dic = {'name': []}

    # range() replaces the manual while/counter; `global url` removed — it
    # only leaked a loop temporary into module scope.
    for count in range(1, zhi):
        page_url = "http://laravelacademy.org/?page=" + str(count)
        response = requests.get(page_url, headers=header)
        html = response.content.decode("utf8")
        selector = etree.HTML(html)
        contents = selector.xpath('//main[@id="main"]/article')

        for eachlink in contents:
            # xpath() returns a (possibly empty) list; guard before indexing
            # so one article with an unexpected header layout doesn't abort
            # the whole crawl with an IndexError.
            hrefs = eachlink.xpath('header/h2/a/@href')
            titles = eachlink.xpath('header/h2/a/text()')
            if hrefs and titles:
                dic['name'].append([titles[0], hrefs[0]])

    return dic


# def getNewContent(urlList):
#     for item in urlList:



def test(posturl):
    """Download every post in *posturl* and save its text under ./file/<timestamp>/.

    :param posturl: iterable of [title, url] pairs as produced by getNewUrlList
    """
    # One timestamped output directory per run, e.g. ./file/2024-01-01-12-00-00
    dir_name = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    out_dir = './file/' + str(dir_name)

    if os.path.exists(out_dir):
        print("在的")
    else:
        print("不在")
        os.mkdir(out_dir)

    # Hoisted out of the loop: the header is identical for every request.
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36'}

    for item in posturl:
        url = item[1]
        response = requests.get(url, headers=header)
        html = response.content.decode("utf8")
        selector = etree.HTML(html)
        bloger = selector.xpath("//div[@class='entry-content']")
        if not bloger:
            # Post removed or page layout changed: skip it instead of
            # crashing the whole run with an IndexError.
            continue
        context = bloger[0].xpath('string(.)').strip()
        print(context)

        # Pseudo-unique file name: two random 6-digit numbers + unix timestamp.
        last = str(random.randint(100000, 900000)) \
            + str(random.randint(100000, 900000)) \
            + str(int(time.time()))
        txtName = out_dir + "/" + last + ".txt"
        # `file()` was a Python-2-only builtin (NameError on Python 3);
        # open() works on both, and the context manager guarantees the
        # handle is closed even if write() raises.
        with open(txtName, "a+") as f:
            f.write(context)


def validateTitle(title):
    """Return *title* with characters that are illegal in filenames removed.

    Strips: / \\ : * ? " < > | 、 and the dot.
    """
    forbidden = '/\\:*?"<>|、.'
    return "".join(ch for ch in title if ch not in forbidden)
# def go(i):
#     print("第"+str(i)+"条阿尔法狗已出击!")
#     urlList= getNewUrlList(16)
#     # info=test(urlList['name'])

if __name__ == "__main__":
    # Crawl index pages 1..93, then download and save every post found.
    posts = getNewUrlList(94)
    test(posts['name'])
   

 