# coding:utf-8
import urllib.request

# Use the etree library from lxml
import redis
from lxml import etree
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import http.client
import requests

# URL paths of every scraped article; filled by getArticlesList(), read in __main__.
articles_urls=[]
def getPage():
    """Fetch the author's article-list page and return the page count.

    Returns:
        int: number of article-list pages, derived from the pagination
        widget (item count minus the trailing "next" link); 1 when no
        pagination widget is present.

    Raises:
        urllib.error.URLError: if the request fails or times out.
    """
    # Disable certificate verification (site cert chain fails locally).
    ssl._create_default_https_context = ssl._create_unverified_context
    url = "https://segmentfault.com/u/xiaomage_5a60560a90a98/articles"
    header = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "x-hit": "web2",
    }
    request = urllib.request.Request(url, headers=header)
    # timeout keeps the script from hanging forever on a stalled connection
    response = urllib.request.urlopen(request, timeout=30).read().decode("utf-8")
    # Parse the HTML string into an element tree for XPath queries.
    html = etree.HTML(response)
    pagination = html.xpath("//ul[@class='pagination']")
    if not pagination:
        # No pagination widget: everything fits on a single page.
        return 1
    # Exclude the trailing "next page" item from the count.
    return len(pagination[0]) - 1

def getArticlesList(pageNumber):
    """Fetch one page of the author's article list and collect article URLs.

    Appends each article's URL path (the ``href`` of its title link) to
    the module-level ``articles_urls`` list.

    Args:
        pageNumber (int): 1-based index of the list page to fetch.

    Raises:
        urllib.error.URLError: if the request fails or times out.
    """
    # Disable certificate verification (site cert chain fails locally).
    ssl._create_default_https_context = ssl._create_unverified_context
    url = "https://segmentfault.com/u/xiaomage_5a60560a90a98/articles?page="+str(pageNumber)
    print(url)
    header = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "x-hit": "web2",
    }
    request = urllib.request.Request(url, headers=header)
    # timeout keeps the script from hanging forever on a stalled connection
    response = urllib.request.urlopen(request, timeout=30).read().decode("utf-8")
    # Parse the HTML string into an element tree for XPath queries.
    html = etree.HTML(response)
    containers = html.xpath("//div[@class='col-md-10 profile-mine']/ul[@class='profile-mine__content']")
    if not containers:
        # Layout changed or the page is empty -- nothing to collect.
        return
    items = containers[0]
    print(len(items))
    for item in items:
        # item[0][1][0] is the <a> element holding the article link.
        # NOTE(review): fixed positional indexing -- breaks if markup changes.
        articles_urls.append(item[0][1][0].get("href"))

def getDetail(xx):
    """Download one article page from segmentfault.

    Args:
        xx (str): URL path of the article (as collected by
            getArticlesList), appended to the site root.

    Returns:
        bytes: raw response body. (The original discarded the body into
        a misspelled local ``reponse``; returning it is backward
        compatible since existing callers ignore the return value.)

    Raises:
        urllib.error.URLError: if the request fails or times out.
    """
    # Disable certificate verification (site cert chain fails locally).
    ssl._create_default_https_context = ssl._create_unverified_context
    url = "https://segmentfault.com"+xx
    print("访问网址",url)
    header = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"}
    request = urllib.request.Request(url, headers=header)
    # timeout keeps the script from hanging forever on a stalled connection
    response = urllib.request.urlopen(request, timeout=30).read()
    return response


# Pass counter for the repeated-fetch loop below.
index = 0

if __name__ == '__main__':
    # Discover how many list pages exist, then harvest every article URL.
    total_pages = getPage()
    print(total_pages)
    for page_number in range(1, total_pages + 1):
        getArticlesList(page_number)

    # Repeatedly re-download every collected article, 10000 passes total.
    while index < 10000:
        for article_path in articles_urls:
            getDetail(article_path)
        index += 1
        print("线程：%d，index%d" % (1, index))



