#!/usr/bin/env python
#-*- coding:utf-8 -*-
import urllib2
from bs4 import BeautifulSoup
import socket
import cookielib
import urllib

# Site root and the chapter-index page this scraper starts from.
url_prev = 'http://www.17k.com'
baseurl = "http://www.17k.com/list/493239.html"
# Spoof a browser User-Agent so the site does not block the scraper.
# FIX(review): the original value was two UA strings pasted together
# ("Mozilla/5.0 (windowsMozilla/5.0 (X11; ...") with unbalanced parens;
# collapsed to the single coherent Chromium-on-Linux UA it contained.
req_header = {'User-Agent':r'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/34.0.1847.116 Chrome/34.0.1847.116 Safari/537.36'}
# Per-request socket timeout in seconds (passed to urlopen by user_agent()).
req_timeout = 20
# Cookie jar + opener so session cookies persist across requests.
# NOTE(review): the opener is built but never installed (see the commented
# install_opener below), so urllib2.urlopen ignores it — confirm intent.
cj = cookielib.LWPCookieJar()
cookie_support = urllib2.HTTPCookieProcessor(cj)
opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
#urllib2.install_opener(opener)
# Captured session/login payload. Currently unused: user_agent() sends
# data=None. Percent-escaped values look GBK-encoded — TODO confirm.
postdata = {
        'supt':1,
        'userReadSet':1,
        'gotoUrl':'%2Fnovel%2F211993%2F1.html',
        'lately_read_list':'211993_1_001%A3%BA%B7%EF%BC%D2%C6%DF%D0%A1%BD%E3',
        'xudu':'%7B%22211993%22%3A%22944%22%7D',
        'sid':'hidhl35spacei6vkfblnjqaug0',
        'ksid':'hidhl35spacei6vkfblnjqaug0',
        'Hm_lvt_4990b6a4b440d196c146332313d4cf31':'1397907904,1397919883,1397920226',
        'Hm_lpvt_4990b6a4b440d196c146332313d4cf31':'1397920528'
        }
def user_agent(url):
    print url
    try:
#        global postdata
#        data = urllib.urlencode(postdata)
        req = urllib2.Request(url=url, data=None, headers=req_header)
        page = urllib2.urlopen(req,None, req_timeout)
        print page.geturl() 
        html = page
    except urllib2.URLError, e:
        print e.message
    except socket.timeout, e:
        user_agent(url)
    return html

total = 0  # NOTE(review): never read or updated anywhere in this file — dead state?
def page_loop(url):
    url = ''.join([url_prev,url])
    page = user_agent(url)
    soup = BeautifulSoup(page)
    article_title = soup.select('.readAreaBox h1')
    if not article_title return
    print article_title
    str_title = article_title[0].get_text()
    title = str_title.encode('utf-8')
    article_p = soup.select('#chapterContent') 
    #print article_p
    lines = []
    lines.append(title+'\n')
    for p in article_p:
        con_lines = str(p).split('<br/><br/>')
        print len(con_lines)
        lines.append(con_lines[0].replace('<div class="p" id="chapterContent">','')+'\n')
        for con_line in con_lines[1:len(con_lines)-1]:
            lines.append(con_line+'\n')

    with open('/home/web5/xiaoshuo1/'+str_title+'.txt','wb') as code:
        code.writelines(lines)
    

def getTotalPage():
    """Fetch the chapter index page and return every chapter link's href."""
    index_page = user_agent(baseurl)
    index_soup = BeautifulSoup(index_page)
    con_divs = index_soup.find_all('div', class_='con')
    hrefs = []
    # The first 'con' div is not part of the chapter listing; skip it.
    for con_div in con_divs[1:]:
        for anchor in con_div.find_all('a'):
            hrefs.append(anchor.get('href'))
    return hrefs
# Entry point: collect every chapter href from the index, then download each.
titles = getTotalPage()
for chapter_href in titles:
    page_loop(chapter_href)
