# coding=utf-8
__author__ = 'NDMY'
import urllib
import urllib2
from BeautifulSoup import BeautifulSoup
import re
class Spider:
    """Fetch a page for a GBK-encoded query string and parse it with BeautifulSoup.

    Attributes:
        Site_Url: base URL; the percent-quoted ``Var`` is appended to it.
        Var: query term as GBK-encoded bytes; re-encoded to UTF-8 before
             URL-quoting (the target site expects UTF-8 in the query).
        Count: counter slot kept for external callers; not used internally.
    """

    def __init__(self):
        self.Site_Url = ""
        self.Var = ""
        self.Count = 0

    def Get_Page(self):
        """Download Site_Url + quoted Var and return the raw response bytes.

        Sends a browser-like User-Agent (some sites reject the default
        urllib2 agent). The HTTP response is closed explicitly — the
        original version leaked the socket.
        """
        # Var is GBK bytes on disk/console; quote it as UTF-8 for the URL.
        url = self.Site_Url + urllib.quote(self.Var.decode('gbk').encode("utf-8"))
        request = urllib2.Request(url)
        request.add_header("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1")
        response = urllib2.urlopen(request)
        try:
            return response.read()
        finally:
            # fix: always release the connection, even if read() raises
            response.close()

    def Deal_Response(self, response):
        """Parse raw page bytes into a BeautifulSoup tree.

        ``fromEncoding="gb18030"`` forces the decoder, since these pages
        are GB18030-encoded and may lack a correct charset declaration.
        """
        soup = BeautifulSoup(response, fromEncoding="gb18030")
        return soup



# --- Script: read target URLs from ./links (one per line), scrape each
# --- page and append the item titles/data to per-page .txt files in ./doc/.

links = []

# Strip the trailing newline from every URL — the original kept it, so the
# newline ended up inside the request URL. Blank lines are skipped.
t = open("links", "r")
try:
    for line in t:
        line = line.rstrip("\n")
        if line:
            print(line)
            links.append(line)
finally:
    t.close()

# Compiled once (was recompiled per item in the inner loop): strips HTML tags.
tag_re = re.compile(r'<[^>]+>', re.S)

for link in links:
    spider = Spider()
    spider.Site_Url = link
    soup = spider.Deal_Response(spider.Get_Page())

    final = soup.findAll("div", attrs={"name": "itemData"})
    final2 = soup.findAll("a", attrs={"name": "itemTitle"})
    final_t = soup.findAll("p", attrs={"class": "visa_main_heading"})

    # Titles in document order; assumed to pair 1:1 with the itemData
    # divs below — TODO confirm against the live page markup.
    title = [each2.string for each2 in final2]

    # Output file named after the page heading; '/' removed so the heading
    # is a valid file-name component. GBK matches the Windows filesystem
    # encoding this script targets — presumably; verify on the deploy host.
    out_path = r"./doc/" + str(final_t[0].string.encode('gbk').replace('/', '')) + ".txt"
    f = open(out_path, 'a')
    try:
        for count, each in enumerate(final):
            # Drop markup, then keep only the first triple-newline-separated
            # chunk of the item's text.
            dd = tag_re.sub('', str(each))
            dds = dd.split("\n\n\n")
            f.write("\n\n" + str(title[count]) + str(dds[0]))
    finally:
        f.close()
