#coding:utf8

import requests
import lxml

import re
import os
from bs4 import BeautifulSoup as bs
from collections import OrderedDict

from urllib.request import urlopen
import json
import io
import sys

"""爬取学校"""
def schools():
    try:
        for i in range(2, 33, 1):
            url = 'https://gaokao.chsi.com.cn/gkxx/zszcgd/dnzszc/201706/20170615/1611254988-%s.html'%(str(i))

            r = requests.get(url = url)

            soup = bs(r.content, "lxml")

            names = soup.find_all(name = 'td', attrs = {'colspan':'7'})

            if names == []:
                filename = soup.find_all(name = 'td', attrs = {'colspan':'6'})[0].string
            else:
                filename = names[0].string
            dir = os.getcwd()
            f = open("%s/schools/%s.txt"%(dir, filename), 'w')
            content = soup.find_all(name = 'tr', attrs = {'height':'29'})
            for content1 in content:
                try:
                    soup_content = bs(str(content1), 'lxml')
                    soup_content1 = soup_content.find_all(name = 'td')
                    f.write(soup_content1[1].string+"\n")

                except IndexError:
                    pass
    except IndexError:
        pass

def getInfo(page):
    """Scrape clinical-trial registrations from chictr.org.cn.

    Fetches search-result pages 1..page, follows each project's detail link,
    extracts key/value pairs from the first two rows of the detail table,
    optionally stores the records in a local MongoDB collection
    (``mydb.Test``) when pymongo is available, and dumps all records to
    ``jsons.json`` (UTF-8).

    Args:
        page: number of listing pages to scrape (inclusive, starting at 1).

    Side effects: network requests, optional MongoDB insert, file write.
    """
    # MongoDB is optional: the original referenced pymongo without importing
    # it (and a leftover debug exit() made the whole body unreachable).
    try:
        import pymongo
        my_db = pymongo.MongoClient('localhost', 27017).mydb.Test
    except ImportError:
        my_db = None

    # Strips whitespace/nbsp artifacts and colons from scraped cell text.
    rep = re.compile(r'\n|&nbsp|\xa0|\\xa0|\u3000|\\u3000|\\u0020|\u0020|\t|\r|\n\r|:')

    parent = []
    for page_no in range(1, page + 1):
        url = "http://www.chictr.org.cn/searchproj.aspx?title=&officialname=&subjectid=&secondaryid=&applier=&studyleader=&ethicalcommitteesanction=&sponsor=&studyailment=&studyailmentcode=&studytype=0&studystage=0&studydesign=0&minstudyexecutetime=&maxstudyexecutetime=&recruitmentstatus=0&gender=0&agreetosign=&secsponsor=&regno=&regstatus=0&country=&province=&city=&institution=&institutionlevel=&measure=&intercode=&sourceofspends=&createyear=0&isuploadrf=&whetherpublic=&btngo=btn&verifycode=&page=%s" % page_no
        html = urlopen(url).read()
        soup = bs(html, "html.parser")

        for p_tag in soup.select("table[class='table_list'] p"):
            link = p_tag.find('a')
            if not link:
                continue
            child = OrderedDict()
            child['注册题目'] = link.string
            href = "http://www.chictr.org.cn/" + link.get('href')

            # Follow the detail page; keys sit in even-indexed cells,
            # values in the odd-indexed cell that follows each key.
            detail_html = urlopen(href).read()
            detail = bs(detail_html, "html.parser").select(
                'div[class="ProjetInfo_ms"] tr')[0:2]
            key = None
            for tr in detail:
                # Renamed index (the original shadowed the outer loop's i).
                for idx, td in enumerate(tr.find_all('td')):
                    text = rep.sub('', td.get_text())
                    if idx % 2 == 0:
                        key = text
                    elif text:  # skip empty values
                        child[key] = text
            parent.append(child)

    # The original called exit() inside the loop, so only page 1 was ever
    # scraped and the process died; persist once, after all pages.
    if my_db is not None and parent:
        my_db.insert_many(parent)  # Collection.insert() is deprecated

    # ensure_ascii=False keeps the Chinese text readable in the dump.
    with io.open("jsons.json", "w", encoding="utf-8") as f:
        f.write(json.dumps(parent, ensure_ascii=False))
    print("恭喜你爬取成功！！！")


if __name__ == '__main__':
    # Both scrapers are currently disabled:
    # schools()
    # getInfo(2)
    # NOTE(review): looks like a debug leftover — this prints the os.path
    # module object instead of running either scraper; confirm intent.
    print(os.path)
