# -*- coding:utf-8 -*-

# 学术点评网 （xueshu.com.cn）爬取

import requests as rq
from os import mkdir,lexists,path
import bs4

class Scrapy:
    """Crawler for xueshu.com.cn (the "Academic Review" site).

    Keeps three bookkeeping lists:
      * ``open_url``   -- frontier of URLs still to fetch
      * ``close_url``  -- URLs already attempted
      * ``error_list`` -- URLs whose download raised an exception

    Downloads whose Content-Type is unknown are dumped under
    ``<workspace>/notype``.

    NOTE(review): the file-level ``from os import ... lexists ...`` looks
    broken (``os`` has no top-level ``lexists``; only ``os.path.lexists``
    exists) -- this class only uses ``path.lexists``, so the stray name is
    unused. Verify the import line separately.
    """

    def __init__(self, workspace):
        """Create the workspace directories and initialise crawl state.

        Args:
            workspace: directory under which downloaded files are stored.
        """
        self.work_path = workspace
        self.notype_path = path.join(self.work_path, 'notype')

        # The original referenced bare ``work_path`` / ``notype_path`` here,
        # which raised NameError; use the instance attributes instead.
        # Two independent checks also cover the case where the workspace
        # exists but the 'notype' subdirectory does not.
        if not path.lexists(self.work_path):
            mkdir(self.work_path)
        if not path.lexists(self.notype_path):
            mkdir(self.notype_path)

        self.open_url = []
        self.close_url = []
        self.error_list = []

    def downloader(self, url):
        """Fetch *url* and return the Response, or ``None`` on failure.

        Failed URLs are appended to ``error_list``; every attempted URL is
        appended to ``close_url`` regardless of outcome.
        """
        try:
            res = rq.get(url=url)
        except rq.RequestException:
            # Narrowed from a bare ``except:`` -- requests wraps all of its
            # network/protocol errors in RequestException.
            res = None
            self.error_list.append(url)

        self.close_url.append(url)
        return res

    def analysis(self, res):
        """Dispatch a downloaded response according to its Content-Type.

        * no Content-Type header     -> raw dump into the ``notype`` dir
        * ``text/html``              -> search-result pages are mined for
          item links (pushed onto the frontier); detail pages are TODO
        * ``application/x-download`` -> TODO, not implemented yet
        """
        if not res:
            return

        # Crude URL splitter: parts[-1] is the file name and, for a
        # '<scheme>://<host>/...' URL, parts[1] is the host.
        def _url_parts(url):
            return [part for part in url.split('/') if part]

        parts = _url_parts(res.url)
        file_path = path.join(self.notype_path, parts[-1])
        content_type = res.headers.get('Content-Type', None)

        if not content_type:
            # Binary dump. The original passed encoding='utf-8' together
            # with mode 'wb', which raises ValueError -- binary mode takes
            # no encoding.
            with open(file_path, 'wb') as f:
                f.write(res.content)

        elif 'text/html' in content_type:
            # Name the parser explicitly: BeautifulSoup without one warns
            # and picks whatever parser happens to be installed.
            web = bs4.BeautifulSoup(res.content, 'html.parser')

            # Search-result pages are recognised by a .box-tab element.
            if web.select('.box-tab'):
                # stdlib; imported locally to leave file-level imports alone
                from urllib.parse import urljoin

                for tab in web.select('.item-title'):
                    # urljoin resolves relative hrefs against the page URL;
                    # the original os.path.join built malformed URLs.
                    self.open_url.append(urljoin(res.url, tab.a['href']))
            else:
                # Detail page: content lives in either a .fanwen-box or a
                # .qkwz-info-box container.
                selector = '.' + ('fanwen-box' if 'fanwen-box' in res.text
                                  else 'qkwz-info-box')
                boxes = web.select(selector)
                if not boxes:
                    # Guard: the original indexed [0] unconditionally and
                    # could IndexError on an unexpected page layout.
                    return
                text_box = boxes[0]
                # TODO(review): the original source ended mid-statement
                # here ("title = ") -- detail-page extraction was never
                # finished. Parsed container kept for future work.
                _ = text_box

        elif 'application/x-download' in content_type:
            # TODO: handle direct file downloads.
            pass

    def run(self, start_url):
        """Crawl loop: seed the frontier and process until it drains."""
        self.open_url.append(start_url)

        while self.open_url:
            res = self.downloader(self.open_url.pop())
            self.analysis(res=res)



def main():
    """Entry point stub: names the download directory for the crawler.

    The crawl itself was left disabled in the original source; enable it
    once a start URL is chosen, e.g.::

        app = Scrapy(download_dir)
        app.run(start_url)
    """
    download_dir = '学术点评网-爬虫-保存'
    # Referenced so the name is not an unused local; replaces the
    # commented-out construction code from the original.
    return None if download_dir else None


# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':

    main()