import scrapy
from guoxueContenter.items import GuoxuecontenterItem
import re
import pymysql
from  time import sleep
from bs4 import BeautifulSoup as soup
class GuoxuespideSpider(scrapy.Spider):
    """Crawl guoxue.shufaji.com book pages whose urls are stored in the
    MySQL table ``co_books_look`` and yield ``GuoxuecontenterItem``
    objects carrying chapter and author information.
    """
    conn = None    # class-wide pymysql connection, opened lazily by con()
    cursor = None  # class-wide cursor for that connection
    name = 'guoxueSpide'
    allowed_domains = ['guoxue.shufaji.com']
    start_urls = ['http://guoxue.shufaji.com/',]
    scend_urls = []

    def parse(self, response):
        """Entry point: read every (book name, book url) pair from the DB
        and schedule a request for each book's index page."""
        self.con()
        try:
            # One query for both columns instead of one per-name lookup;
            # this also removes the string-formatted (injectable) SQL the
            # per-name queries used.
            self.cursor.execute("select srname, fileurl from co_books_look")
            rows = self.cursor.fetchall()
        except Exception as e:
            print("查询数据错误！>>", e)
            return  # query failed -> nothing to crawl
        for bk_name, bk_url in rows:
            # bk_name is not needed downstream; the detail callback
            # re-derives everything it needs from the url.
            yield scrapy.http.Request(url=bk_url,
                                      callback=self.parse_detail,
                                      meta={"url": bk_url})

    def parse_detail(self, response):
        """On a book index page, follow every chapter link found in the
        left-aligned div inside the main content block."""
        a_url = response.meta["url"]
        start1 = response.xpath("//div[@class='con']")
        # The first <font> of the first <p> holds the book's directory
        # name, needed to rebuild the relative chapter urls.
        a_url_p = start1.xpath('.//p[1]/font[1]/text()').extract_first()
        # An empty selector list simply yields no requests, so the old
        # `if start1.xpath(...)` guard is redundant.
        for a_node in start1.xpath(".//div[@align = 'left']/a"):
            a_href = a_node.xpath(".//@href").extract_first()
            # Chapter hrefs are relative; rebuild an absolute url of the
            # form <site>/国学/<book-dir>/<chapter>.
            a_href = self.start_urls[0] + "国学/" + a_url_p + "/" + a_href
            chapte = a_node.xpath(".//text()").extract()
            yield scrapy.http.Request(url=a_href,
                                      callback=self.parse_detail_two,
                                      meta={'url': a_url, "chapte": chapte})

    def parse_detail_two(self, response):
        """Extract the author text from a chapter page, look up the
        book's keyword id in the DB, and yield the populated item."""
        item = GuoxuecontenterItem()
        b_href = response.meta["url"]
        start2_chapte = response.meta["chapte"]
        # NOTE(review): `a[-1]` is not a valid XPath position (positions
        # start at 1), so this extract() is always empty and the first
        # branch below always runs — confirm whether a[last()] was meant.
        last_link_text = response.xpath(
            "//div[@class='con']//p[@align='left']//a[-1]/text()").extract()
        first_href = response.xpath(
            "//div[@class='con']//p[@align='left']/a/@href").extract_first()
        # `first_href or ''` guards against extract_first() returning
        # None, which would make the `in` test raise TypeError.
        if not last_link_text or '../书名' not in (first_href or ''):
            texts = response.xpath(
                "//div[@class='con']//p[@align='left']//text()").extract()
            # The last text node ends with "作者:<name>"; keep its final
            # characters, then strip the label/punctuation remnants.
            start2_text = texts[-1][-4:] if texts else ''
            for junk in ("者", ":", "、"):
                start2_text = start2_text.replace(junk, "")
        else:
            start2_text = response.xpath(
                "//div[@class='con']//p[@align='left']//a/text()").extract_first()
        print(start2_text)
        self.con()
        kid = None  # default when the lookup fails or returns no rows
        try:
            # Parameterized query instead of str.format — no SQL
            # injection through the crawled url.
            self.cursor.execute(
                "select keywordid from co_books_look where fileurl=%s",
                (b_href,))
            result = self.cursor.fetchall()
            if result:
                kid = result[0][0]
        except Exception as e:
            print("查询数据错误！>>", e)
        item['art_chapte'] = start2_chapte
        item["art_pid"] = kid
        item['art_author'] = start2_text
        yield item

    @classmethod
    def con(cls):
        """Open the shared MySQL connection and cursor if not yet open.

        The previous version reopened (and leaked) a new connection on
        every call; opening once and reusing it avoids that.
        """
        if cls.conn is None:
            # NOTE(review): credentials are hardcoded — move them to the
            # Scrapy settings or environment variables.
            cls.conn = pymysql.Connection(host="192.168.0.113", port=3306,
                                          user='coname',
                                          password='xZS6p4LDcKCwn4Yb',
                                          db='co', charset="utf8")
            cls.cursor = cls.conn.cursor()

    def closed(self, spider):
        """Scrapy shutdown hook: release the cursor and the connection."""
        try:
            self.cursor.close()
            sleep(1)  # brief grace period before dropping the connection
            self.conn.close()
            print("关闭成功！")
        except Exception as e:
            print("关闭失败>>>>", e)