import scrapy
import pymysql
from selenium import webdriver
import re
from fruitNamer.items import FruitnamerItem
class FruitnameSpider(scrapy.Spider):
    """Crawl Baidu Baike for every fruit name stored in MySQL and yield a
    FruitnamerItem holding the cleaned article text for each fruit.

    Flow: ``parse`` reads the name list from the ``co_fruit`` table and
    schedules one detail request per name; ``parse_detail`` extracts and
    cleans the article body; ``closed`` releases the DB resources when the
    spider shuts down.
    """

    name = 'fruitName'
    allowed_domains = ['baike.baidu.com']
    start_urls = ['https://baike.baidu.com/']

    # Compiled once: matches the inline-script block the page embeds in the
    # article text ("window.rsInsertData ... 参考资料").  Raw string with the
    # '.' escaped — the original non-raw pattern treated it as a wildcard.
    _SCRIPT_BLOCK_RE = re.compile(r'window\.rsInsertData(.*?)参考资料')

    def __init__(self, *args, **kwargs):
        # Pass spider kwargs through so `scrapy crawl -a name=value` and
        # Scrapy's own construction path keep working.
        super().__init__(*args, **kwargs)
        # SECURITY NOTE(review): credentials are hard-coded; move them to
        # Scrapy settings or environment variables.
        self.conn = pymysql.connect(
            host="192.168.0.113",
            port=3306,
            user='coname',
            password='xZS6p4LDcKCwn4Yb',
            db="co",
            charset="utf8",
        )
        self.cursor = self.conn.cursor()

    def parse(self, response):
        """Read fruit names from the DB and schedule one detail request each.

        The start page itself is not scraped; it only serves as the URL base
        for the per-fruit ``/item/<name>`` pages.
        """
        self.cursor.execute('select srname from co_fruit')
        for row in self.cursor.fetchall():
            fruit_name = row[0]
            yield scrapy.Request(
                url=self.start_urls[0] + 'item/' + fruit_name,
                callback=self.parse_detail,
                meta={'fname': fruit_name},
            )

    def parse_detail(self, response):
        """Extract the article body, strip markup noise, and yield the item."""
        fname = response.meta['fname']
        item = FruitnamerItem()

        node = response.xpath('/html/body/div[3]/div[2]/div/div[1]')
        parts = node.xpath("string(.)").extract()

        pieces = []
        for raw in parts:
            # Drop whitespace artifacts and backslash-prefix curly quotes.
            # The original code wrote '\‘' etc., relying on an *invalid*
            # escape sequence that happened to keep the backslash; here the
            # backslash is explicit (same runtime result).  The original
            # `.replace('"','\"')` / `.replace("'","\'")` calls were no-ops
            # (the escape resolves to the same character) and are dropped.
            # NOTE(review): backslash escaping like this is not safe SQL
            # escaping — the insert pipeline should use parameterized queries.
            cleaned = (raw.replace('\n', '').replace('\r', '').replace('\xa0', '')
                          .replace('‘', '\\‘').replace('’', '\\’')
                          .replace('“', '\\“').replace('”', '\\”'))
            pieces.append(cleaned.strip())
        content = ''.join(pieces)

        # Remove the embedded script block, then any leftover literal
        # fragments, then all single quotes — same order as before.
        content = self._SCRIPT_BLOCK_RE.sub('', content)
        content = content.replace('window.rsInsertData', '')
        content = content.replace('参考资料', '')
        content = content.replace("'", '')

        item['art_name'] = fname
        item['art_content'] = content
        yield item

    def closed(self, response):
        """Release DB resources when the spider closes.

        NOTE(review): Scrapy passes the close *reason* string here, not a
        response; the parameter name is kept unchanged for compatibility.
        """
        try:
            self.cursor.close()
            self.conn.close()
            print('数据库关闭成功!')
        except Exception as e:
            print("数据库关闭失败或者selenium关闭失败>>>", e)
