'''
中文名
别名
原产地
功能
信息介绍
历史
适合人群

友善
颜值
粘人程度
聪明度
忠诚度

'''
import sqlite3
import threading


# 初始化 SQLite  
def __init__():
    sqlite3
# A thread-local holder for SQLite connections was considered but left
# disabled; a single module-level connection is shared instead.
# conn = threading.local()
# NOTE(review): this connection is opened at import time and shared by
# every function in this file. sqlite3 connections reject use from other
# threads by default — confirm single-threaded use, or open per-thread
# connections / pass check_same_thread=False if threads are added.
conn = sqlite3.connect('test.sqlite')

def queryclass(pclass):
    """Look up a pet-class record by fuzzy (LIKE) name match.

    Args:
        pclass: Pet-class keyword; wrapped in '%...%' for the LIKE match.

    Returns:
        The third column (index 2) of the first matching row in
        ``pclasstable``, or False when no row matches.
    """
    # Parameterized query instead of '%'-string interpolation: the old
    # version injected pclass straight into the SQL text (SQL injection,
    # and broke on quotes in the keyword).
    pattern = '%' + pclass + '%'
    sql = 'select * from pclasstable where pclass like ?'
    cursor = conn.cursor()
    try:
        cursor.execute(sql, (pattern,))
        res_tup = cursor.fetchone()
        if res_tup is not None:
            return res_tup[2]
        return False
    finally:
        # Always release the cursor (the old finally-block was a no-op).
        cursor.close()
    
def insert_data(pname,oname,address,ability,pmsg,pet_his,pet_fit,pet_clingy,pet_kind,pet_smart,pet_honest,bueaty,pclass,pimg):
    """Insert one scraped pet record into the ``pet_msg`` table.

    Parameter names (including the historical 'bueaty' typo, which
    matches the DB column) are kept unchanged for keyword callers.

    Args:
        pname/oname/address: name, alias and place of origin.
        ability/pmsg/pet_his/pet_fit: function, intro, history, audience.
        pet_clingy/pet_kind/pet_smart/pet_honest/bueaty: rating strings.
        pclass: category text; pimg: base64-encoded image.

    Returns:
        "爬取成功" on success, "爬取失败" on any database error.
    """
    # Parameterized insert: the old version formatted all 14 values
    # straight into the SQL text (SQL injection; broke on quotes).
    sql = (
        "insert into pet_msg "
        "(pname,oname,address,ability,pmsg,pet_his,pet_fit,pet_clingy,"
        "pet_kind,pet_smart,pet_honest,bueaty,pclass,pet_img) "
        "values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"
    )
    params = (pname, oname, address, ability, pmsg, pet_his, pet_fit,
              pet_clingy, pet_kind, pet_smart, pet_honest, bueaty,
              pclass, pimg)
    cursor = conn.cursor()
    try:
        cursor.execute(sql, params)
        conn.commit()
        return "爬取成功"
    except sqlite3.Error:
        # Roll back only on failure — the old code rolled back in a
        # finally-block, i.e. even right after a successful commit.
        conn.rollback()
        return "爬取失败"
    finally:
        cursor.close()

  
    
# url = 'https://www.chongso.com/z/chongwufbz/'
# url = 'https://www.chongso.com/z/chongwufbzrdfg'
def spyder_main(listid):
    """Scrape pet pages from chongso.com for every id in *listid*.

    For each id it fetches ``https://www.chongso.com/z/chongwu{id}``,
    parses name/alias/origin text, the intro/history/audience paragraphs,
    the rating percentages, the category and the pet image (stored as
    base64), then inserts one row via insert_data().

    Args:
        listid: iterable of URL-suffix strings identifying pet pages.

    Returns:
        (successcount, failcount): counts of successful / failed inserts.

    Side effects:
        Sets the module globals ``pclassflag`` and ``img_base64``;
        performs network I/O and database writes.
    """
    import base64
    import requests
    from bs4 import BeautifulSoup
    global pclassflag,img_base64
    successcount=0
    failcount=0
    for id in listid:
        url = f'https://www.chongso.com/z/chongwu{id}'
        rq = requests.get(url)
        rq.encoding = rq.apparent_encoding;
        soup = BeautifulSoup(rq.text,'html.parser')
        tag1 = soup.find_all('div',class_='dq_dbox')
        tag2 = soup.find_all('div',class_='dq_dtwo clearfix')
        tag3 = soup.find_all('div',class_='tab-pal')
        tagclass= soup.find_all('div',class_='position_r')
        tagimg = soup.find_all('div',class_='dq_dtop') # div that holds the pet image
        text = []
        msg = [] # pet self-introduction texts
        his = [] # history / origin texts
        fit = [] # suitable-audience texts

        # Collect the plain-text fields (name, alias, origin, ...) from
        # the info boxes; non-leaf tags (i.string is None) are skipped.
        for line1 in tag1:
            line = line1.find_all('div',class_='plc')
            #print(line)
            for i in line:
                if i.string is not None:
                    #print(i.string)
                    text.append(i.string)

        for lines in tag1:
            line = lines.find_all('span')
            for i in line:
                if i.string is not None:
                    text.append(i.string)
        # The first three <p> tags hold intro / history / audience text.
        # Any parse failure falls back to one placeholder entry for each
        # of the three lists so the indexing below stays valid.
        try:
            for line in tag3:
                res1=line.find_all('p')[0]
                recieve = (res1.string).strip('\r\n\t')
                msg.append(recieve)
            for line in tag3:
                res1=line.find_all('p')[1]
                his.append((res1.string).strip('\r\n\t'))
            for line in tag3:
                res1=line.find_all('p')[2]
                fit.append((res1.string).strip('\r\n\t'))

        except:
            msg.append('没有找到信息')
            his.append('没有找到信息')
            fit.append('没有找到信息')

        # Rating bars: take the 3 characters after ':' in each <em>'s
        # raw HTML and strip a trailing '%' when present.
        for line2 in tag2:
            line = line2.find_all('em')
            #print(line)
            for i in line:
                indexnum=str(i).index(':') #16
                #print(indexnum)
                res=str(i)[indexnum+1:indexnum+4]
                #print(res)
                if res.endswith('%'):

                    text.append(res.strip('%'))
                    #print(res)
                else:
                    text.append(res)

        # Locate the pet category: first character of the third
        # breadcrumb link, then resolve it through queryclass().
        for c in tagclass:
            aclass = c.find_all('a')[2]
            classstr = (aclass.string)[0]
            pclassflag= classstr
            res = queryclass(classstr)
            if res != False:
               text.append(res)
            else:
               text.append('')
        # Download the pet image and keep it as a base64 string (global).
        for i in tagimg:
            img= i.find_all('img')[0]
            src = img.get('src')
            img_bytes= requests.get(src).content
            img_base64 = base64.b64encode(img_bytes).decode('utf-8')

        # No text at all means the page didn't parse — count a failure.
        try:
           pname=text[0]  # pet name
        except:
           failcount+=1
           continue
        oname =text[1] # pet alias
        address =text[2] # place of origin
        pmsg =msg[0] # self-introduction
        pet_his=his[0] # pet history
        pet_fit=fit[0] # suitable audience
        pclass= text[-1] # category

        # Rating fields sit at fixed offsets that differ for dog/cat
        # pages vs other pets. NOTE(review): these magic indexes mirror
        # the site's current page layout — verify if scraping breaks.
        if pclassflag == '狗' or pclassflag=='猫':
            ability=text[28] # pet function
            pet_clingy =text[30] # clinginess
            pet_kind=text[35] # friendliness
            pet_smart = text[44]# intelligence
            pet_honest=text[45]# loyalty
            bueaty=text[46] # looks
        else:
            ability = '未知'
            pet_clingy =text[25] # clinginess
            pet_kind=text[26] # friendliness
            pet_smart = text[27]# intelligence
            pet_honest=text[28]# loyalty
            try:
               bueaty =text[29]
            except:
               bueaty='0'
        res=insert_data(pname=pname,oname=oname,address=address,ability=ability,pmsg=pmsg,pet_his=pet_his,pet_fit=pet_fit,\
        pet_clingy=pet_clingy,pet_kind=pet_kind,pet_smart=pet_smart,pet_honest=pet_honest,bueaty=bueaty,pclass=pclass,pimg=img_base64)
        if res == '爬取成功':
           successcount+=1
        else:
           failcount+=1
    return successcount,failcount
# Report the live thread count, then run the scraper over a fixed list of
# page-id suffixes and print the success/failure tally.
print(threading.active_count())
success_count, fail_count = spyder_main(['fxd', 'gzz', 'sadsffdsfd', '845445dc'])
print('成功次数:%d,失败次数:%d' % (success_count, fail_count))
    

        



