#coding=utf-8
import os
import re
import sqlite3

from lxml import etree
import requests
import json

from tbitems_db_vip import *
from maokoulingClass import *



def writejson(data, jsonname):
    """Serialize *data* as JSON and write it to the file *jsonname*.

    Best-effort: any error is printed and swallowed so a failed dump
    never aborts the caller (preserving the original bare ``pass``
    intent).  The parameter was renamed from ``dict``, which shadowed
    the builtin.
    """
    try:
        # "w" truncates exactly like the old "w+"; the read capability
        # was never used.  utf-8 is pinned so the file round-trips with
        # readjson() regardless of the platform default encoding.
        with open(jsonname, "w", encoding="utf-8") as f:
            # json.dump streams straight to the file; no manual close —
            # the with-block already guarantees it.
            json.dump(data, f)
    except Exception as e:
        # Keep the best-effort contract: report, don't raise.
        print(e)

def readjson(jsonname):
    """Load and return the JSON document stored in *jsonname*.

    Bug fix: the original parsed only the first line
    (``f.readline()``), silently truncating any multi-line or
    pretty-printed file; ``json.load`` reads the whole document.
    """
    with open(jsonname, "r", encoding="utf-8") as f:
        return json.load(f)
# NOTE(review): module-level accumulators; nothing visible in this file
# reads or writes them — presumably leftovers.  TODO confirm (star
# importers could touch them) before removing.
mydict = {}
mylist =[]
def geturl(url, timeout=10):
    """GET *url* and return the response body as decoded text.

    requests honours the header-declared charset; pages that omit one
    default to ISO-8859-1 (per HTTP/1.1), which garbles Chinese
    content.  In that case the raw bytes are re-decoded with the
    encoding sniffed from the content itself.

    timeout: seconds before the request aborts — without one, a stalled
    server hangs the crawl forever (requests has no default timeout).
    """
    response = requests.get(url=url, timeout=timeout)
    print('请求拿到的信息编码格式为 %s' % response.encoding)
    print('请求拿到的信息编码格式为 %s' % response.apparent_encoding)
    if response.encoding == 'ISO-8859-1':
        # Decode the original bytes directly instead of the old
        # text.encode(...).decode(...) round-trip.  apparent_encoding
        # can be None when sniffing fails, and a bad sniff could raise
        # UnicodeDecodeError mid-crawl — hence the fallback + 'replace'.
        sniffed = response.apparent_encoding or 'utf-8'
        return response.content.decode(sniffed, 'replace')
    return response.text
def parsehtml(cursor, strhtml):
    """Extract thread anchors from a forum listing page and persist them.

    Walks every <a> under the ``#moderate`` table and hands title plus
    href to insertdb, padding the remaining six columns with empty
    strings.
    """
    tree = etree.HTML(strhtml)
    anchors = tree.xpath('//*[@id="moderate"]/table/tbody/tr/th/a')
    for anchor in anchors:
        # NOTE(review): the third column receives the parsed tree object
        # itself, exactly as the original did — looks unintentional;
        # confirm what the "status" column should actually hold.
        insertdb(cursor, anchor.text, anchor.get('href'), tree,
                 '', '', '', '', '', '')

    # # session.close()
    # Run the crawl over multiple threads.
from concurrent import futures
def ThreadLine():
    """Fan the gethtml jobs out over a (single-worker) thread pool."""
    print('213')
    # max_workers=1 serialises the jobs; one submit per page index.
    # NOTE(review): submit() swallows worker exceptions unless the
    # returned Future is inspected — same as the original.
    with futures.ThreadPoolExecutor(max_workers=1) as executor:
        for page_index in range(500):
            executor.submit(gethtml, page_index)



if __name__ == '__main__':
    dbpath = os.path.join('JiSu.db')
    print(dbpath)
    conn = sqlite3.connect(dbpath)
    cursor = conn.cursor()
    initdb(cursor)

    def geturls():
        """Crawl listing pages 1..499 and store every thread link."""
        for i in range(1, 500):
            url = f'http://bbs.8080.net/forum-160-{i}.html'
            print(url)
            text1 = geturl(url)
            parsehtml(cursor, text1)
            conn.commit()
        # NOTE(review): this call passes 8 values after the cursor while
        # the one in zhuanhuan() passes 4 — verify updatedb's signature.
        updatedb(cursor, 'thread-1008244-1-1.html', '21312', '21312',
                 '21312', '21312', '21312', '21312', '21312')
        conn.commit()

    # BUG FIX: the original called geturl() — the single-page fetcher —
    # with no argument, raising TypeError before any crawling happened;
    # geturls() is clearly what was intended.
    geturls()

    def zhuanhuan():
        """Resolve each stored link's s.click redirect into taobao ids."""
        all_results = searchLINKdb(cursor)
        # Hoisted out of the loop: both are loop-invariant.
        # BUG FIX: [a-zA-z] also matched the punctuation between 'Z' and
        # 'a'; the dots are now escaped so they match literal dots only,
        # and the raw string keeps \s a regex class, not a str escape.
        panduan_sclik = 'https://s.click.taobao.com/'
        sclick_str = re.compile(
            r'([a-zA-Z]+://s\.click\.taobao\.com[^\s]*)["]')
        for link in all_results:
            contentUrl = f"http://bbs.8080.net/{link[0]}"
            html = geturl(url=contentUrl)
            print(f"开始匹配{contentUrl}网页内容")
            if panduan_sclik in html:
                print('ok')
                sclickurls = sclick_str.findall(html)
                for sclickurl in sclickurls:
                    # Empty resolution means a shop link; a value is an
                    # item id, after which this page is done.
                    itemid = sclikto_id(sclickurl)
                    print(link[0])
                    if itemid is not None:
                        abc = maokouling_ONE()
                        abc.itemid = itemid
                        abc.userid = item_to_SELLID(abc)
                        itemlink = tblink_get_gaoyong(abc)
                        shoplink = userid_to_shopTBK_link(abc)
                        updatedb(cursor, link[0], itemid, itemlink,
                                 shoplink)
                        conn.commit()
                        break
            else:
                print("垃圾网页")
                # No affiliate link on the page: drop its row.
                # NOTE(review): the argument is pre-quoted ("'...'") —
                # confirm delete() expects that; string-built SQL is
                # injection-prone, prefer a parameterized query.
                delete(cursor, f"'{link[0]}'")
                conn.commit()

    # TODO(review): zhuanhuan() is defined but never invoked — the
    # original never reached it either (it crashed on geturl()).
    cursor.close()
    conn.close()  # the connection itself was never closed before