import  requests
import  matplotlib
import  re
import  os
import  csv
import  json
import  time
import  pymysql
import  pyecharts.options   as      opts
import  matplotlib.pyplot   as      plt
from    urllib              import parse
from    pyecharts.charts    import Pie
from    pyecharts.charts    import Bar
from    pyecharts.globals   import ThemeType
from    pprint              import pp
from    rich.progress       import track
from    rich                import print
import  random
import  traceback
from selenium import webdriver

# Database access helper class
class OperationMysql:
    """Thin wrapper around a pymysql connection to the jd_spider database.

    Each instance opens its own connection in ``__init__`` and closes it
    when the object is garbage-collected.  Results are returned as dicts
    (DictCursor).  Write helpers commit on success and roll back on error.
    """

    def __init__(self):
        # Open the database connection.
        # NOTE(review): credentials are hard-coded — consider moving them
        # to environment variables or a config file.
        self.conn = pymysql.connect(
            host        =   '127.0.0.1',  # database server host
            port        =   3306,  # database port
            user        =   'root',  # login user name
            passwd      =   'mcp19990916',
            db          =   'jd_spider',  # database name
            charset     =   'utf8',  # connection encoding
            cursorclass =   pymysql.cursors.DictCursor
        )
        # Cursor used for every statement issued through this object.
        self.cur = self.conn.cursor()

    # Run a SELECT statement; returns ALL matching rows (list of dicts).
    def search(self, sql):
        self.cur.execute(sql)
        result = self.cur.fetchall()  # fetch every row of the result set
        return result

    # Execute an UPDATE statement (misspelled name kept for compatibility).
    def updata_one(self, sql):
        try:
            self.cur.execute(sql)
            self.conn.commit()  # write operations must be committed
        except Exception:
            # Bug fix: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; roll back on real errors only.
            self.conn.rollback()

    # Execute a parameterized INSERT statement.
    def insert(self, sql, values):
        try:
            self.cur.execute(sql, values)
            self.conn.commit()  # write operations must be committed
        except Exception:
            # Roll back on any failure (e.g. duplicate key) and continue.
            self.conn.rollback()

    # Execute a DELETE statement.
    def delete(self, sql):
        try:
            self.cur.execute(sql)
            self.conn.commit()  # write operations must be committed
        except Exception:
            # Log the failure for diagnosis, then roll back.
            traceback.print_exc()
            self.conn.rollback()

    # Close the connection when the object is destroyed.
    def __del__(self):
        # Guard: __init__ may have raised before self.conn was assigned.
        if hasattr(self, 'conn'):
            self.conn.close()

# Pool of working proxies; each entry is a dict {"https": "https://host:port"},
# filled and refreshed by get_proxy()
proxy_host = []
# Proxy currently in use (one entry taken from proxy_host)
proxy_ip = []

# Backup cookie configuration
C_Index = 0  # index into proxy_host of the proxy currently in use
cookies = list()  # fallback cookie strings appended below
# test value
cookies.append('__jdu=1')
# 新的
cookies.append('__jda=122270673.16062072967971011004577.1606207297.1620015995.1620019113.39; shshshfp=723f4914e66add5236825bef4f1abd55; shshshfpa=99287815-8bf7-7648-8055-5004dcab97de-1614853083; shshshfpb=f0KBc%20PZTpO%203e9MTziuavw%3D%3D; __jdv=76161171|baidu|-|organic|not set|1619195975771; TrackID=1UfENu1LgYVwEVOSMJ6GimirOTQLtVvzZF5eWVP6H9UfOSGhI1bLrmLPNnS9Twn5Trx2h8egu4ZvNLn7r8KHXq_7_-WeL0RNF2oM1jl9FBAipkgMQxh1QWyJXZNDsnFdy; pinId=YMZoeXBNvKgoa3Z7zHp0p7V9-x-f3wj7; pin=jd_7551a6ebe0b16; unick=jd_7551a6ebe0b16; _tp=33QCYO9WWug7OIp5VZvueuPTU8349VfwTCZvHs7QzGs%3D; _pst=jd_7551a6ebe0b16; user-key=aace645f-0675-4b09-a4b4-960f54a335bf; ipLoc-djd=18-1482-48939-0; areaId=18; mt_xid=V2_52007VwMVU1pbU1wcQR5aDWEDE1JdWVNfG08fbARmAhoCX1oBRh8bH1UZYgARB0FQBw4XVR9fAjVQG1cKWVcKHXkaXQZnHxNXQVtbSx9KEl0FbAAbYl1oUmodTxpZDW4FFFdbaFdZGUo%3D; __jdc=122270672; PCSYCityID=CN_430000_430100_430103; shshshsID=e97584265a721f4a9c245d8a4339a17d_1_1620019112856; __jdb=122270672.1.16062072967971011004577|39.1620019113')
cookies.append('__jda=122270672.16062072967971011004577.1606207297.1620015995.1620019113.39; shshshfp=723f4914e66add5236825bef4f1abd55; shshshfpa=99287815-8bf7-7648-8055-5004dcab97de-1614853083; shshshfpb=f0KBc%20PZTpO%203e9MTziuavw%3D%3D; __jdv=76161171|baidu|-|organic|not set|1619195975771; TrackID=1UfENu1LgYVwEVOSMJ6GimirOTQLtVvzZF5eWVP6H9UfOSGhI1bLrmLPNnS9Twn5Trx2h8egu4ZvNLn7r8KHXq_7_-WeL0RNF2oM1jl9FBAipkgMQxh1QWyJXZNDsnFdy; pinId=YMZoeXBNvKgoa3Z7zHp0p7V9-x-f3wj7; pin=jd_7551a6ebe0b16; unick=jd_7551a6ebe0b16; _tp=33QCYO9WWug7OIp5VZvueuPTU8349VfwTCZvHs7QzGs%3D; _pst=jd_7551a6ebe0b16; user-key=aace645f-0675-4b09-a4b4-960f54a335bf; ipLoc-djd=18-1482-48939-0; areaId=18; mt_xid=V2_52007VwMVU1pbU1wcQR5aDWEDE1JdWVNfG08fbARmAhoCX1oBRh8bH1UZYgARB0FQBw4XVR9fAjVQG1cKWVcKHXkaXQZnHxNXQVtbSx9KEl0FbAAbYl1oUmodTxpZDW4FFFdbaFdZGUo%3D; __jdc=122270672; PCSYCityID=CN_430000_430100_430103; shshshsID=e97584265a721f4a9c245d8a4339a17d_1_1620019112856; __jdb=122270672.1.16062072967971011004577|39.1620019113')

# 谭志伟
cookies.append('shshshfpa=9cc6a17d-a105-fe6f-10ad-304f1d19bba6-1590154426; shshshfpb=eOCWCr9gy2lic7ysd8aMOdg%3D%3D; __jdu=2059537583; unpl=V2_ZzNtbUoDEUIhWBVULkldAWIGFA1LBURAfQBGBCxLX1I1VBJcclRCFnUUR1BnGF4UZwEZXENcQBNFCEdkeBBVAWMDE1VGZxBFLV0CFSNGF1wjU00zQwBBQHcJFF0uSgwDYgcaDhFTQEJ2XBVQL0oMDDdRFAhyZ0AVRQhHZHgYWA1jARRZS1VzJXI4dmR7G1kDYwMiXHJWc1chVERWfx1YACoAE1lKU0ETcQFEZHopXw%3d%3d; areaId=18; ipLoc-djd=18-1482-48939-0; PCSYCityID=CN_430000_430100_430103; shshshfp=36414ad11f898175ca4065cc39f57815; wlfstk_smdl=jjzthl1yxqfxuavevi117eo7lz0hcc7v; __jdv=76161171|baidu-pinzhuan|t_288551095_baidupinzhuan|cpc|0f3d30c8dba7459bb52f2eb5eba8ac7d_0_9ecfeab1da05447a8c6d991afc2fcf10|1620018086109; __jda=122270672.2059537583.1583480762.1607505318.1620018028.6; __jdc=122270672; shshshsID=5764d497ba2da93ab5940e7aea6c14be_10_1620018126184; __jdb=122270672.19.2059537583|6.1620018028; 3AB9D23F7A4B3C9B=WYX5ODQJL3OTRWGCVHLOPEJNV5OAEZBNT5OZYB4E564LGNKV74B2SNC5TQMYCRUJBDXA7KOVBU3CIXON7EXBKOMUYY')
# 求建立
cookies.append('__jdu=722641671; shshshfpb=fN8eCP6hgcVkYfKI13OXiKg%3D%3D; shshshfpa=5ae176f0-5f99-c600-2888-8fd7a08cd9d1-1577091051; unpl=V2_ZzNtbURVQR0hCBJXch1UAWIGEVhLUEFFIglPUShJD1FjBBZdclRCFnUUR1FnGlwUZwcZWURcQxZFCENkexhdBWMGEV5EVnMlMEsWBi8FXAdgChFfQlNLEHYJQVR4Hl0MZjMRXXJWcxN2C0FRex5aBFczElVGZ0MUdAlHXXMfVABiAyKL7MOWncrR9vGts%2fLQ6rPH1OmD%2fZNFCE9UeRFdBGcEEW1DZ0IlI2ZGVXoYXQRmARNUD1FAFnINRlN9GGwEVwA%3d; areaId=18; ipLoc-djd=18-1482-48939-0; __jdv=76161171|baidu|-|organic|not set|1619587612984; PCSYCityID=CN_430000_430100_430103; user-key=8a6b1a03-3ef4-4d3f-9471-d471ad306da9; __jda=122270672.722641670.1600331235.1618991834.1619587613.6; __jdc=122270672; shshshfp=f16389d35734a845c7df3abdad06c908; 3AB9D23F7A4B3C9B=XTPJVSTTTAHBPIV4MFU5LPVVKPKHUTSMHGJY5UO5HSO3HDJ646TNASQINXXXEXUUSUS4QDPC53AYDWZXZU7XPCWSJU; shshshsID=80ab06b65c6c53c89bf0e08212c42592_7_1619587763111; __jdb=122270672.10.722641670|6.1619587613')
# 火狐cookie

cookies.append('__jda=122270672.16062072967971011004577.1606207297.1620012311.1620015995.38; shshshfp=723f4914e66add5236825bef4f1abd55; shshshfpa=99287815-8bf7-7648-8055-5004dcab97de-1614853083; shshshfpb=f0KBc%20PZTpO%203e9MTziuavw%3D%3D; __jdv=76161171|baidu|-|organic|not set|1619195975771; TrackID=1UfENu1LgYVwEVOSMJ6GimirOTQLtVvzZF5eWVP6H9UfOSGhI1bLrmLPNnS9Twn5Trx2h8egu4ZvNLn7r8KHXq_7_-WeL0RNF2oM1jl9FBAipkgMQxh1QWyJXZNDsnFdy; pinId=YMZoeXBNvKgoa3Z7zHp0p7V9-x-f3wj7; pin=jd_7551a6ebe0b16; unick=jd_7551a6ebe0b16; _tp=33QCYO9WWug7OIp5VZvueuPTU8349VfwTCZvHs7QzGs%3D; _pst=jd_7551a6ebe0b16; user-key=aace645f-0675-4b09-a4b4-960f54a335bf; ipLoc-djd=18-1482-48939-0; areaId=18; mt_xid=V2_52007VwMVU1pbU1wcQR5aDWEDE1JdWVNfG08fbARmAhoCX1oBRh8bH1UZYgARB0FQBw4XVR9fAjVQG1cKWVcKHXkaXQZnHxNXQVtbSx9KEl0FbAAbYl1oUmodTxpZDW4FFFdbaFdZGUo%3D; __jdc=122270672; PCSYCityID=CN_430000_430100_430103; shshshsID=f762afc6cee53cfba0404cc2b1f087c7_2_1620016006417; __jdb=122270672.2.16062072967971011004577|38.1620015995')
cookies.append('__jda=122270672.16062072967971011004577.1606207297.1619966140.1620012311.37; shshshfp=723f4914e66add5236825bef4f1abd55; shshshfpa=99287815-8bf7-7648-8055-5004dcab97de-1614853083; shshshfpb=f0KBc%20PZTpO%203e9MTziuavw%3D%3D; __jdv=76161171|baidu|-|organic|not set|1619195975771; TrackID=1UfENu1LgYVwEVOSMJ6GimirOTQLtVvzZF5eWVP6H9UfOSGhI1bLrmLPNnS9Twn5Trx2h8egu4ZvNLn7r8KHXq_7_-WeL0RNF2oM1jl9FBAipkgMQxh1QWyJXZNDsnFdy; pinId=YMZoeXBNvKgoa3Z7zHp0p7V9-x-f3wj7; pin=jd_7551a6ebe0b16; unick=jd_7551a6ebe0b16; _tp=33QCYO9WWug7OIp5VZvueuPTU8349VfwTCZvHs7QzGs%3D; _pst=jd_7551a6ebe0b16; user-key=aace645f-0675-4b09-a4b4-960f54a335bf; ipLoc-djd=18-1482-48939-0; areaId=18; mt_xid=V2_52007VwMVU1pbU1wcQR5aDWEDE1JdWVNfG08fbARmAhoCX1oBRh8bH1UZYgARB0FQBw4XVR9fAjVQG1cKWVcKHXkaXQZnHxNXQVtbSx9KEl0FbAAbYl1oUmodTxpZDW4FFFdbaFdZGUo%3D; __jdc=122270672; PCSYCityID=CN_430000_430100_430103; __jdb=122270672.3.16062072967971011004577|37.1620012311; shshshsID=df2397d8ab9029f317b46335e705c98a_2_1620012404921')
cookies.append('__jda=122270672.16062072967971011004577.1606207297.1619544211.1619582467.31; shshshfp=723f4914e66add5236825bef4f1abd55; shshshfpa=99287815-8bf7-7648-8055-5004dcab97de-1614853083; shshshfpb=f0KBc%20PZTpO%203e9MTziuavw%3D%3D; qrsc=3; __jdv=76161171|baidu|-|organic|not set|1619195975771; PCSYCityID=CN_430000_430100_430103; TrackID=12ad-d_3UIN7_33qz_XSetB1CX-9v6uNybKej8BW3J5Ghy02OZG0lDGO97E1Vm4ZC6286arsx81lMbCMLs1w2C00ERRbLk0PbDidtuwvd4cKPRBSeITX50Ymi99CHHdk0; pinId=YMZoeXBNvKgoa3Z7zHp0p7V9-x-f3wj7; pin=jd_7551a6ebe0b16; unick=jd_7551a6ebe0b16; _tp=33QCYO9WWug7OIp5VZvueuPTU8349VfwTCZvHs7QzGs%3D; _pst=jd_7551a6ebe0b16; user-key=aace645f-0675-4b09-a4b4-960f54a335bf; __jdc=122270672; ipLoc-djd=18-1482-48939-0; rkv=1.0; areaId=18; __jdb=122270672.13.16062072967971011004577|31.1619582467; shshshsID=d4c83b3e4c6d49e6f553bde25c0f9f4c_7_1619582600416; wlfstk_smdl=ritvusdashwf40jodlr26106jhcs5xmw; mt_xid=V2_52007VwMVU1pbU1wcQR5aDWEDE1JdWVNfG08fbARmAhoCX1oBRh8bH1UZYgARB0FQBw4XVR9fAjVQG1cKWVcKHXkaXQZnHxNXQVtbSx9KEl0FbAAbYl1oUmodTxpZDW4FFFdbaFdZGUo%3D; thor=C2B3FECD4004FC7A4B1EEE48217FFE561BF8A38B2C2CB2F779536A999333AA661B78A83324172C7DC624111064106B949A9474456F143F0FAB41E2052B0EDEEC44DA6BD9DA7AA26455382A856F01EBD85BDA70746687EA9D6BE9AA2AC895B86E24A1BC0ADF6BBD310E6B35466B527C4BBD524676E519C1C7EC487F37481598AFED0A4AF76EA63078A0B231900D8F758868BB2790DFAFBEA7DB4F94C26DD83766; ceshi3.com=000')
# # 谷歌cookie
cookies.append('__jdv=122270672|direct|-|none|-|1619161573991; __jdu=16191615739901372208880; areaId=18; ipLoc-djd=18-1482-48939-0; pinId=YMZoeXBNvKgoa3Z7zHp0p7V9-x-f3wj7; pin=jd_7551a6ebe0b16; unick=jd_7551a6ebe0b16; _tp=33QCYO9WWug7OIp5VZvueuPTU8349VfwTCZvHs7QzGs%3D; _pst=jd_7551a6ebe0b16; PCSYCityID=CN_430000_430100_430103; shshshfpa=2cd8a639-e830-2a6b-4978-26a7d423d4a6-1619163509; shshshfp=c34c9f95fc919fc06e47c3c1e3084e82; shshshfpb=iClIOlwYjItDqRD9Q8%20Z%20sw%3D%3D; qrsc=3; TrackID=1Ys1h8gUSdI1Szmsucsz08NiKE1iCzpgdMwvBCUm0BFUI-EzOkXk6FYDyZy9Rog4s3DsYQUCNm4PMeSJGqBm0NWPu0i5Lp0MVnUzbi7hJiJ-0WAiGPXnLD3plMHi2lyDa; __jda=122270672.16191615739901372208880.1619161574.1619423876.1619582664.5; __jdb=122270672.1.16191615739901372208880|5.1619582664; __jdc=122270672; shshshsID=ac71be8326bed751931c781eb013821b_1_1619582664927; rkv=1.0; 3AB9D23F7A4B3C9B=M62N7F6JIJIKH3XC4UBOVH2GRD5RJL6TNQXB7XNJXEYIALUTIJYJ4V6AVDLR2CZGI5ZGH2E7JQUELBZY76KR35VRKA')
# cookies.append('__jdv=122270672|direct|-|none|-|1619161573991; __jdu=16191615739901372208880; areaId=18; ipLoc-djd=18-1482-48939-0; pinId=YMZoeXBNvKgoa3Z7zHp0p7V9-x-f3wj7; pin=jd_7551a6ebe0b16; unick=jd_7551a6ebe0b16; _tp=33QCYO9WWug7OIp5VZvueuPTU8349VfwTCZvHs7QzGs%3D; _pst=jd_7551a6ebe0b16; PCSYCityID=CN_430000_430100_430103; shshshfpa=2cd8a639-e830-2a6b-4978-26a7d423d4a6-1619163509; shshshfp=c34c9f95fc919fc06e47c3c1e3084e82; shshshfpb=iClIOlwYjItDqRD9Q8%20Z%20sw%3D%3D; qrsc=3; __jda=122270672.16191615739901372208880.1619161574.1619423876.1619582664.5; __jdc=122270672; rkv=1.0; wlfstk_smdl=few785q8wea2pofy11ua7p97qjc0twq9; TrackID=11vjgKMfLZFVGZRTQfAPBKbQCh3BmKoAPNje0CYImtI4uyKGeYcxsQjseFzrjDqoe1_PnfkMntiTVLFZPbJTvmFSxJrvzLO5a2TGyOT4MVdB6k5XYy2A-LBYXv5kiyGZq; thor=C2B3FECD4004FC7A4B1EEE48217FFE561BF8A38B2C2CB2F779536A999333AA6624AE2760BE1D99F2C347FC28541CA6202401A81F35DF502E25BAADF0726B61EDBE610527C219AD5181A984B64CAB0F73CA66DCC5D7119BC2053B16DEF9B2604C54DDD92127CF903EA8D224FB72C109F01254517439E75F2FEEC132191D517D64F59D5CDE6F2ACA95002EE45F1CA12C08C42F9B25A732DB3D8477E77E3E2A39E3; ceshi3.com=000; __jdb=122270672.5.16191615739901372208880|5.1619582664; shshshsID=ac71be8326bed751931c781eb013821b_3_1619582773345; 3AB9D23F7A4B3C9B=M62N7F6JIJIKH3XC4UBOVH2GRD5RJL6TNQXB7XNJXEYIALUTIJYJ4V6AVDLR2CZGI5ZGH2E7JQUELBZY76KR35VRKA')
# # 微软Cookie
cookies.append('__jdv=122270672|direct|-|none|-|1619436080371; __jdu=1619436080367741819767; shshshfpa=cee1d2a0-53ac-64b3-74f7-597890a4fd08-1619436081; areaId=18; ipLoc-djd=18-1482-48939-0; TrackID=1lDnfHlutU2cQ5HM-w0bKjGd0X80gm88ga3ciwnpD6gzqf2lrJPryl5_GgHzGXqjgCnmOLTWj7fPpKEfdWlcWgF2g7mB6quto_PV9GLcqvdE7zgPzFNurBXjqqy03VURW-PbToNr_ozHUrKDSNA1d5A; pinId=YMZoeXBNvKgoa3Z7zHp0p7V9-x-f3wj7; pin=jd_7551a6ebe0b16; unick=jd_7551a6ebe0b16; _tp=33QCYO9WWug7OIp5VZvueuPTU8349VfwTCZvHs7QzGs%3D; _pst=jd_7551a6ebe0b16; shshshfpb=lbiz4hWa%20LHJx%2FWlcdE0srQ%3D%3D; __jdc=122270672; __jda=122270672.1619436080367741819767.1619436080.1619455055.1619582899.3; wlfstk_smdl=k5795s61cwidaj4hlal91arxcz7btu4d; __jdb=122270672.3.1619436080367741819767|3.1619582899; shshshfp=9349ace6f962073af578c0e1c0e56b26; shshshsID=c67bb3c3ad9a0830f12a289e1bb113b4_1_1619582905167; qrsc=1; rkv=1.0; 3AB9D23F7A4B3C9B=IUFOXU4NBTLJMKPN7F6LFNVVEWVQEYJWG246PMMAXL2TUCA7KL4URKM4VOQV2VBRFQH62ZLKUBIDSVLCQSZITDUGK4')
# cookies.append('__jdv=122270672|direct|-|none|-|1619436080371; __jdu=1619436080367741819767; shshshfpa=cee1d2a0-53ac-64b3-74f7-597890a4fd08-1619436081; areaId=18; ipLoc-djd=18-1482-48939-0; pinId=YMZoeXBNvKgoa3Z7zHp0p7V9-x-f3wj7; pin=jd_7551a6ebe0b16; unick=jd_7551a6ebe0b16; _tp=33QCYO9WWug7OIp5VZvueuPTU8349VfwTCZvHs7QzGs%3D; _pst=jd_7551a6ebe0b16; shshshfpb=lbiz4hWa%20LHJx%2FWlcdE0srQ%3D%3D; __jda=122270672.1619436080367741819767.1619436080.1619455055.1619582899.3; __jdc=122270672; shshshfp=9349ace6f962073af578c0e1c0e56b26; rkv=1.0; wlfstk_smdl=paqe6zi7iw6wwpyqd12w60z9q1frmjnc; TrackID=1KNDk7A2X2BqDTGyXYIwsZ-HJMFpUnY2MQ_8daDbWAuGzbVxpJalAgPIsfvkgdKkv8cVIno_0hAAGRGqV1vl2cqlNv1XulV-VdMkse2azRvkcQT2RrKsaxkYzrQXM2yQIF5NXWjk9d8bB16siGYwwRA; thor=C2B3FECD4004FC7A4B1EEE48217FFE561BF8A38B2C2CB2F779536A999333AA667415B9FD5E3666020D7D86004EE2648582C5A842351D4895A5F0356CC38108964A06195C9CC9020579190F52EFB99313E4415621C4DDAAC89C92F115CDFF93B36F4307C7BE8E4F7119A6B5B49C3E4E30ACEEF8F48F6BCCA52D1CF456A36931059B2D4B2B88AD827AFA92EEC76FF4EC92E772F2AE4AEB5B595A1C9F9E1F4E2DE8; ceshi3.com=000; __jdb=122270672.7.1619436080367741819767|3.1619582899; shshshsID=c67bb3c3ad9a0830f12a289e1bb113b4_3_1619582990083; qrsc=3; 3AB9D23F7A4B3C9B=IUFOXU4NBTLJMKPN7F6LFNVVEWVQEYJWG246PMMAXL2TUCA7KL4URKM4VOQV2VBRFQH62ZLKUBIDSVLCQSZITDUGK4')

# Number of backup cookie values
# NOTE(review): `global` at module level is a no-op; kept as-is.
global COOKIE_INDEX
global COOKIE_LENGTH
COOKIE_INDEX = 0  # which cookie in `cookies` to use next
COOKIE_LENGTH = len(cookies)


# Number of proxies currently held in proxy_host (0 = get_proxy() not run yet)
C_Length = 0
# Shared HTTP session reused by all requests
sess = requests.Session()

# Default request headers (desktop Firefox user agent)
headers = {
    # 'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
    'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
}
# Local HTTP proxy (e.g. a debugging proxy listening on 127.0.0.1:8080)
proxies = dict(
    http='http://127.0.0.1:8080'
)

# Brand list regex — groups: brand id, href, searchlog args, title
brand_regix = '''<li id="brand-(.*?)".*?data.*?style="display:block;">.*?<a href="(.*?)".*?rel="nofollow".*?onclick="searchlog\((.*?)\)".*?title="(.*?)">.*?<i></i>.*?</a>.*?</li>'''
# Category (classify) regex — groups: data-v, href, searchlog args, label
classify_regix = '''<li>.*?<a title=".*?".*?data-v="(.*?)".*?href="(.*?)".*?onclick="searchlog\((.*?)\)".*?><i></i>(.*?)</a>.*?</li>'''
## hot items
# hot_regix = '''<li data-group="(.*?)">.*?<a href="(.*?)".*?rel="nofollow".*?onclick="searchlog\((.*?)\)".*?><i></i>(.*?)</a>'''
# Config section regex — groups: data-group, href, searchlog args, label
config_regix = '''<li data-group="(.*?)">.*?<a href="(.*?)".*?rel="nofollow".*?onclick="searchlog\((.*?)\)".*?><i></i>(.*?)</a>.*?</li>'''
# Goods listing regex — groups: price, item href, product id, name, shop href
goods_regix =  '''<div class="p-price">.*?<strong class=".*?".*?data-done="1".*?>.*?<em>￥</em><i>(.*?)</i>.*?</strong>.*?<div class="p-name p-name-type-2">.*?<a target="_blank".*?title=".*?".*?href="(.*?)".*?onclick="searchlog\(.*?'(.*?)'.*?flags.*?\);">.*?<em>(.*?)</em>.*?<strong><a id=".*?".*?target="_blank".*?href="(.*?)".*?onclick=".*?">'''

# Query-string suffixes appended to a search URL
# composite ranking
synthesize_url = '&click=1'
# sort by sales
sales_sort_url = '&psort=3&click=0'
# sort by comment count
comment_sort_url = '&psort=4&click=0'
# newest products first
newpro_url = '&psort=5&click=0'

# Custom input helper: prompt with the spider prefix and return what was typed.
def my_input(s):
    return input('jdspider > ' + s)

# Custom output helper: print `s` behind the rich-formatted result prefix.
def my_output(s):
    prefix = '[yellow]jdresult > [/]'
    print(prefix + s)

# Custom table printer.
# title   : table heading (underlined with '=' sized by its gbk byte length)
# columns : column headers; lowercased they are the row-dict keys
# data    : list of row dicts
# format  : per-column field widths in gbk bytes used for padding
def my_print(title:str,columns:list,data:list,format:list):
    header_line = ''
    rule_line = ''
    for col_idx, col in enumerate(columns):
        byte_len = len(col.encode('gbk'))
        pad = (format[col_idx] - byte_len) * ' '
        header_line += col + pad
        rule_line += '-' * byte_len + pad
    banner = '=' * len(title.encode('gbk'))
    print('\n' + title + '\n' + banner + '\n\n\t#\t' + header_line + '\n\t-\t' + rule_line)
    for row_idx, row in enumerate(data):
        line = ''
        for col_idx, col in enumerate(columns):
            key = col.lower()
            cell = row[key]
            # URLs are rendered as a clickable rich hyperlink instead of raw text
            if key == 'url':
                cell = '[link=%s][i][yellow]Please click[/i] Me[/]' % row[key]
            cell_text = str(cell)
            line += cell_text + (format[col_idx] - len(cell_text.encode('gbk'))) * ' '
        print('\t' + str(row_idx) + "\t" + line)
    print('')

# Fetch fresh proxy addresses from the proxy vendor API, probe each against a
# JD comment endpoint, and store the working ones in the global proxy_host.
# Loops until at least one proxy passes the probe; updates C_Length.
def get_proxy():
    my_output("请稍等，IP失效，正在请求新的代理IP...")
    global proxy_host
    global COOKIE_INDEX
    proxy_host = []
    global C_Length
    C_Length = 0
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0'
        # (hard-coded cookie sample removed from this comment)
    }
    # apiUrl = 'http://ip.memories1999.com/api.php?dh=1290551019699154977&sl=30&dk=3128&xl=国内'
    apiUrl = 'http://kuyukuyu.com/api/projects/get?uuid=a007d266-bc39-4518-93a4-3bcaa58d5ee0'
    # apiUrl = 'http://kuyukuyu.com/api/projects/get?uuid=ed3e8a7d-86b5-489d-9985-5d5bd974e2ea'
    while C_Length < 1:
        # headers['cookie'] = cookies[COOKIE_INDEX]
        # Fresh pseudo-cookie per request so we are not tied to one session
        headers['cookie'] = '__jdu='+ str(time.time())
        text = requests.get(url=apiUrl,headers=headers).text
        # proxyst = text.split('\r\n')[:-1]
        # assumes the API returns a single "host:port" in the body — TODO confirm
        proxyst = [text]
        proxys = []
        for item in proxyst:
            tmp_proxy_host = "https://"+item
            proxy_temp = {"https":tmp_proxy_host}
            proxys.append(proxy_temp)
        # Probe URL: a JD comment endpoint used to verify each proxy works
        url = "https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&productId=10026361140239&score=0&sortType=5&page=0&pageSize=-1"
        for item in proxys:
            try:
                res = requests.get(url=url,proxies=item,headers=headers,timeout=0.8).text
                my_output(item['https'] + "\tYes!")
                if len(res) == 0:
                    # Empty body usually means the cookie was rejected
                    # (manual cookie rotation is disabled)
                    a = 0
                else:
                    b = 0
                proxy_host.append(item)
            except Exception as e:
                # Probe failed (timeout / connection error): proxy is dead
                my_output(item['https'] + "\tNo!")
        C_Length = len(proxy_host)
    # Report how many proxies survived the probe
    my_output('共获取到%s条代理IP！'%str(C_Length))

# Format a unix timestamp (seconds since epoch, str or number) as
# "YYYY/MM/DD HH:MM:SS" in local time.
def format_time(timestamp:str)->str:
    # Bug fix: the original read an undefined global `item['time']` instead of
    # the `timestamp` parameter, so every call raised NameError.
    return time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(float(timestamp)))

# Create every directory in folders_list (defaults to ['tmp']) if missing.
def create_folder(folders_list = None):
    # `is None` instead of `== None`; default handled inside to avoid a
    # shared mutable default argument.
    if folders_list is None:
        folders_list = ['tmp']
    for folder in folders_list:
        # makedirs (unlike os.mkdir) also creates missing parent directories;
        # exist_ok removes the check-then-create race of the original code.
        os.makedirs(folder, exist_ok=True)

# Recursively delete every directory in folders_list that exists.
def del_folder(folders_list):
    import shutil  # local import; keeps the file's top-level imports untouched
    for folder in folders_list:
        if os.path.exists(folder):
            # Bug fix: the original shelled out with an unquoted
            # `os.system('rm -rf ' + folder)`, which fails on Windows and on
            # paths containing spaces; shutil.rmtree is portable and safe.
            shutil.rmtree(folder, ignore_errors=True)

# Download an image to ./image/<img_name>.jpg; does nothing if it exists.
def download_img(img_url,img_name):
    # Ensure the destination folder exists
    folder = './image/'
    create_folder([folder])
    # Target path for the downloaded image
    img_path = folder+img_name+'.jpg'
    # Skip the download entirely if the file is already there
    if os.path.exists(img_path):
        return
    # stream=True defers the body download until iter_content() is consumed;
    # the connection stays open while streaming.
    r = sess.get(img_url, headers=headers, stream=True)
    if r.status_code == 200:
        # Bug fix: the original re-opened the file once per 512-byte chunk
        # and never closed the handles; open once and let `with` close it.
        with open(img_path, 'ab') as f:
            for chunk in r.iter_content(chunk_size=512):
                if chunk:
                    f.write(chunk)
    # Release the connection back to the session's pool
    r.close()

# Fetch a JD page and extract brand/category entries via `regix`.
# Returns a list of dicts keyed brand_id/url/brand/name (url made absolute),
# or None when the response status is not 200.
def get_url_data(url,regix):
    response = sess.get(url=url,headers=headers)
    if response.status_code != 200:
        return None
    page = response.text
    # re.S makes '.' match newlines so patterns can span lines
    matches = re.findall(regix, page, re.S)
    data = list()
    for item in matches:
        entry = {
            'brand_id': item[0],
            'url': 'http://search.jd.com/' + item[1],
            'brand': item[2],
            'name': item[3],
        }
        data.append(entry)
    return data

# Fetch the comment-summary JSONP for one product and return aggregate stats.
# Returns the productCommentSummary dict (with a 'count' total added and
# unused fields removed), or None on a non-200 response.
def get_comment_info(url):
    # NOTE(review): proxy_ip must hold a single proxy dict by call time
    # (it is assigned from proxy_host in get_brand_data) — confirm callers
    r = sess.get(url=url,headers=headers,proxies = proxy_ip,timeout=2)
    if r.status_code == 200:
        t = r.text
        # Strip the JSONP wrapper: fetchJSON_comment98( ... );
        comments_json = t[len('fetchJSON_comment98('):][:-2]
    else:
        return None
    comm_json = json.loads(comments_json)
    # Individual review entries (not used further below)
    comments = comm_json.get('comments')
    # Aggregate rating statistics for the product
    comm_data = comm_json.get('productCommentSummary')
    # Remove fields we do not store
    pop_key = ['defaultGoodCount','defaultGoodCountStr','commentCount','goodCount','generalCount','poorCount','videoCount','afterCount','showCount','oneYear','sensitiveBook','plusCount','plusCountStr','buyerShow','installRate']
    for item in pop_key:
        comm_data.pop(item)
    # Total number of scored ratings across 1..5 stars
    count = comm_data['score1Count'] + comm_data['score2Count'] + comm_data['score3Count'] + comm_data['score4Count'] + comm_data['score5Count']
    comm_data['count'] = count
    return comm_data
    # # 获取评论内容
    # comment_info = list()
    # for comment in comments:
    #     # 商品名称
    #     comment_info.append(comment.get('referenceName'))
    #     # 商品ID
    #     comment_info.append(comment.get('referenceId'))        
    #     # 评论内容
    #     comment_info.append(comment.get('content'))
    #     # 评论时间
    #     comment_info.append(comment.get('creationTime'))
    #     # 评论人昵称
    #     comment_info.append(comment.get('nickname'))
    #     # 顾客会员等级
    #     comment_info.append(comment.get('userLevelName'))
    #     # 购物使用的平台
    #     comment_info.append(comment.get('userClientShow'))

# Fetch a search-results page and extract goods entries via `regix`.
# Returns a list of dicts keyed price/url/id/name/comment_url/comment_page,
# or None when the response status is not 200.
def get_goods_data(url,regix):
    r = sess.get(url=url,headers=headers)
    if r.status_code != 200:
        return None
    t = r.text
    # re.S makes '.' match newlines so patterns can span lines
    res = re.findall(regix, t, re.S)
    data = list()
    for item in res:
        # Bug fix: the original zipped 5 keys against only 4 values, silently
        # dropping the 'comment_page' pair; build the dict explicitly instead.
        tmp = {
            'price': item[0],
            'url': 'https:' + item[1],
            'id': item[2],
            'name': item[3],
        }
        # Comment-summary API endpoint for this product id
        tmp['comment_url'] = 'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&productId=%s&score=0&sortType=5&page=0&pageSize=-1'%item[2]
        # Shop page captured by the 5th regex group
        tmp['comment_page'] = 'https:' + item[4]
        data.append(tmp)
    return data

# Query rows from the database.
# table_name: 'brand' (filter by classify = flag), 'goods' / 'comment_info'
# (filter by brand_classify = flag), or 'classify' (all rows).
# Raises ValueError for any other table name.
def select_data(table_name:str,flag:str=None):
    # NOTE(review): SQL built by string interpolation — acceptable only
    # because table_name/flag come from internal data, never user input.
    if table_name == 'brand':
        sql = 'select * from %s where classify = "%s"'%(table_name,flag)
    elif table_name == 'goods' or table_name == 'comment_info':
        sql = 'select * from %s where brand_classify = "%s"'%(table_name,flag)
    elif table_name == 'classify':
        sql = 'select * from %s'%(table_name)
    else:
        # Bug fix: an unknown table previously crashed later with
        # UnboundLocalError on `sql`; fail fast with a clear message.
        raise ValueError('unsupported table: %s' % table_name)
    op_mysql = OperationMysql()
    data = op_mysql.search(sql)
    return data

# Delete every row of `table_name` whose classify equals `brand_name`.
def delete_data(table_name:str,brand_name:str):
    statement = 'delete from %s where classify = "%s"'%(table_name,brand_name)
    OperationMysql().delete(statement)

# Insert every dict in `data` into `table_name`.  All dicts must share the
# same keys, and the keys must match the table's column names.
def insert_data(table_name:str,data:list):
    # Robustness: the original raised IndexError on an empty list.
    if not data:
        return
    op_mysql = OperationMysql()
    columns = data[0].keys()
    sql_keys = ', '.join(columns)
    # Hoisted out of the loop: the placeholder list and the statement are
    # identical for every row (the original rebuilt them per iteration).
    sql_values = ', '.join(['%s'] * len(columns))
    sql = "insert into " + table_name + "(%s) "%sql_keys + "values(%s);"%sql_values
    for item in data:
        op_mysql.insert(sql,list(item.values()))

# Append rows (a list of dicts sharing the same keys) to the CSV at `path`.
def write_csv(path,data):
    # Ensure the shared ./data/ output folder exists (kept for compatibility
    # with callers that pass paths inside it).
    folder = './data/'
    if not os.path.exists(folder):
        os.makedirs(folder)
    # Renamed from `headers`, which shadowed the module-level HTTP headers.
    fieldnames = data[0].keys()
    # Bug fix: the original wrote the header row on EVERY call, so appending
    # a second batch repeated the header mid-file; write it only when the
    # file is new or empty.
    need_header = not os.path.exists(path) or os.path.getsize(path) == 0
    with open(path,'a',newline='',encoding='utf-8') as f:
        writer = csv.DictWriter(f,fieldnames=fieldnames)
        if need_header:
            writer.writeheader()
        writer.writerows(data)

# Load a CSV file back into a list of row dicts.
# Returns None when the file does not exist.
def read_csv(path):
    if not os.path.exists(path):
        return None
    with open(path,'r',encoding='utf-8') as f:
        return [row for row in csv.DictReader(f)]

# Aggregate comment statistics for one brand (row `index` of `data` from the
# brand table): fetches and DB-caches its goods list and per-goods comment
# summaries, then returns a dict of totals (name, comment_num, goods_num,
# good_rate, per-star counts, 5- and 4+5-star rates).
def get_brand_data(data,index):
    brand_index = index
    classify = data[brand_index]['classify']
    brand_classify = data[brand_index]['brand_classify']
    # Use the DB cache when the goods for this brand were already crawled
    goods_data = select_data('goods',brand_classify)
    if len(goods_data) == 0:
        # Brand search URL, sorted by comment count
        brand_url = data[brand_index]['url'] + comment_sort_url
        # Retry until the goods page yields at least one entry
        while 1:
            goods_data = get_goods_data(brand_url,goods_regix)
            if len(goods_data) > 0:
                break
        # Tag each goods row with its brand_classify / classify
        for item in goods_data:
            item['brand_classify'] = brand_classify
            item['classify'] = classify
        # Cache in the DB
        insert_data('goods',goods_data)
    # Use the DB cache when comment summaries were already crawled
    comment_info = select_data('comment_info',brand_classify)
    if len(comment_info) == 0:
        # get_proxy() has not run yet in this session
        if C_Length == 0:
            # Fetch proxy IPs
            get_proxy()
        # Collect a comment summary for every goods row
        comment_info = list()
        length = len(goods_data)
        # Show which brand is being crawled
        print('正在爬取#%s:%s' % (brand_index,data[brand_index]['name']))
        for i in track(range(length)):
            # cookies
            global C_Index
            global proxy_ip
            # Rotate proxies until one request succeeds
            while 1:
                try:
                    # headers['cookie'] = cookies[COOKIE_INDEX]
                    # Fresh pseudo-cookie per request
                    headers['cookie'] = '__jdu='+ str(time.time())
                    proxy_ip = proxy_host[C_Index]
                    tmp = get_comment_info(goods_data[i]['comment_url'])
                    break
                except Exception as e:
                    # Current proxy failed: advance to the next, refreshing
                    # the pool when it is exhausted
                    C_Index = C_Index + 1
                    if C_Index == C_Length:
                        get_proxy()
                        C_Index = 0
                    my_output('Proxy Host:%s->%s'%(proxy_ip['https'][8:],proxy_host[C_Index]['https'][8:]))
            # Tag the summary with brand_classify / classify
            tmp['brand_classify'] = brand_classify
            tmp['classify'] = classify            
            comment_info.append(tmp)
        # Cache in the DB
        insert_data('comment_info',comment_info)
        # Re-read from the DB to drop rows with duplicate counts
        comment_info = select_data('comment_info',brand_classify)
    # Totals for this brand (comment count is used as a sales proxy)
    goods_show_data = list()
    goods_num = len(goods_data)
    # Running totals across all goods of the brand
    comment_num = 0
    score1Count = 0
    score2Count = 0
    score3Count = 0
    score4Count = 0
    score5Count = 0
    good_rate_count = 0.0
    # Accumulate
    for item in comment_info:
        comment_num += int(item['count'])
        score1Count += int(item['score1Count'])
        score2Count += int(item['score2Count'])
        score3Count += int(item['score3Count'])
        score4Count += int(item['score4Count'])
        score5Count += int(item['score5Count'])
        # goodRate is weighted by each product's comment count
        good_rate_count += float(item['goodRate']) * int(item['count'])
    brand_comm_num = dict()
    brand_comm_num['name'] = data[index]['name']
    brand_comm_num['comment_num'] = comment_num
    brand_comm_num['goods_num'] = goods_num
    if good_rate_count != 0.0:
        brand_comm_num['good_rate'] = '%.2f%%'%(good_rate_count / comment_num * 100)
    else:
        brand_comm_num['good_rate'] = '0.0%'
    brand_comm_num['score1count'] = score1Count
    brand_comm_num['score2count'] = score2Count
    brand_comm_num['score3count'] = score3Count
    brand_comm_num['score4count'] = score4Count
    brand_comm_num['score5count'] = score5Count
    # Guard against division by zero when no comments were found
    if comment_num == 0:
        brand_comm_num['5score_rate'] = '0.0%'
        brand_comm_num['45score_rate'] = '0.0%'
    else:
        brand_comm_num['5score_rate'] = '%.2f%%'%(score5Count/comment_num*100)
        brand_comm_num['45score_rate'] = '%.2f%%'%((score5Count+score4Count)/comment_num*100)
    return brand_comm_num

def draw_diagram(data,classify_name):
    """Render three linked HTML charts for the top brands of one category.

    Produces (via pyecharts, dark theme, 1440x820):
      1. A rose-type pie chart of brand purchase popularity (comment counts).
      2. A bar chart of each brand's "real" positive rate (4+5-star share).
      3. A horizontal bar chart contrasting JD's reported good-rate with the
         4+5-star rate.
    Each chart's title is a hyperlink to the next chart, so the three can be
    browsed in a cycle (pie -> good-rate bar -> contrast bar -> pie).

    :param data: list of dicts, each with keys ``'name'``, ``'comment_num'``,
                 ``'45score_rate'`` and ``'good_rate'`` (the two rates are
                 percentage strings ending in ``'%'``, e.g. ``'97.25%'``).
    :param classify_name: product-category name; used in the output file
                          names and as the x-axis label of the rate bar chart.
    :return: ``[pie_url, good_rate_bar_url, contrast_bar_url]`` file:// URLs.
    """
    # Output files live on the WSL mount; the links use the Windows drive path.
    base_path = '/mnt/d/phpstudy_pro/WWW/spider_data/'
    base_url = 'file:///D:/phpstudy_pro/WWW/spider_data/'
    file_path_pie = base_path + classify_name + 'Pie.html'
    file_url_pie = base_url + classify_name + 'Pie.html'
    file_path_Gr_bar = base_path + classify_name + 'GoodRateBar.html'
    file_url_Gr_bar = base_url + classify_name + 'GoodRateBar.html'
    file_path_cb_bar = base_path + classify_name + 'contrastBar.html'
    file_url_cb_bar = base_url + classify_name + 'contrastBar.html'

    labels = [item['name'] for item in data]
    comment_nums = [item['comment_num'] for item in data]
    data_pair = [list(z) for z in zip(labels, comment_nums)]

    # --- Chart 1: brand purchase-popularity rose pie ---
    (
        Pie(
            init_opts=opts.InitOpts(
                theme=ThemeType.DARK,
                # entry animation
                animation_opts=opts.AnimationOpts(animation_delay=100, animation_easing="elasticOut"),
                width='1440px',
                height='820px',
            )
        )
        .add(
            # series name shown in tooltips
            series_name="购买热度",
            # [(key1, value1), (key2, value2), ...]
            data_pair=data_pair,
            # "area": radius encodes the value (rose chart); alt. is "radius"
            rosetype="area",
            # radius relative to half of min(container width, height)
            radius="55%",
            # center relative to container width / height
            center=["50%", "50%"],
            label_opts=opts.LabelOpts(is_show=False, position="center"),
        )
        .set_global_opts(
            title_opts=opts.TitleOpts(
                title="品牌购买热度",
                # clicking the title jumps to the good-rate bar chart
                title_link=file_url_Gr_bar,
                title_target='self',
                pos_left="center",
                # pixels from the container's top edge
                pos_top="20",
                title_textstyle_opts=opts.TextStyleOpts(color="#fff"),
            ),
            legend_opts=opts.LegendOpts(type_="scroll",pos_left="80%",orient="vertical",is_show=False),
        )
        .set_series_opts(
            tooltip_opts=opts.TooltipOpts(
                trigger="item", formatter="{a} <br/>{b}: {c} ({d}%)"
            ),
            label_opts=opts.LabelOpts(color="rgba(255, 255, 255, 0.3)"),
        )
        .render(file_path_pie)
    )

    # --- Chart 2: 4+5-star ("real good") rate per brand ---
    # strip the trailing '%' so the values plot as plain numbers
    rates_45 = [item['45score_rate'][:-1] for item in data]
    gr_bar = Bar(
        init_opts=opts.InitOpts(
            theme=ThemeType.DARK,
            animation_opts=opts.AnimationOpts(animation_delay=100, animation_easing="elasticOut"),
            width='1440px',
            height='820px',
        )
    )
    gr_bar.add_xaxis([classify_name])
    # BUGFIX: the original hard-coded series indices 0..9 and raised
    # IndexError whenever fewer than 10 brands were available; iterate
    # instead (still capped at 10 series, matching the old behavior).
    for brand_name, rate in zip(labels[:10], rates_45[:10]):
        gr_bar.add_yaxis(brand_name, [rate])
    gr_bar.set_series_opts(label_opts=opts.LabelOpts(position="top",formatter="{a}\n{c}%",))
    gr_bar.set_global_opts(
        title_opts=opts.TitleOpts(
            title="品牌好评率",
            # clicking the title jumps to the contrast bar chart
            title_link=file_url_cb_bar,
            title_target='self',
            pos_left="center",
            pos_top="20"
        ),
        yaxis_opts=opts.AxisOpts(name="好评率/%"),
        xaxis_opts=opts.AxisOpts(name="品牌"),
        legend_opts=opts.LegendOpts(is_show=False)
    )
    gr_bar.render(file_path_Gr_bar)

    # --- Chart 3: JD-reported good-rate vs 4+5-star rate, horizontal bars ---
    jd_rates = [item['good_rate'][:-1] for item in data]
    cb_bar = Bar(
        init_opts=opts.InitOpts(
            theme=ThemeType.DARK,
            animation_opts=opts.AnimationOpts(animation_delay=100, animation_easing="elasticOut"),
            width='1440px',
            height='820px',
        )
    )
    cb_bar.add_xaxis(labels)
    cb_bar.add_yaxis("京东好评率",jd_rates)
    cb_bar.add_yaxis("真实好评率",rates_45)
    # flip to horizontal bars so the long brand names stay readable
    cb_bar.reversal_axis()
    cb_bar.set_series_opts(label_opts=opts.LabelOpts(position="right",formatter="{a}\n{c}%",))
    cb_bar.set_global_opts(
        title_opts=opts.TitleOpts(
            title="好评率对比",
            # clicking the title jumps back to the popularity pie
            title_link=file_url_pie,
            title_target='self',
            pos_left="center",
            pos_top="20"
        ),
        yaxis_opts=opts.AxisOpts(name="好评率/%"),
        xaxis_opts=opts.AxisOpts(name="品牌"),
        legend_opts=opts.LegendOpts(is_show=False)
    )
    cb_bar.render(file_path_cb_bar)

    return [file_url_pie,file_url_Gr_bar,file_url_cb_bar]

if __name__ == '__main__':
    print('''\
                             .-"""-.    __                         \n\
                            /       \.-"  "-.                      \n\
                         __:  :\     ;       `.                    \n\
                  _._.-""  :  ; `.   :   _     \                   \n\
                .'   "-.  "   :   \ /;    \    .^.              .-,\n\
    .-".       :        `.     \_.' \'   .'; .'   `.            `JD\n\
 ,-"    \      ;\         \  '.     /". /  :/       `.      __ JD_,\n\
 :    '. \_    ; `.  __.   ;_  `-._/   Y    \         `.   ( JD".';\n\
 ;      \  `.  :   "-._    ; ""-./      ;    "-._       `--JD .'  ;\n\
:    .--.;   \  ;      l   '.    `.     ;        ""--.   JD  /   / \n\
;   /    :    \/       ;\  . "-.   \___/            __\JD .-"_.-"  \n\
;  /     L_    \`.    :  "-.J   "-._/  ""-._       ( JD\ /   /     \n\
: :      ; \    `.`.  ;     /"+.     ""-.   ""--.._JD-, `._."      \n\
 \;     :   \     `.`-'   _/ /  "-.___   "         \`-'            \n\
  `.    ;    \      `._.-"  (     ("--..__..---g,   \              \n\
    `. :      ;             /\  .-"\       ,-JD ;    ;             \n\
      \;   .-';    _   _.--"  \/    `._,-.-JD-' |    ;             \n\
       :     :---"" """        `.     _:'.`.\   :    ;\            \n\
        \  , :           spider  "-. (,j\ ` /   ;\(// \\           \n\
         `:   \                     "JD__.-"    '-\\   \;          \n\
           \   :                .--JD,             \;              \n\
            `--'                `JD`-'                             \n\
                              .-j                                  \n\
                                                                   \n\
                                                                   \n\
       =[ jd_spider v1.0.0-dev-                           ]        \n\
Xm_spider tip: [yellow]请合理输入Choose Option里的序号:)[/]''')
    # # 删除data文件
    # del_folder(['./data/'])
    # 输入商品大分类（例如手机，平板，电脑，衣服，鞋子等等）
    # # 初始化代理IP池
    # my_output("请稍等，正在请求2条以上的代理IP...")
    # 功能
    while 1:
        opt_list = [dict(option='爬取商品数据'),dict(option='查询已爬取商品类别'),dict(option='删除商品数据'),dict(option='查看数据目录'),dict(option='退出Xm_spider系统')]
        my_print('Choose Option',['Option'],opt_list,[10])
        # 输入功能选项序号
        opt_id = int(my_input('请输入功能选项序号：'))
        # 删除商品类别
        if opt_id == 2:
            search_data = select_data('classify')
            for item in search_data:
                item['time'] = format_time(item['time'])
            my_print('Classify Info',['Classify','Time'],search_data,[30,30])
            del_brand_id = my_input('请输入要删除商品序号(back返回)：')
            if del_brand_id == 'back':
                continue
            else:
                del_brand_id = int(del_brand_id)
            del_brand = search_data[del_brand_id]['classify']
            delete_data('classify',del_brand)
            delete_data('brand',del_brand)
            delete_data('goods',del_brand)
            delete_data('comment_info',del_brand)
            my_output('删除完成！')
        elif opt_id == 1:
            search_data = select_data('classify')
            for item in search_data:
                item['time'] = format_time(item['time'])
            my_print('Classify Info',['Classify','Time'],search_data,[30,30])
            my_output('查询完成！')
        elif opt_id == 3:
            file_data = list()
            file_path = 'file:///D:/phpstudy_pro/WWW/spider_data'
            file_path = "[link=%s][i]Please [red]click[/red][/i] [yellow]Me[/]"%file_path
            file_data.append(dict(path = file_path))
            my_print(title='Brand Popularity',columns=['Path'],data=file_data,format=[30])
            my_output('查询完成！')
        elif opt_id == 4:
            my_output('退出完成！')
            exit()
        elif opt_id == 0:
            # 进入功能后
            classify_name = my_input('请输入商品类别：')
            # 存储类别数据
            # 获取当前时间戳
            ticks = time.time()
            tmpDict = dict(classify=classify_name,time=ticks)
            insert_data('classify',[tmpDict])
            # 查看数据库中有没有该类别的数据
            data = []
            data = select_data('brand',classify_name)
            if len(data) == 0:
                # 如果数据库中没有改类别的数据
                url = 'http://search.jd.com/Search?keyword=' + classify_name
                # pp(dict(url=url))
                while 1:
                    try:
                        data = get_url_data(url,brand_regix)
                        data = data[:50]
                    except Exception as e:
                        traceback.print_exc()
                        exit()
                    if len(data) > 0:
                        break
                # 向data中加入商品类别
                for item in data:
                    item['classify'] = classify_name
                    item['brand_classify'] = classify_name + '|' + item['brand_id'] + '|' +  item['name'] 
                # # 存
                insert_data('brand',data)
            # write_csv('./data/brand.csv',data)
            # 打印
            my_print('Brand Type',['Classify','Name','Url'],data,[15,50,75])
            # 输入品牌对应id
            opt_list = [dict(option='全品牌购买热度'),dict(option='单品牌数据分析'),dict(option='退出Xm_spider系统')]
            my_print('Choose Option',['Option'],opt_list,[10])
            choose_id = int(my_input('请选择下一步操作：'))
            if choose_id == 1:
                input_brand_id = int(my_input('请输入商品序号：'))
            elif choose_id == 0:
                input_brand_id = 999
            elif choose_id == 2:
                my_output('退出完成！')
                exit()
            else:
                print('input error!')
                exit()
            brand_num = len(data)
            if input_brand_id < 0 or input_brand_id >= brand_num:
                # 记录所以品牌的评论数量
                all_brand_data = list()
                # 循环时记得设置sleep
                print('\n[yellow]爬取前20热门品牌...[/]')
                for i in range(20):
                    # 获取品牌评论人数数据
                    try:
                        brand_data = get_brand_data(data,i)
                    except Exception as e:
                        traceback.print_exc()
                        print(762)
                        exit()
                    all_brand_data.append(brand_data)
                my_output('爬取完成！')
                # 排序
                tmp_brand_data = sorted(all_brand_data, key = lambda i:i['comment_num'], reverse=True)
                top_10_brand_data = tmp_brand_data[:10]
                # pp(top_10_brand_data)
                # write_csv('./test.csv',top_10_brand_data)
                my_print(title='Top 10 brand',columns=['Name','Comment_Num','score1Count','score2Count','score3Count','score4Count','score5Count','45Score_Rate','Good_Rate'],data=top_10_brand_data,format=[30,13,13,13,13,13,13,13,13])
                # pp('top 10 brand:')
                # pp(top_10_brand_data)
                file_url = draw_diagram(top_10_brand_data,classify_name)
                final_data = list()
                link_pie = "[link=%s][i]Please [red]click[/red][/i] [yellow]Me[/]"%file_url[0]
                final_data.append(dict(brand=classify_name,diagram = link_pie))
                my_print(title='Brand Popularity',columns=['Brand','Diagram'],data=final_data,format=[30,30])
                my_output('分析完成！')
            elif 0 <= input_brand_id < brand_num:
                my_output('暂未实现！')
                continue
                get_brand_data(data,input_brand_id)

    # # # 获取data
    # # url = 'http://search.jd.com/Search?keyword=手机'
    # # # # 获取品牌
    # # data = get_url_data(url,brand_regix)
    # # write_csv('./data/brand.csv',data)
    # # # 获取分类
    # # data = get_url_data(url,classify_regix)
    # # write_csv('./data/classify.csv',data)
    # # # 获取配置
    # # data = get_url_data(url,config_regix)
    # # write_csv('./data/config.csv',data)
    # # 将数据写入文件
    # # 从csv中读取data
    # data = read_csv('./data/brand.csv')
    # if data == None:
    #     print("读取文件不存在！")
    #     exit()
    # # pp(data)
    # xiaomi = data[0]
    # newurl = xiaomi['url'] + comment_url
    # newdata = get_goods_data(newurl,goods_regix)
    # pp(newdata)
    # comm_data = list()
    # for item in newdata:
    #     newurl = item['comment_url']
    #     tmp = get_comment_info(newurl)
    #     comm_data.append(tmp)
    # write_csv('./data/comm_data.csv',comm_data)


    # # 删除
    # image = './image'
    # data = './data'
    # del_folder([image,data])

# regix = '''<li id="(.*?)" data-initial="(.*?)" style="display:block;">.*?<a href="(.*?)".*?rel="nofollow" onclick="searchlog\((.*?)\)" title="(.*?)">.*?<i></i>.*?</a>.*?</li>'''

# url = 'http://search.jd.com/Search?keyword=衣服&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&stock=1&page=1&s=30&click=0&scrolling=y'
# resp = sess.get(url=url,headers=headers)
# t = resp.text
# with open('./index.html','w') as f:
#     f.write(t)
# res = re.findall(regix,t,re.S)
# pp(res)
