import csv
import re
import requests



def Begin():
    """Interactively scrape Taobao search results and dump them to CSV.

    Prompts for a product keyword and a page count, fetches each search
    results page via Taobao's ajax endpoint, and writes:

    * one CSV with (title, monthly sales, ship-from city, price, item URL,
      item id, picture URL) per item, and
    * one CSV counting how many items ship from each city.

    No parameters, no return value; all output goes to the two CSV files.
    NOTE(review): relies on a hard-coded session cookie below — requests
    will start failing once it expires; refresh it from a logged-in browser.
    """
    input_good = input('输入需要爬取的商品: ')
    input_pages = int(input('爬取几页内容(int值): '))

    # Loop-invariant request headers — built once, not once per page.
    # The cookie carries the (captured) Taobao session; without it the
    # endpoint returns an anti-bot page instead of JSON.
    headers = {
        "accept":"*/*",
        "accept-encoding":"gzip, deflate, br",
        "accept-language":"zh,en;q=0.9,es;q=0.8,zh-CN;q=0.7,ca;q=0.6",
        "cookie":"cna=d5NsGlwS2RACAV+p45yqRuio; lid=tb9618665199; enc=Rz6OnMfLP%2FAjJnFNjcLgwrmXx%2BrrkNd2KyipRd9VokNy1go3feB1E6nvAFqUrS11Po%2FO9dwblXBUbVStxWDwolfhWhIj2RaAOG8I0BUjwoU%3D; _m_h5_tk=bb70d05a27f8e781e94bed2e67757c39_1649697976965; _m_h5_tk_enc=8d99e8a4d63c0b55d64ebf7336c8e7a8; xlly_s=1; sgcookie=E100Fti8OUodwFoj5SACfZx8%2BQY2%2F5opFg6j%2FzNXJTt%2FxbbmsCDQkbFVtb79mlnYRWfTte%2F6EEhacmUSvlpo0gHApo5CuggpY%2Fy81tojTJFLJoM%3D; uc3=vt3=F8dCvCh2ztdz2miMiaY%3D&id2=UUpgRSE2i97AqMC8ug%3D%3D&nk2=F5RMHy25V5QrHyfL&lg2=WqG3DMC9VAQiUQ%3D%3D; t=93a62c185eee81c50f452e7972169264; tracknick=tb9618665199; uc4=nk4=0%40FY4HWrRJIGu82rTsY2Oq2o76M4Zy8d0%3D&id4=0%40U2gqyknY4f5csQvp0dge5vl86%2FY38HCO; lgc=tb9618665199; _tb_token_=7319e53337d87; cookie2=201283e5dcb2ae56ef9c24c4c30ce326; x5sec=7b22617365727665723b32223a223865386339313264623033353765663034356233363138623766393039343265434f616c35354947454c54616772507a722f36335a6a43756f66474e41673d3d227d; tfstk=ca5CBJOE5DmI1VyrTwaaaaEVw7RNa9c6SfxNRliwlUULLRjMXs0rgnTIGei6aUL1.; l=eBI7FWq7LiSlvfOABO5Zhurza77tBQRXG1PzaNbMiIncC6vCizvt1CxQ0V2FoLxRRWXcTDYX4K_zHLetyFc0-ykfoTB7K9cdvdCwCef..; isg=BBISxoZkpPSr3diFTYfIvHyJY9j0Ixa95EA1Itxpy0Rm777pyLAtzH1JW1NTmI5V",
        "referer":"https://detail.tmall.com/item.htm?spm=a230r.1.14.16.61764f39hiJas9&id=651107744915&ns=1&abbucket=1",
        "sec-ch-ua":"\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"100\", \"Google Chrome\";v=\"100\"",
        "sec-ch-ua-mobile":"?0",
        "sec-ch-ua-platform":"\"Windows\"",
        "sec-fetch-dest":"script",
        "sec-fetch-mode":"no-cors",
        "sec-fetch-site":"same-origin",
        "user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36"
    }

    list_place = []  # ship-from city of every scraped item, for the stats CSV

    # Open the items CSV once for the whole run instead of once per item.
    items_csv = "淘宝关于{}商品共爬取{}页(名称，价格，发货地，月销售量).csv".format(input_good, input_pages)
    with open(items_csv, 'a', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        for page in range(input_pages):
            # data-value paginates in steps of 44 items per page.
            data_value = (page + 1) * 44
            url = f'https://s.taobao.com/search?data-value={data_value}&ajax=true&_ksTS=1649942899010_1086&q={input_good}&type=p&tmhkh5=&from=sea_1_searchbutton&catId=100&spm=a2141.241046-.searchbar.d_2_searchbox&bcoffset=-2&ntoffset=4&p4ppushleft=2%2C48&s=44'
            # timeout so an unresponsive server cannot hang the scrape forever
            r = requests.get(url, headers=headers, timeout=10)
            auctions = r.json()['mods']['itemlist']['data']['auctions']
            # The first three entries appear to be promoted/ad listings —
            # the original code dropped them, so keep doing that.
            del auctions[:3]
            for data in auctions:
                try:
                    title = data["raw_title"]              # product title
                    # "view_sales" looks like "1234人付款"; strip the suffix
                    sales = re.sub('人付款', '', data["view_sales"])
                    # keep only the province/city token of "item_loc"
                    loc = data["item_loc"].split(" ")[0]
                    price = data["view_price"]
                    item_url = "https:" + data["detail_url"]
                    item_id = data["nid"]
                    pic_url = data["pic_url"]
                except KeyError:
                    # Some entries lack one of the fields — skip just those,
                    # instead of the original bare `except: pass` which also
                    # hid genuine bugs and I/O errors.
                    continue
                list_place.append(loc)
                writer.writerow([title, sales, loc, price, item_url, item_id, pic_url])

    # O(n) frequency count (the original used list.count per unique key,
    # which is O(n^2)); one file open for all rows.
    stats_csv = "{}商品{}页发货地数量统计.csv".format(input_good, input_pages)
    with open(stats_csv, 'a', newline='', encoding="utf-8") as csvfile:
        writer = csv.writer(csvfile)
        for place, count in Counter(list_place).items():
            writer.writerow([place, count])


# Script entry point: runs the interactive scraper (prompts on stdin).
if __name__ == '__main__':
    Begin()