import requests
from requests.exceptions import ReadTimeout,ConnectionError,RequestException
from bs4 import BeautifulSoup as Bs
import json,re,html
import time,xlwt,xlrd,os
from xlutils.copy import copy
# p = os.path.dirname(__file__)
# parent_path = os.path.dirname(p)
# path = parent_path+'/TV.xls'
# print(path)


import csv
'''Python 3.X file modes include: r, w, a, r+ etc.'''
'''Python 2.X file modes include: rb, wb, r, w, a, r'''


# def add_data(slist):
#     add_wt = xlrd.open_workbook(path)
#     row = add_wt.sheets()[0].nrows
#     new_add_wt = copy(add_wt)
#     sheet = new_add_wt.get_sheet(0)
#     i = 0
#     for j in slist:
#         sheet.write(row, i, j)
#         i += 1
#     new_add_wt.save(path)


def get_html_data1(page):  # Fetch the URLs of the individual sub-category pages
    """Fetch one Taobao search-result page for the query (sub-category listing).

    Args:
        page: Result offset as a string; Taobao pages step in increments of 44.

    Returns:
        The raw page body (str) on success, else None.
    """
    url_string = r'https://s.taobao.com/search?q=%E7%94%B5%E8%84%91&imgfile=&js=1&stats_click=search_radio_all%3A1&initiative_id=staobaoz_20180824&ie=utf8&bcoffset=6&ntoffset=6&p4ppushleft=1%2C48&s='+page
    results = request_url(url_string)
    # BUG FIX: the original tested `if requests:` — the imported module, which
    # is always truthy — so the failure branch was unreachable. Test the
    # actual payload instead.
    if results:
        # print(results)
        return results
    else:
        print('no datas')


def request_url(url_string, timeout=10):  # Error-handling wrapper around the HTTP GET
    """GET *url_string* and return the body decoded as UTF-8.

    Args:
        url_string: Full URL to request.
        timeout: Seconds before the request is aborted. The original code
            caught ReadTimeout but never set a timeout, so that handler
            could never fire; the default keeps the call signature
            backward-compatible.

    Returns:
        The response body (str) on HTTP 200, else None (errors are printed).
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)',
        'Host': 's.taobao.com'
    }
    try:
        with requests.get(url_string, headers=header, timeout=timeout) as f:
            if f.status_code == requests.codes.ok:
                return f.content.decode('utf-8')
            else:
                print('Stats', f.status_code, f.reason)
    except ReadTimeout:
        print('Time Out')
    except ConnectionError:
        print('Connection Error')
    except RequestException:
        print('RequestError')


def get_html_data(page):  # Fetch the URLs under the main topic
    """Fetch one Taobao search-result page for the query.

    Args:
        page: Result offset as a string; Taobao pages step in increments of 44.

    Returns:
        The raw page body (str) on success, else None.
    """
    url_string = r'https://s.taobao.com/search?q=%E7%94%B5%E8%84%91&imgfile=&js=1&stats_click=search_radio_all%3A1&initiative_id=staobaoz_20180824&ie=utf8&bcoffset=6&ntoffset=6&p4ppushleft=1%2C48&s='+page
    results = request_url(url_string)
    # BUG FIX: the original tested `if requests:` — the imported module, which
    # is always truthy — making the failure branch unreachable. Test the
    # fetched payload instead.
    if results:
        return results
    else:
        print('no datas')


def parse_html(html_doc):
    """Extract [title, price] pairs from the inline ``g_page_config`` JSON.

    Taobao embeds the search results as a JS assignment
    ``g_page_config = {...}; g_srp_loadCss...`` — this captures the JSON
    object, strips the trailing ``;`` and walks down to the auction list.

    Args:
        html_doc: Raw page source (str).

    Returns:
        A list of ``[raw_title, view_price]`` pairs; an empty list when the
        page does not contain the expected blob (e.g. an anti-bot/captcha
        page), instead of raising IndexError as the original did.
    """
    match = re.search(r'g_page_config = (.*?) g_srp_loadCss', html_doc, re.S)
    if match is None:
        # Robustness: pages without g_page_config (captcha, error page)
        # previously crashed on findall(...)[0].
        return []
    content = match.group(1).strip()[:-1]  # drop the trailing ';'
    obj = json.loads(content)
    auctions = obj['mods']['itemlist']['data']['auctions']
    return [[item['raw_title'], item['view_price']] for item in auctions]


def main():
    """Scrape the first three result pages (offsets 0, 44, 88) into TV.csv.

    Opens the CSV once in append mode, writes one header row, then appends
    the parsed [name, price] rows for each page, sleeping 5s between
    requests to be polite to the server.
    """
    with open('TV.csv', 'a+', newline='', encoding='utf-8') as f_csv:
        writer = csv.writer(f_csv)
        # BUG FIX: the header was written once per page (three interleaved
        # header rows per run); write it a single time instead.
        writer.writerow(['name', 'price'])
        for i in range(0, 3):
            # 1.1 request — Taobao paginates by an item offset of 44.
            html_doc = get_html_data(str(i * 44))
            if not html_doc:
                # Robustness: a failed fetch returns None; skip instead of
                # crashing inside parse_html.
                continue
            # 2.1 clean the data
            data = parse_html(html_doc)
            writer.writerows(data)
            time.sleep(5)


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()

























# TV / mobile-phone variant: the grid layout uses a different JSON path.
# obj = json.loads(content)
# lis1 = obj['mods']['grid']['data']['spus']
# for i in lis1:
#     print(i['title'],i['price'])