import requests
import re
from bs4 import  BeautifulSoup
from lxml import etree
import  time
import  csv
# Demo: write a tiny id/name CSV with a header row and three data rows.
# Bug fixed: files handed to csv.writer must be opened with newline='',
# otherwise every row is followed by a blank line on Windows (per the
# csv module docs). Mode 'w' replaces the unused 'w+' (the file is only
# written, never read back), and a with-block guarantees the file is
# closed even if a write raises.
with open('D://text.csv', 'w', newline='') as fp:
    writer = csv.writer(fp)
    writer.writerow(('id', 'name'))
    writer.writerow(('1', 'xiaoming'))
    writer.writerow(('2', 'zhangsan'))
    writer.writerow(('3', 'lisi'))

# Scrape the Douban Top-250 books list (10 pages of 25) and write one
# CSV row per book: name, url, author, publisher, date, price, rating,
# and the short editorial comment (or "空" when a book has none).
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'}
urls = ['http://book.douban.com/top250?start={}'.format(i) for i in range(0, 250, 25)]

# with-block replaces the manual fcsv.close() so the file is closed
# even if a request or parse step raises mid-run.
with open('D://doubanboook.csv', 'wt', newline='', encoding='utf-8') as fcsv:
    writer = csv.writer(fcsv)
    # Bug fixed: header column 'authir' -> 'author' (typo).
    writer.writerow(('name', 'url', 'author', 'publisher', 'date', 'price', 'rate', 'comment'))
    for url in urls:
        # Bug fixed: requests.get(url, header) passed the dict as the
        # positional *params* argument, so the User-Agent was never sent
        # as a header; it must go through headers=. A timeout keeps a
        # stalled connection from hanging the script forever.
        html = requests.get(url, headers=header, timeout=10)
        selector = etree.HTML(html.text)
        infos = selector.xpath('//tr[@class="item"]')
        print(len(infos))
        for info in infos:
            name = info.xpath('td/div/a/@title')[0]
            link = info.xpath('td/a/@href')[0]
            book_infos = info.xpath('td/p/text()')[0]
            # The info line is "author / publisher / date / price";
            # strip the padding whitespace each segment carries.
            parts = [p.strip() for p in book_infos.split('/')]
            author = parts[0]  # was parts[-0], which is just parts[0]
            publisher = parts[-3]
            date = parts[-2]
            price = parts[-1]
            rate = info.xpath('td/div/span[2]/text()')[0]
            comments = info.xpath('td/p/span/text()')
            comment = comments[0] if comments else "空"
            writer.writerow((name, link, author, publisher, date, price, rate, comment))

