# coding=utf-8
import os

import requests
import re
from lxml import etree
import csv
import threading
import time

'''
<div id="frs_list_pager" class="pagination-default clearfix"><span class="pagination-current pagination-item ">1</span>
<a href="//tieba.baidu.com/f?kw=%E7%BE%8E%E5%A5%B3&amp;ie=utf-8&amp;pn=50" class=" pagination-item ">2</a>
<a href="//tieba.baidu.com/f?kw=%E7%BE%8E%E5%A5%B3&amp;ie=utf-8&amp;pn=100" class=" pagination-item ">3</a>
<a href="//tieba.baidu.com/f?kw=%E7%BE%8E%E5%A5%B3&amp;ie=utf-8&amp;pn=150" class=" pagination-item ">4</a>
<a href="//tieba.baidu.com/f?kw=%E7%BE%8E%E5%A5%B3&amp;ie=utf-8&amp;pn=200" class=" pagination-item ">5</a>
<a href="//tieba.baidu.com/f?kw=%E7%BE%8E%E5%A5%B3&amp;ie=utf-8&amp;pn=250" class=" pagination-item ">6</a>
<a href="//tieba.baidu.com/f?kw=%E7%BE%8E%E5%A5%B3&amp;ie=utf-8&amp;pn=300" class=" pagination-item ">7</a>
<a href="//tieba.baidu.com/f?kw=%E7%BE%8E%E5%A5%B3&amp;ie=utf-8&amp;pn=350" class=" pagination-item ">8</a>
<a href="//tieba.baidu.com/f?kw=%E7%BE%8E%E5%A5%B3&amp;ie=utf-8&amp;pn=400" class=" pagination-item ">9</a>
<a href="//tieba.baidu.com/f?kw=%E7%BE%8E%E5%A5%B3&amp;ie=utf-8&amp;pn=450" class=" pagination-item ">10</a>
<a href="//tieba.baidu.com/f?kw=%E7%BE%8E%E5%A5%B3&amp;ie=utf-8&amp;pn=50" class="next pagination-item ">下一页&gt;</a>
<a href="//tieba.baidu.com/f?kw=%E7%BE%8E%E5%A5%B3&amp;ie=utf-8&amp;pn=2230850" class="last pagination-item ">尾页</a>
</div>
    一共10页, 每页有50条贴吧链接, 获取每页的贴吧链接, 进入贴吧, 获取图片url, 再进行下载
'''


# One lock shared by every caller. The original created a brand-new
# Lock *inside* each call, so no two threads ever contended on the same
# lock object and the "protection" was a no-op.
_csv_lock = threading.Lock()


def writeCsv(fileName, data_li):
    """Append a single row to a CSV file, safely across threads.

    :param fileName: path of the CSV file (created/appended, utf-8)
    :param data_li: sequence of values written as one CSV row
    """
    with _csv_lock:
        # `with open` guarantees the file is closed even if writerow raises.
        with open(fileName, 'a+', encoding='utf-8', newline='') as f:
            csv.writer(f).writerow(data_li)


def getContent(url, key=False, timeout=10):
    """Fetch *url* and return its body, logging failures to ``error_log``.

    :param url: URL to request
    :param key: when True return raw bytes (``response.content``),
                otherwise decoded text (``response.text``)
    :param timeout: seconds before the request is aborted
                    (new parameter with a default, so callers are unaffected;
                    the original could hang forever on a dead server)
    :return: response body, or '' on any request error
    """
    # User-Agent typo fixed ('Moziall' -> 'Mozilla'); some servers reject
    # unrecognized agents outright.
    headers = {'User-Agent': 'Mozilla/5.0'}
    try:
        conn = requests.get(url, headers=headers, timeout=timeout)
        if key:
            return conn.content
        return conn.text
    except Exception as e:
        # Best-effort crawl: record the failure and keep going.
        writeCsv('error_log', [url, str(e)])
        return ''


def downloadPhoto(images):
    """Stream every image URL to the local ``images/`` directory.

    :param images: iterable of lists of image URLs
                   (the shape produced by ``getPhotoUrl``)
    """
    # Ensure the target directory exists before the first write; the
    # original crashed with FileNotFoundError if ``images/`` was absent.
    os.makedirs('images', exist_ok=True)
    headers = {'User-Agent': 'Mozilla/5.0'}  # typo 'Moziall' fixed
    for image_li in images:
        for img in image_li:
            # Timestamp-based name, keeping the URL's extension.
            file_type = img.split('.')[-1]
            filename = str(time.time()) + '.' + file_type
            try:
                recv = requests.get(img, headers=headers, stream=True, timeout=10)
                # content-length may be missing; the original did
                # int(None) here and crashed into the except branch.
                file_size = int(recv.headers.get('content-length') or 0)
                print(file_size, 'file_size')

                chunk_size = 2 * 1024
                data_size = 0
                with open(os.path.join('images', filename), 'wb') as fp:
                    for data in recv.iter_content(chunk_size=chunk_size):
                        data_size += len(data)
                        fp.write(data)
                print('downloads success : ', filename)
            except Exception as e:
                writeCsv('error_log', [img, str(e)])
                print('下载图片出错了')


def getPhotoUrl(href_li):
    """Yield the image URLs of each thread page.

    :param href_li: iterable of ``(href, title)`` tuples as produced
                    by ``getPageUrl``
    :yield: list of ``src`` URLs of the images found on that page
    """
    baseUrl = 'https://tieba.baidu.com'
    for item in href_li:
        url = baseUrl + item[0]
        html = getContent(url)
        # getContent returns '' on failure, and etree.HTML('') yields
        # None -- skip instead of crashing on None.xpath(...).
        if not html:
            continue
        page = etree.HTML(html)
        if page is None:
            continue
        images = page.xpath('//div[@id="j_p_postlist"]//cc//img[@class="BDE_Image"]/@src')
        writeCsv('page_image.csv', list(item) + images)
        yield images


def getPageUrl(page, fileName='kw_a_href.csv'):
    """Extract the thread links from one forum listing page.

    :param page: HTML text of the listing page
    :param fileName: CSV file the (href, title) pairs are appended to
    :return: list of tuples, e.g. [('/p/6166753666', '我头像怎么样'), ...]
    """
    tree = etree.HTML(page)
    anchor_xpath = '//ul[@id="thread_list"]/li/div/div/div/div[1]/a/@'
    hrefs = tree.xpath(anchor_xpath + 'href')
    titles = tree.xpath(anchor_xpath + 'title')
    print(len(hrefs), len(titles), 'href, title')

    pairs = list(zip(hrefs, titles))
    # Append every pair to the CSV in one pass.
    with open(fileName, 'a+', encoding='utf-8', newline='') as fp:
        csv.writer(fp).writerows(pairs)
    print(pairs)
    return pairs


def run():
    """Crawl the first 10 listing pages (pn = 0, 50, ..., 450) and
    download every image found in their threads."""
    base = 'http://tieba.baidu.com/f?kw=%E7%BE%8E%E5%A5%B3&ie=utf-8&pn='
    for offset in range(0, 451, 50):
        print(offset)
        listing = getContent(base + str(offset))
        thread_links = getPageUrl(listing)
        downloadPhoto(getPhotoUrl(thread_links))


if __name__ == '__main__':
    run()
