# -*- coding: utf-8 -*-
import random
import requests
import parsel
import pymysql
import sys

# Pool of HTTP proxies (host:port).
# NOTE(review): defined but never used anywhere in this file — the requests
# calls below go out directly. Either wire it into `requests.get(proxies=...)`
# or remove it.
proxy_list = [
    '183.95.80.102:8080',
    '123.160.31.71:8080',
    '115.231.128.79:8080',
    '166.111.77.32:80',
    '43.240.138.31:8080',
    '218.201.98.196:3128'
]

# Pool of User-Agent strings; get_header() picks one at random per request
# so the scraper does not present a single, easily blocked fingerprint.
my_headers = [
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
    'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
    "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 "
]

def get_header():
    """Return a requests header dict with a User-Agent picked at random
    from the module-level ``my_headers`` pool."""
    return {"User-Agent": random.choice(my_headers)}

# Fetch page source while masquerading as a regular browser
def main():
    """Scrape CSDN blog articles and persist them (with tags) into MySQL.

    Flow:
      1. Load the tag names already stored in ``taginfo``.
      2. Fetch the article list page, then every article it links to.
      3. Insert each article into ``blogInfo`` and remember its new blogId.
      4. Insert previously unseen tags into ``taginfo`` and add a matching
         int flag column (named after the tagid) to ``blogtags``.
      5. Insert one ``blogtags`` row per article: blogId followed by a 0/1
         flag for every tagid.

    Fixes vs. the previous version: parameterized SQL for scraped tag text
    (injection risk), `cursor.lastrowid` instead of a racy
    ``select max(blogId)``, no list mutation while iterating, no shadowing
    of the link list by the inner loop variable, tolerance for a missing
    time node, and the final row assembly no longer shifts every tag flag
    one column to the right (it used to emit the dummy index-0 slot and
    skip the next-to-last flag).
    """
    # Database connection; closed in the finally block below.
    db = pymysql.connect(host="localhost", user="root", password="",
                         database="blog", charset="utf8")
    cursor = db.cursor()
    try:
        # Tag names already present in the database.
        cursor.execute('select content from taginfo')
        label = [row[0] for row in cursor.fetchall()]
        print(label)

        url = "https://blog.csdn.net/pythonxuexi123/article/list"
        head = get_header()
        response = requests.get(url=url, headers=head)

        selector = parsel.Selector(response.text)
        links = selector.css('.article-list div.article-item-box a::attr(href)').getall()

        total_tags = []   # every tag seen across all scraped articles
        blogIds = []      # blogInfo ids of the freshly inserted articles
        blogtag_id = []   # per-article list of that article's tag names

        for link in links:
            link = str(link)

            # Fetch and parse one article page.
            res_son = requests.get(url=link, headers=head)
            selector_son = parsel.Selector(res_son.text)
            title = selector_son.css('#articleContentId::text').get()
            content = selector_son.css('#content_views').get()
            tags = selector_son.css('div.tags-box.artic-tag-box a.tag-link::text').getall()
            tag_hrefs = selector_son.css('div.tags-box.artic-tag-box a.tag-link::attr(href)').getall()
            raw_time = selector_son.css('div.bar-content span.time::text').get()
            # Collapse "YYYY-MM-DD hh:mm:ss" to digits only; a missing time
            # node (None) becomes the empty string instead of crashing.
            create_date = (raw_time or '').translate(str.maketrans('', '', '- :'))
            print(tags)

            # Only anchors pointing at so.csdn.net are real tag links.
            count_tags = sum(1 for h in tag_hrefs if "https://so.csdn.net" in h)

            # The real tags are the last `count_tags` anchors; keep them in
            # reverse order, matching the original accumulation behavior.
            blt = list(reversed(tags))[:count_tags]
            total_tags.extend(blt)
            blogtag_id.append(blt)  # tags carried by the current article

            # Insert the article itself.
            try:
                sql = '''insert into blogInfo(userId, createDate, content, star, blogState, blogTitle, blogFromOther)
                    value(2, %s, %s, 0, 1, %s, %s)'''
                cursor.execute(sql, [create_date, content, title, link])
                db.commit()
                # AUTO_INCREMENT id of the row just inserted; unlike
                # `select max(blogId)` this is safe under concurrent writers.
                blogIds.append(cursor.lastrowid)
                print("success_1")
            except Exception as e:
                # Best-effort per-article: log and continue with the rest.
                print(e)

        # Keep only tags not yet stored: dedupe, then drop known labels.
        # (A new list instead of remove()-while-iterating.)
        total_tags = [t for t in set(total_tags) if t not in label]
        print('==')
        print(total_tags)

        # Insert each new tag and add its int flag column to `blogtags`.
        # tagids continue from the existing count: len(label)+1, +2, ...
        for offset, tag in enumerate(total_tags, start=1):
            # Parameterized: `tag` is scraped text and must not be
            # concatenated into SQL.
            cursor.execute('insert into taginfo(content) values(%s)', [tag])
            db.commit()

            tagid = str(len(label) + offset)
            print(tagid)
            # Identifiers cannot be bound as parameters; `tagid` is derived
            # from integer arithmetic, never from scraped input.
            cursor.execute('alter table `blogtags` add column `' + tagid + '` int(1)')
            db.commit()
            print("success_2")
            
        total_tags = label + total_tags  # all tags; list index + 1 == tagid

        # Translate each article's tag names into tagids.
        blogtagid = [[total_tags.index(t) + 1 for t in art_tags if t in total_tags]
                     for art_tags in blogtag_id]

        print('每个blog的tag')
        print(blogtag_id)
        print('每个blog的tagid')
        print(blogtagid)
        print('所有tag')
        print(total_tags)

        # 0/1 membership matrix: one row per article; column index == tagid,
        # index 0 is an unused placeholder so tagids can index directly.
        tmp = [[0] * (len(total_tags) + 1) for _ in range(len(blogIds))]
        print(tmp)
        for i, tagids in enumerate(blogtagid):
            for tid in tagids:
                tmp[i][tid] = 1

        print('对应要加的值')
        print(tmp)
        print(len(tmp))

        # Insert one `blogtags` row per article: blogId, then the flag for
        # every tagid 1..N (slot 0 of the row is the placeholder — skipped).
        for i, row in enumerate(tmp):
            # All values are program-generated ints, so string assembly is
            # injection-safe here; the column count varies with the table.
            values = ','.join([str(blogIds[i])] + [str(v) for v in row[1:]])
            fsql = 'insert into `blogtags` values(' + values + ')'
            print(fsql)
            cursor.execute(fsql)
            db.commit()
            print('success_3')
    finally:
        # Always release the DB handles, even if scraping blows up midway.
        cursor.close()
        db.close()




# Parse the list page to obtain each article's URL —
# via re regular expressions, CSS selectors, or XPath

# Script entry point: run the scraper only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()