import requests
import urllib
import pyquery
import re
import moment
import os
import json
import retry
import pprint
import logging
import pymongo

logging.basicConfig(
    format="[%(asctime)s] >>> %(levelname)s  %(name)s: %(message)s", level=logging.INFO)

class MONGODB(object):
    """Thin MongoDB wrapper: opens a connection and saves documents.

    ``karg`` is a configuration dict with the keys:
        link    -- positional arguments for pymongo.MongoClient (host, port)
        db      -- database name
        col     -- collection name
        col_key -- single-element list naming the field that gets a unique index
        authdb  -- optional: database to authenticate against
        auth    -- optional: (user, password) credentials used with ``authdb``
    """

    def __init__(self, karg):
        super(MONGODB, self).__init__()
        self.conf = karg
        self.logger = logging.getLogger(type(self).__name__)
        self.db = self.conf["db"]
        self.col = self.conf["col"]
        # ``col_key`` is star-unpacked, so it must hold exactly one field name.
        self.link = self.DB_link(*self.conf["col_key"])

    def DB_link(self, key):
        """Connect to MongoDB and return the database handle.

        Creates a unique index on ``key`` so duplicate documents are
        rejected with DuplicateKeyError instead of being stored twice.
        """
        con = pymongo.MongoClient(*self.conf["link"])
        if self.conf.get("authdb", None):
            # NOTE(review): Database.authenticate was removed in pymongo 4;
            # pass credentials to MongoClient instead if the driver is upgraded.
            con[self.conf["authdb"]].authenticate(*self.conf["auth"])

        # list_database_names/create_index replace database_names/ensure_index,
        # which were deprecated in pymongo 3 and removed in pymongo 4.
        if self.db not in con.list_database_names():
            self.logger.info("| %s created", self.db)
        db = con[self.db]
        db[self.col].create_index(key, unique=True)
        return db

    def COL_save(self, data):
        """Insert one document; duplicates (unique-index hits) are skipped."""
        db = self.link
        try:
            # insert_one replaces Collection.insert (removed in pymongo 4).
            db[self.col].insert_one(data)
        except pymongo.errors.DuplicateKeyError as e:
            # Expected for already-crawled tweets; log quietly and move on.
            self.logger.info(e)
        except Exception:
            # Unexpected failure: keep the crawl alive but record the traceback.
            self.logger.exception("failed to save document")
        else:
            # Log only after the insert actually succeeded — the original
            # logged "saved" before inserting, which lied on failure, and a
            # missing 'date' key even aborted the insert itself.
            self.logger.info(
                "saved: %s %s", list(data.items())[0][1], data.get("date"))

# Name of the MongoDB collection (one collection per crawl target).
key = "twitter"
# Connection settings consumed by MONGODB.__init__:
#   link    -> positional args for pymongo.MongoClient (host, port)
#   db      -> database name
#   col     -> collection name
#   col_key -> field given a unique index, de-duplicating tweets by id
mongoconf = {
    "link": ['127.0.0.1', 32768],
    "db": "work_object",
    "col": key,
    "col_key": ['tweetid']
}


class spider():
    """Crawler for Twitter's legacy HTML search timeline.

    For every keyword in ``self.keylist`` it fetches the search page to
    obtain paging cursors, then walks the timeline chunk by chunk, saving
    one MongoDB document per tweet until ``checktime`` trips or a request
    fails.  All CSS selectors below assume the pre-2023 twitter.com markup.
    """

    def __init__(self):
        # Storage backend; its unique index on "tweetid" de-duplicates tweets.
        self.mongo = MONGODB(mongoconf)
        self.session = requests.Session()
        # Search keywords; start() turns each one into a hashtag query.
        self.keylist = ["bitcoin"]
        # First (HTML) search page: provides the initial paging cursors.
        self.search_url = "https://twitter.com/search?q={}&src=typd"
        # Local HTTP proxy routed through for all requests.
        self.proxies_uri = "http://127.0.0.1:1087"
        # JSON timeline endpoints: "_top" pages via a max_position cursor,
        # "_next" via a min_position cursor (data_parser reports which one
        # the last chunk returned).
        self.search_url_top = "https://twitter.com/i/search/timeline?vertical=news&q={}&src=typd&include_available_features=1&include_entities=1&max_position={}&reset_error_state=false"
        self.search_url_next = "https://twitter.com/i/search/timeline?vertical=news&q={}&src=typd&composed_count=0&include_available_features=1&include_entities=1&include_new_items_bar=true&interval=240000&latent_count=0&min_position={}&reset_error_state=false"
        # Profile URL template (not used anywhere in this file).
        self.detail_url = "https://twitter.com/{}"
        self.session.proxies = {"https": self.proxies_uri, "http": self.proxies_uri}
        # Per-keyword scratch dict; only initialized in get_post_pagedetail.
        self.datas = {}
        # Headers mimic the site's own XHR calls so the timeline endpoint
        # answers with JSON instead of redirecting to the HTML page.
        self.session.headers = {
            "accept": "application/json, text/javascript, */*; q=0.01",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
            "cache-control": "no-cache",
            "pragma": "no-cache",
            "referer": "https://twitter.com/search?q=bullish&src=typd",
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36",
            "x-requested-with": "XMLHttpRequest",
            "x-twitter-active-user": "yes"
        }

    def checktime(self,status,time):
        """Return True when crawling should stop: ``status`` is still True
        and the tweet's unix ``time`` is older than the "3 years ago"
        cut-off (per the moment library)."""
        return status and moment.unix(time) < moment.date("3 years ago")

    def data_parser(self, types, resp):
        """Parse a search response, dispatching on the ``types`` tag.

        "get_searchres_top": scrape the HTML page and stash the stream's
        data-max/min-position cursors on self (implicitly returns None).
        "get_searchres_next": walk the rendered tweets in the JSON chunk,
        save each one, and return (next_cursor, cursor_field, keep_going).
        """
        jqdata = pyquery.PyQuery(resp.text)
        jsondata = None
        timestatus = True
        if "next" in types:
            jsondata = resp.json()
        if "get_searchres_top" in types:
            contents = jqdata("#timeline > div.stream-container")
            # NOTE(review): attribute names are misspelled ("positon") but
            # used consistently — see start().
            self.maxpositon = contents.attr("data-max-position")
            self.minpositon = contents.attr("data-min-position")
        elif "get_searchres_next" in types:
            # ``types`` is "get_searchres_next\r\n\t<keyword>"; the keyword
            # is split off here but never used below.
            _, key = types.split("\r\n\t")
            if jsondata["items_html"].strip():
                for item in pyquery.PyQuery(jsondata["items_html"])("li.js-stream-item.stream-item.stream-item").items():
                    # data-time-ms is in milliseconds; keeping the first ten
                    # digits truncates it to whole unix seconds.
                    timestamp = int(item("span._timestamp").attr("data-time-ms")[:10])
                    if self.checktime(timestatus, timestamp):
                        # Once a tweet is past the cut-off, stop paging
                        # (assumes chunks arrive newest-first — TODO confirm).
                        timestatus = False
                        break
                    self.mongo.COL_save({
                        "tweetid":item.attr("data-item-id"),
                        "uri" :item("div.tweet").attr("data-screen-name"),
                        "user_id": item("div.tweet").attr("data-user-id"),
                        "user_name": item("strong.fullname").text(),
                        "date": moment.unix(timestamp).strftime("%Y-%m-%d %H:%M:%S"),
                        # Collapse all whitespace runs in the tweet body.
                        "content": ' '.join(item("p.tweet-text").text().split()) ,
                    })

            # The chunk carries whichever cursor field applies next.
            res_types = "max_position" if "max_position" in jsondata else "min_position"
            return jsondata[res_types], res_types, timestatus




    def get_data(self, format_url, format_data=None, types="", params=None, extraheader=None):
        """GET ``format_url`` (optionally .format()-ed with ``format_data``)
        and hand the response to data_parser.

        Returns whatever data_parser returns; on a non-200 status it returns
        None, whose unpack failure upstream acts as the stop signal.
        """
        params = params if params else {}
        url = format_url if format_data is None else format_url.format(
            *format_data)
        pprint.pprint(url)
        resp = self.session.get(url, params=params, headers=extraheader)
        resp.encoding = "utf8"
        if resp.status_code == 200:
            return self.data_parser(types, resp)
        else:
            pprint.pprint(resp.status_code)


    def get_post_pagedetail(self, key):
        """Fetch the first search page for ``key`` so data_parser can stash
        the initial paging cursors on self."""
        self.datas[key] = {}
        self.get_data(self.search_url, [key], types="get_searchres_top")

    def get_post_text(self, nextparams, key):
        """Page through the timeline for ``key`` starting at cursor
        ``nextparams`` until checktime trips or a request fails."""
        nextparams, types, time = self.get_post_text_next(
            self.search_url_top.format(key, nextparams), key)
        while time:
            try:
                # Keep paging with whichever cursor kind the last chunk used.
                format_url = self.search_url_top if types == "max_position" else self.search_url_next
                nextparams, types, time = self.get_post_text_next(format_url.format(key, nextparams), key)
            except Exception as e:
                # Bad response or exhausted retries: end this keyword.
                pprint.pprint(e)
                break


    @retry.retry(tries=3,delay=5)
    def get_post_text_next(self, nexturl, key):
        """Fetch one timeline chunk; retried up to 3 times, 5 s apart."""
        return self.get_data(nexturl, types="get_searchres_next\r\n\t{}".format(key))

    def start(self):
        """Crawl every keyword as a hashtag ("%23" is a URL-encoded '#')."""
        for key in self.keylist:
            key = f'%23{key}'
            self.get_post_pagedetail(key)
            # NOTE(review): only ``import urllib`` appears above, so
            # urllib.parse resolves here only because requests imports that
            # submodule as a side effect; an explicit ``import urllib.parse``
            # would be safer.
            self.get_post_text(urllib.parse.quote(self.maxpositon), key)

if __name__ == '__main__':
    # Entry point: build the crawler and run it over every configured keyword.
    crawler = spider()
    crawler.start()
