# -*- coding: utf-8 -*-

from __future__ import with_statement

import errno
import os
import random
import re
import time

import eventlet
from eventlet import wsgi, websocket
from eventlet.green import urllib2, socket
from eventlet.support import six

######################################################################################################
'''
This is a mashup of the websocket example and the multi-user chat example, 
showing how you can do the same sorts of things with websockets that you can do with regular sockets.
'''
# TCP port the websocket chat server listens on.
PORT_W = 7000

# Open websocket connections currently in the chat; each handler adds itself
# on connect and removes itself on disconnect.
participants_w = set()

@websocket.WebSocketWSGI
def handle_w(ws):
    """Chat websocket handler: broadcast every incoming frame to all participants.

    Registers *ws* in the module-level ``participants_w`` set for the lifetime
    of the connection and relays each received message to every participant,
    including the sender.  ``ws.wait()`` returning ``None`` means the client
    closed the connection.
    """
    participants_w.add(ws)
    try:
        while True:
            m = ws.wait()
            if m is None:
                break
            # Iterate over a snapshot: p.send() may yield to other
            # greenthreads that add/remove participants, which would raise
            # "Set changed size during iteration" on the live set.
            for p in list(participants_w):
                p.send(m)
    finally:
        participants_w.remove(ws)


def dispatch_w(environ, start_response):
    """Resolves to the web page or the websocket depending on the path.

    ``/chat`` is upgraded to the websocket handler; any other path serves
    the chat page template with the port number substituted in.
    """
    if environ['PATH_INFO'] == '/chat':
        return handle_w(environ, start_response)
    start_response('200 OK', [('content-type', 'text/html')])
    html_path = os.path.join(os.path.dirname(__file__), 'websocket_chat.html')
    # Use a context manager so the template file handle is closed, not leaked.
    with open(html_path) as f:
        return [f.read() % {'port': PORT_W}]

def websocket_chat():
    """Run the websocket chat example from the command line."""
    listener = eventlet.listen(('127.0.0.1', PORT_W))
    # Derive the advertised URL from PORT_W so the message stays correct
    # if the port constant is ever changed (it was hardcoded to 7000 before).
    print("\nVisit http://localhost:%d/ in your websocket-capable browser.\n" % PORT_W)
    wsgi.server(listener, dispatch_w)
######################################################################################################
@websocket.WebSocketWSGI
def handle(ws):
    """Websocket handler that dispatches on the request path.

    ``/echo`` mirrors every received frame back to the client until the
    connection closes; ``/data`` streams 10000 lines of random sample data,
    pausing briefly between sends.
    """
    if ws.path == '/echo':
        # Echo frames until wait() reports a closed connection with None.
        msg = ws.wait()
        while msg is not None:
            ws.send(msg)
            msg = ws.wait()
    elif ws.path == '/data':
        for i in six.moves.range(10000):
            ws.send("0 %s %s\n" % (i, random.random()))
            eventlet.sleep(0.1)


def dispatch(environ, start_response):
    """This resolves to the web page or the websocket depending on the path.

    ``/data`` is upgraded to the websocket handler; anything else serves the
    static demo page.
    """
    if environ['PATH_INFO'] == '/data':
        return handle(environ, start_response)
    start_response('200 OK', [('content-type', 'text/html')])
    page = os.path.join(os.path.dirname(__file__), 'websocket.html')
    # Use a context manager so the file handle is closed instead of leaked.
    with open(page) as f:
        return [f.read()]

def websocket():
    """Run the plain websocket example from the command line.

    NOTE(review): once defined, this function shadows the imported
    ``eventlet.websocket`` module name.  The ``@websocket.WebSocketWSGI``
    decorators already ran at import time so nothing breaks, but consider
    renaming to avoid the collision.
    """
    port = 7000
    listener = eventlet.listen(('127.0.0.1', port))
    print("\nVisit http://localhost:7000/ in your websocket-capable browser.\n")
    wsgi.server(listener, dispatch)
######################################################################################################
"""This is a recursive web crawler.  Don't go pointing this at random sites;
it doesn't respect robots.txt and it is pretty brutal about how quickly it
fetches pages.

This is a kind of "producer/consumer" example; the fetch function produces
jobs, and the GreenPool itself is the consumer, farming out work concurrently.
It's easier to write it this way rather than writing a standard consumer loop;
GreenPool handles any exceptions raised and arranges so that there's a set
number of "workers", so you don't have to write that tedious management code
yourself.
"""
# http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
url_regex2 = re.compile(r'\b(([\w-]+://?|www[.])[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|/)))')

def fetch2(url, outq):
    """Fetch *url* and push any urls found in its body into *outq*.

    A 5-second eventlet timeout silently abandons slow fetches (``data``
    stays empty), so one hung server cannot stall the whole crawl.
    """
    print("fetching", url)
    data = ''
    with eventlet.Timeout(5, False):
        response = urllib2.urlopen(url)
        try:
            data = response.read()
        finally:
            # Close the response so the underlying socket isn't leaked.
            response.close()
    for url_match in url_regex2.finditer(data):
        new_url = url_match.group(0)
        outq.put(new_url)


def producer(start_url):
    """Recursively crawl starting from *start_url*.

    Returns the set of urls that were found.
    """
    pool = eventlet.GreenPool()
    seen = set()
    q = eventlet.Queue()
    q.put(start_url)
    # Keep looping while there are queued urls or workers that may still
    # produce more: workers push new urls into q while we drain it.
    done = False
    while not done:
        while not q.empty():
            candidate = q.get()
            # limit requests to eventlet.net so we don't crash all over the internet
            if candidate not in seen and 'eventlet.net' in candidate:
                seen.add(candidate)
                pool.spawn_n(fetch2, candidate, q)
        pool.waitall()
        done = q.empty()
    return seen

def producer_consumer():
    """Crawl starting from baidu and print every url encountered."""
    found = producer("http://www.baidu.com")
    print("I saw these urls:")
    print("\n".join(found))
######################################################################################################
"""This is a recursive web crawler.  Don't go pointing this at random sites;
it doesn't respect robots.txt and it is pretty brutal about how quickly it
fetches pages.

The code for this is very short; this is perhaps a good indication
that this is making the most effective use of the primitves at hand.
The fetch function does all the work of making http requests,
searching for new urls, and dispatching new fetches.  The GreenPool
acts as sort of a job coordinator (and concurrency controller of
course).
"""
# http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
url_regex = re.compile(r'\b(([\w-]+://?|www[.])[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|/)))')

def fetch(url, seen, pool):
    """Fetch *url*, record new eventlet.net urls in *seen*, and spawn a
    fetch for each of them into *pool*.

    A 5-second eventlet timeout silently abandons slow fetches (``data``
    stays empty).
    """
    print("fetching", url)
    data = ''
    with eventlet.Timeout(5, False):
        response = urllib2.urlopen(url)
        try:
            data = response.read()
        finally:
            # Close the response so the underlying socket isn't leaked.
            response.close()
    for url_match in url_regex.finditer(data):
        new_url = url_match.group(0)
        print('new_url =', new_url)
        # only send requests to eventlet.net so as not to destroy the internet
        if new_url not in seen and 'eventlet.net' in new_url:
            seen.add(new_url)
            # while this seems stack-recursive, it's actually not:
            # spawned greenthreads start their own stacks
            pool.spawn_n(fetch, new_url, seen, pool)


def crawl(start_url):
    """Recursively crawl starting from *start_url*; return the urls found."""
    seen = set()
    pool = eventlet.GreenPool()
    # fetch() re-spawns itself into the pool for every new url it discovers;
    # waitall() returns once the whole crawl has quiesced.
    fetch(start_url, seen, pool)
    pool.waitall()
    return seen

def recursive_crawler():
    """Crawl starting from baidu and print every url encountered."""
    found = crawl('http://www.baidu.com')
    print("I saw these urls:")
    print("\n".join(found))
######################################################################################################
"""A simple web server that accepts POSTS containing a list of feed urls,
and returns the titles of those feeds.
"""
feedparser = eventlet.import_patched('feedparser')

# the pool provides a safety limit on our concurrency
pool = eventlet.GreenPool()

def fetch_title(url):
    """Return the title of the feed at *url* ('' if the feed has none)."""
    parsed = feedparser.parse(url)
    return parsed.feed.get('title', '')


def app(environ, start_response):
    """WSGI app: read feed urls (one per line) from a POST body and return
    their titles, one per line, fetched concurrently.

    Non-POST requests are rejected with 403 and an empty body.
    """
    if environ['REQUEST_METHOD'] != 'POST':
        start_response('403 Forbidden', [])
        return []

    # the pile collects the result of a concurrent operation -- in this case,
    # the collection of feed titles.  It was referenced but never created in
    # the original code (NameError at runtime); build it on the shared pool
    # so overall concurrency stays bounded.
    pile = eventlet.GreenPile(pool)
    for line in environ['wsgi.input'].readlines():
        url = line.strip()
        if url:
            pile.spawn(fetch_title, url)
    # since the pile is an iterator over the results,
    # you can use it in all sorts of great Pythonic ways
    titles = '\n'.join(pile)
    start_response('200 OK', [('Content-type', 'text/plain')])
    return [titles]

def feedscraper():
    """Serve the feed-title scraper app on localhost:9010."""
    from eventlet import wsgi
    listener = eventlet.listen(('localhost', 9010))
    wsgi.server(listener, app)
######################################################################################################
'''
This is a little different from the echo server, in that it broadcasts the messages to all participants, not just the sender.
'''
# TCP port for the plain-socket chat server.
PORT = 3001
# File-like writers for every connected chat client.
participants = set()

def read_chat_forever(writer, reader):
    """Relay lines read from *reader* to every other chat participant.

    Runs until the peer disconnects (readline() returns ''), then
    unregisters *writer* from the ``participants`` set.
    """
    try:
        line = reader.readline()
        while line:
            print("Chat:", line.strip())
            for p in participants:
                try:
                    if p is not writer:  # Don't echo
                        p.write(line)
                        p.flush()
                except socket.error as e:
                    # ignore broken pipes, they just mean the participant
                    # closed its connection already.  Use errno.EPIPE and
                    # e.errno instead of the magic e[0] != 32, which breaks
                    # on Python 3 (exceptions aren't indexable there).
                    if e.errno != errno.EPIPE:
                        raise
            line = reader.readline()
    finally:
        # discard() tolerates double-removal, and the finally guarantees the
        # writer is unregistered even if the relay loop raises.
        participants.discard(writer)
        print("Participant left chat.")

def chat_server():
    """Accept chat connections on PORT, spawning a relay greenthread per client."""
    try:
        print("ChatServer starting up on port %s" % PORT)
        server = eventlet.listen(('0.0.0.0', PORT))
        while True:
            conn, address = server.accept()
            print("Participant joined chat. address =", address)
            writer = conn.makefile('w')
            participants.add(writer)
            eventlet.spawn_n(read_chat_forever, writer, conn.makefile('r'))
    except (KeyboardInterrupt, SystemExit):
        print("ChatServer exiting.")
######################################################################################################
"""Spawn multiple workers and collect their results.

Demonstrates how to use the eventlet.green.socket module.
"""
def geturl(url):
    c = socket.socket()
    ip = socket.gethostbyname(url)
    print('url ',url,', ip =',ip)
    c.connect((ip, 80))
    print('%s connected' % url)
    c.sendall('GET /\r\n\r\n')
    return c.recv(1024)

def socketconnect():
    """Fetch several sites concurrently via a GreenPile and print the results."""
    urls = ['www.baidu.com', 'www.sohu.com', 'www.sina.com.cn']
    pile = eventlet.GreenPile()
    print('pile =', pile)
    for site in urls:
        pile.spawn(geturl, site)

    # The pile yields each worker's return value in order; any exception
    # raised by a worker is re-raised here.
    for site, result in zip(urls, pile):
        print('%s: %s' % (site, repr(result)[:50]))
######################################################################################################
"""\
Simple server that listens on port 6000 and echos back every input to
the client.  To try out the server, start it up by running this file.

Connect to it with:
  telnet localhost 6000

You terminate your connection by terminating telnet (typically Ctrl-]
and then 'quit')
"""
def handle(fd):
    print("client connected")
    while True:
        # pass through every non-eof line
        x = fd.readline()
        if not x:
            break
        fd.write(x)
        fd.flush()
        print("echoed", x)
    print("client disconnected")

def echoserver():
    print("server socket listening on port 6000")
    server = eventlet.listen(('0.0.0.0', 6000))
    print('server =',server)
    pool = eventlet.GreenPool()
    while True:
        try:
            new_sock, address = server.accept()
            print("accepted", address)
            pool.spawn_n(handle, new_sock.makefile('rw'))
        except (SystemExit, KeyboardInterrupt):
            break
######################################################################################################
"""This is a simple example of running a wsgi application with eventlet.
For a more fully-featured server which supports multiple processes,
multiple threads, and graceful code reloading, see:

http://pypi.python.org/pypi/Spawning/
"""
def hello_world(env, start_response):
    if env['PATH_INFO'] != '/':
        start_response('404 Not Found', [('Content-Type', 'text/plain')])
        return ['Not Found\r\n']
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['Hello, World!\r\n']

def wsgi_example():
    """Serve the hello_world WSGI app on all interfaces, port 8089."""
    listener = eventlet.listen(('', 8089))
    wsgi.server(listener, hello_world)
######################################################################################################
"""
This is a simple web "crawler" that fetches a bunch of urls using a pool to
control the number of outbound connections. It has as many simultaneously open
connections as coroutines in the pool.

The prints in the body of the fetch function are there to demonstrate that the
requests are truly made in parallel.
"""
def fetch_url(url):
    print('openning ',url)
    time.sleep(2)
    body=urllib2.urlopen(url).read()
    #time.sleep(random.randint(1,4))
    print('done with ',url)
    return url,body

def webcrawler():
    """Fetch a list of image urls in parallel and report each body length."""
    urls = ['http://photocdn.sohu.com/20160903/Img467469434.jpeg',
            'http://photocdn.sohu.com/20160903/Img467469428.jpeg',
            'http://photocdn.sohu.com/20160903/Img467451129.jpeg',
            'http://img.mp.itc.cn/upload/20160902/c7613eff56a74591b8a595e214692cb6_th.jpeg']
    pool = eventlet.GreenPool(200)
    # Parenthesized single-argument print produces identical output under
    # Python 2 and keeps the file consistent with the other print calls.
    print('--------------for------------')
    # imap yields (url, body) results in order as workers finish.
    for url, body in pool.imap(fetch_url, urls):
        print('got body from', url, ' of length', len(body))

######################################################################################################

if __name__ == '__main__':
    # Parenthesized single-argument print produces identical output under
    # Python 2 and keeps the file consistent with the other print calls.
    print('-------------------eventlet examples:-------------------')
    # Uncomment exactly one example to run; each serves/blocks forever.
    #webcrawler()
    #wsgi_example()
    #echoserver()
    #socketconnect()
    #chat_server()
    #feedscraper()
    #recursive_crawler()
    #producer_consumer()
    #websocket()
    websocket_chat()
