#!/usr/bin/python
# coding=utf-8
import httplib, urllib, json, codecs

# Gets the latest tweets matching the query
def get_tweets(query, count, rpp=100):
  """Fetch up to `count` tweets matching `query` from the Twitter search API.

  query -- search string passed as the 'q' parameter.
  count -- maximum number of tweets to return.
  rpp   -- results per page (API maximum is 100); capped at `count`
           so we never request more than needed.

  Returns a list of tweet dicts (at most `count` of them). Stops early,
  after printing a diagnostic, when the API reports an error or runs out
  of pages.
  """
  tweets = []
  # Honour the caller-supplied page size instead of discarding it,
  # but never ask for more tweets than we actually want.
  rpp = min(rpp, count)
  search_params = '?' + urllib.urlencode({'q': query, 'rpp': rpp})
  while len(tweets) < count:
    search_results = search_tweets_json(search_params)
    try:
      tweets += search_results['results']
      search_params = search_results['next_page']
    except KeyError:
      # Either the API returned an error payload, or there is no
      # 'next_page' key because we exhausted the available results.
      if 'error' in search_results:
        print "ERROR: %s" % search_results['error']
      else:
        print "Only %d tweets were found." % (len(tweets))
      break
    print "%.2d%% - %d/%d tweets downloaded." % (len(tweets)*100/count, len(tweets), count)
  # A full last page can overshoot; trim to the requested count.
  del tweets[count:]
  return tweets

# Makes a GET http request to get the tweets
def search_tweets_json(params):
  """Perform one GET against the Twitter search endpoint.

  params -- query string (including leading '?') or an API-supplied
            'next_page' value.

  Returns the decoded JSON response as a dict. May raise on network
  failure or if the response body is not valid JSON.
  """
  conn = httplib.HTTPConnection('search.twitter.com', 80)
  try:
    conn.request('GET', '/search.json%s' % params)
    resp_json = conn.getresponse().read()
  finally:
    # Close the socket even when request/getresponse raises.
    conn.close()
  return json.loads(resp_json)

# Downloads tweets and saves them to a file
def scrape_tweets(filename, query, count):
  """Download up to `count` tweets matching `query` and save them as JSON.

  Fetches first, then opens the output file: the original opened (and
  truncated) the file before the download, so a failed search destroyed
  any existing contents. `with` also guarantees the handle is closed.
  """
  tweets = get_tweets(query, count)
  with open(filename, 'w') as f:
    f.write(json.dumps(tweets))

# Loads tweets from a file
def load_tweets(filename):
  """Load tweets previously saved by scrape_tweets.

  filename -- path to a UTF-8 JSON file.

  Returns the parsed JSON value (a list of tweet dicts as written by
  scrape_tweets). Using `with` ensures the file is closed even when
  json.load raises on malformed input.
  """
  with codecs.open(filename, encoding='utf-8', mode='r') as f:
    return json.load(f)
 
if __name__ == '__main__':
  import sys

  def usage():
    print 'Usage:',sys.argv[0], "<output>", "<query>", "<count>"

  if len(sys.argv) != 4:
    usage()
    exit()
  
  (output, query, count) = sys.argv[1:]
  count = int(count)

  print 'Searching up to %d tweets containing "%s"' % (count, query)

  scrape_tweets(output,query,count)
