# Notebook-style walkthrough: obtaining Twitter API OAuth credentials.
# Each Image(...) call renders a screenshot illustrating one setup step.
import pandas as pd
import json
import os
import imp  # NOTE(review): `imp` is deprecated (importlib replaces it) and appears unused here
from IPython.display import Image
# !pip install twitter
# import twitter
# The first time one accesses the page http://twitter.com/apps/new, one is requested to sign in.
Image("pngs/apptwitter_new.png")
# First, one needs to log in to her Twitter account (assuming that one already possesses it).
Image("pngs/login_to_twitter.png")
# First, one should create a new application (or use an App already created).
Image("pngs/create_app.png")
# One always has the option to create a new App (if needed).
Image("pngs/select_app.png")
# After creating/accessing an App, one should click the link "Keys and Access Tokens."
Image("pngs/app_created.png")
# Next, to access tokens, one should click "Token Actions > Create my access token."
Image("pngs/create_tokens_pass.png")
# To obtain tokens, one should click "Test OAuth."
Image("pngs/tokens_created.png")
# Thus, one retrieves her OAuth settings.
Image("pngs/token_pass_final.png")
# Location of the TwitterMining repo (supplies the helper modules imported
# below) and a scratch directory for search output.
# NOTE(review): paths are hard-coded to one machine — adjust locally.
input_dir='/home/mab/github_repos/TwitterMining'
output_dir='/home/mab/Desktop/twitTemp'
# Path to a stored-credentials file; None presumably triggers interactive
# authentication — verify against ctn.UserAuth.
cred_dic=None
# cred_dic='/home/mab/github_repos/TwitterMining/credentials/auth_cred.txt'
# cred_dic='/media/sergios-len/Elements/Brighton_workshop/auth_cred.txt'
# IPython "!" shell magic: capture the current working directory as a list
# of output lines.  This line is valid only inside IPython/Jupyter, not in
# plain Python.
pp= !pwd
# Temporarily switch into the repo so its modules are importable, then
# restore the original working directory.
os.chdir(input_dir)
from test_class_tpa import create_df
import collect_tweets_notebook as ctn
os.chdir(pp[0])
def create_beaker_com_dict(sps, date_format='%Y%m%d'):
    """Flatten a dict of keyed sub-dicts into a dict of ordered lists.

    For each top-level key in *sps*, the sub-dict's values are collected
    into a list ordered by sorted sub-key.  For the special key
    'date_split' the values are assumed to be datetime-like objects and
    are rendered as strings with *date_format*.

    Parameters
    ----------
    sps : dict of {hashable: dict}
        Mapping of category name to {sortable sub-key: value}.
    date_format : str, optional
        strftime() format applied to 'date_split' values
        (default '%Y%m%d', matching the original hard-coded behavior).

    Returns
    -------
    dict of {hashable: list}
    """
    nsps = {}
    for key, sub in sps.items():
        # Order values deterministically by their sub-keys.
        ordered = [sub[sk] for sk in sorted(sub.keys())]
        if key == 'date_split':
            nsps[key] = [d.strftime(date_format) for d in ordered]
        else:
            nsps[key] = ordered
    return nsps
# Authenticate against the Twitter API.  With cred_dic=None, UserAuth
# presumably prompts for credentials interactively — behavior defined in
# collect_tweets_notebook.UserAuth, not visible here.
vv=ctn.UserAuth(auth_file=cred_dic)
vv.login()
vv.check_login()
twi_api=vv.get_auth()
# Search query passed verbatim to the Twitter search API.
search_term='#France - Paris'
# Run the search: up to max_pages pages of results_per_page tweets each,
# streaming results under working_path (out_file_dir=None: use the default
# output location — confirm against ctn.TwitterSearch).
sea=ctn.TwitterSearch(twi_api,search_text=search_term,working_path=output_dir,out_file_dir=None,
max_pages=10,results_per_page=100,sin_id=None,max_id=None,verbose=True)
sea.streamsearch()
# Column layout expected of the tweet DataFrame produced by create_df.
columnss=['id','user_id','username','created_at','language','hashtag_count','retweet_count','mention_count',
'statuses_count','followers_count','friends_count','listed_count','videos_count','photos_count',
'undef_count','coordinates','bounding','place','hashtags','mentions','text']
# List the available columns.  Fixed: `print i` is Python 2-only syntax
# (a SyntaxError under Python 3); print(i) works under both 2 and 3.
for i in columnss:
    print(i)
# Example (commented out): load the collected JSON tweets into a pandas
# DataFrame via create_df, restrict it to the columns above, and look up a
# single tweet by id.  NOTE(review): the commented prints use Python 2
# syntax and would need print(...) under Python 3.
# json_dir='/home/mab/Desktop/twitTemp/Output'
# selt='rr'
# outname=''
# pdf,httoadds=create_df(json_dir,selt,outname,multihas=False,r_or_p='python')
# pdf=pdf[columnss]
# print pdf.head(3)
# Accessing the columns of a Tweet in the Data Frame from the id of the Tweet:
# some_id='736628805335420928'
# ppd=pdf[pdf['id']==some_id]
# tx=ppd.text.tolist()[0]
# screen_name=ppd.username.tolist()[0]
# idt=ppd.id.tolist()[0]
# print tx
# link=' https://twitter.com/%s/status/%s ' %(screen_name,idt)
# link
# Screenshots of the search and its results as they appear on twitter.com.
Image("pngs/twitter_search.png")
Image("pngs/twitter_search_result.png")