From 3d26d021d2666b55f4239f3b467e430ca936f182 Mon Sep 17 00:00:00 2001
From: Rohith Gilla
Date: Thu, 3 May 2018 15:29:34 +0530
Subject: [PATCH] Adding xkcd scraper and cricket notifications

---
 python2.7/CricketNotifications.py | 93 +++++++++++++++++++++++++++++++
 python2.7/xkcd.py                 | 28 ++++++++++
 2 files changed, 121 insertions(+)
 create mode 100755 python2.7/CricketNotifications.py
 create mode 100755 python2.7/xkcd.py

diff --git a/python2.7/CricketNotifications.py b/python2.7/CricketNotifications.py
new file mode 100755
index 0000000..7571ecf
--- /dev/null
+++ b/python2.7/CricketNotifications.py
@@ -0,0 +1,93 @@
+import requests
+from bs4 import BeautifulSoup
+import time
+import notify2
+notify2.init("CricBuzz")
+n=notify2.Notification(None,icon="/home/rohith-gilla/icon.png")
+n.set_urgency(notify2.URGENCY_NORMAL)
+n.set_timeout(1000)
+url="http://synd.cricbuzz.com/j2me/1.0/livematches.xml"
+r=requests.get(url)
+soup=BeautifulSoup(r.content,'html.parser')
+temp=soup.find_all('match')
+series_names=[]
+datapath=[]
+for i in temp:    #Collect the data path and series name of every live match
+    i=str(i)
+    i=i.split('datapath')[1]
+    i=i.split('"')
+    datapath.append(i[1])
+    series_names.append(i[13])
+for i in range(len(datapath)):
+    print str(i+1)+")"+series_names[i]
+ip=input("Enter the match number: ")
+datapath=datapath[ip-1]
+series_names=series_names[ip-1]
+com_url=datapath+"commentary.xml"
+old_overs="0"
+counter=0
+loop_v=0
+while(loop_v==0):    #Poll the commentary feed until the innings ends
+    r=requests.get(com_url)
+    soup=BeautifulSoup(r.content,'html.parser')
+    try:
+        temp=soup.find('c')
+        comm=temp.get_text() if temp is not None else ''    #Latest commentary text without the <c> tag markup
+        comm=comm.strip()
+        test=comm
+    except:
+        counter+=1
+        pass
+    temp=str(soup.find_all('mscr'))
+    bat_tem=temp.split('sname="')[1].split('"')[0]    #Batting team's short name
+    runs=temp.split('r="')[3]
+    runs=runs.split('"')[0]
+    wickets=temp.split('wkts="')[1]
+    wickets=wickets.split('"')[0]
+    overs=temp.split('ovrs')
+    overs=overs[1]
+    overs=overs.split('"')[1]
+    batsman=temp.split('btsmn')
+    try:
+        bat1=batsman[1].split('sname="')[1].split('"')[0]
+        r1=batsman[1].split('r="')[1].split('"')[0]
+    except:
+        counter+=1
+        pass
+    try:
+        bat2=batsman[3].split('sname="')[1].split('"')[0]
+        r2=batsman[3].split('r="')[1].split('"')[0]
+    except:
+        counter+=1
+        pass
+    # print "____"*20
+    n.update(bat_tem+" : "+runs+"/"+wickets+" Overs : "+str(overs))
+    if(old_overs==overs):
+        pass
+    else:
+        print "____"*20
+        print comm
+        print "Score : "+runs+"/"+wickets
+        print "Overs : "+str(overs)
+        try:
+            print bat1+" : "+r1,"  "+bat2+" : "+r2
+            n.update(bat_tem+" : "+runs+"/"+wickets+" Overs : "+str(overs),bat1+" : "+r1+"  "+bat2+" : "+r2)
+        except:
+            pass
+        old_overs=overs
+    n.show()
+    if(int(wickets)==10):    #All out, stop polling
+        loop_v=1
+    time.sleep(15)
+# print datapath,series_names
+
+
+
+
+
+
+
+
+
+# print datapath
+# print series_names
diff --git a/python2.7/xkcd.py b/python2.7/xkcd.py
new file mode 100755
index 0000000..d08a8ba
--- /dev/null
+++ b/python2.7/xkcd.py
@@ -0,0 +1,28 @@
+import re
+import requests
+from bs4 import BeautifulSoup
+import random
+import os
+l=[]    #For storing the random comic numbers
+def download_img(data,filename):    #Function to save image bytes into the XKCD folder
+    if (os.path.isdir('XKCD')):    #Check whether the folder already exists
+        pass
+    else:
+        os.mkdir('XKCD')    #If not, create it
+    op_file=open('XKCD/'+filename,'wb')
+    op_file.write(data)    #Write the image bytes to disk
+    print "Downloaded",filename
+choice=input("How many random comics do you want to download? xD \n")
+for i in range(choice):
+    l.append(str(random.randint(1,1933)))    #Latest comic at the time of writing is 1933
+for i in l:
+    url="https://xkcd.com/"+str(i)+"/"
+    r=requests.get(url)
+    soup=BeautifulSoup(r.content,'html.parser')
+    filename=str(soup.select('#ctitle')).split('">')
+    filename=filename[1].split('<')
+    filename=filename[0]    #Getting the comic title using string manipulation
+    img_url=soup.select('#comic')
+    img_url=str(img_url).split('src=')[1]
+    img_url='https:'+img_url.split('"')[1]
+    download_img(requests.get(img_url).content,filename+'.png')    #Calling the download function once per comic