commit (stringlengths 40..40) | subject (stringlengths 4..1.73k) | repos (stringlengths 5..127k) | old_file (stringlengths 2..751) | new_file (stringlengths 2..751) | new_contents (stringlengths 1..8.98k) | old_contents (stringlengths 0..6.59k) | license (stringclasses, 13 values) | lang (stringclasses, 23 values) |
---|---|---|---|---|---|---|---|---|
4d30756e722cafa40fa449e48c967eeebc58500a | Add a manage.py command to import realm filters | vabs22/zulip,calvinleenyc/zulip,sup95/zulip,deer-hope/zulip,kokoar/zulip,ryanbackman/zulip,ahmadassaf/zulip,akuseru/zulip,shaunstanislaus/zulip,Gabriel0402/zulip,souravbadami/zulip,JanzTam/zulip,ufosky-server/zulip,noroot/zulip,amallia/zulip,vikas-parashar/zulip,kaiyuanheshang/zulip,synicalsyntax/zulip,ashwinirudrappa/zulip,aps-sids/zulip,voidException/zulip,krtkmj/zulip,huangkebo/zulip,sonali0901/zulip,ahmadassaf/zulip,zhaoweigg/zulip,arpith/zulip,PaulPetring/zulip,rishig/zulip,willingc/zulip,willingc/zulip,sonali0901/zulip,LeeRisk/zulip,ryansnowboarder/zulip,dhcrzf/zulip,wangdeshui/zulip,ahmadassaf/zulip,zulip/zulip,zacps/zulip,so0k/zulip,jessedhillon/zulip,swinghu/zulip,verma-varsha/zulip,noroot/zulip,isht3/zulip,dwrpayne/zulip,Vallher/zulip,blaze225/zulip,eastlhu/zulip,eeshangarg/zulip,eastlhu/zulip,m1ssou/zulip,rht/zulip,ericzhou2008/zulip,hj3938/zulip,easyfmxu/zulip,amanharitsh123/zulip,Qgap/zulip,synicalsyntax/zulip,babbage/zulip,joshisa/zulip,Diptanshu8/zulip,lfranchi/zulip,itnihao/zulip,xuxiao/zulip,tommyip/zulip,tbutter/zulip,JPJPJPOPOP/zulip,andersk/zulip,synicalsyntax/zulip,Vallher/zulip,eeshangarg/zulip,zofuthan/zulip,esander91/zulip,bastianh/zulip,hackerkid/zulip,showell/zulip,johnnygaddarr/zulip,christi3k/zulip,amallia/zulip,praveenaki/zulip,jimmy54/zulip,cosmicAsymmetry/zulip,dxq-git/zulip,arpitpanwar/zulip,kokoar/zulip,glovebx/zulip,calvinleenyc/zulip,shubhamdhama/zulip,saitodisse/zulip,aliceriot/zulip,mahim97/zulip,levixie/zulip,Diptanshu8/zulip,ashwinirudrappa/zulip,reyha/zulip,sonali0901/zulip,amallia/zulip,arpith/zulip,umkay/zulip,jphilipsen05/zulip,tdr130/zulip,dotcool/zulip,vakila/zulip,itnihao/zulip,punchagan/zulip,grave-w-grave/zulip,arpitpanwar/zulip,yocome/zulip,jainayush975/zulip,fw1121/zulip,yocome/zulip,EasonYi/zulip,dotcool/zulip,souravbadami/zulip,shrikrishnaholla/zulip,udxxabp/zulip,wavelets/zulip,seapasulli/zulip,susansls/zulip,bluesea/zulip,aps-sids/zulip,xuxiao/zulip,amyliu345/zulip,arpitpanwar/zulip,he15his/zulip,hustlzp/zulip,christi3k/zulip,EasonYi/zulip,proliming/zulip,hayderimran7/zulip,wdaher/zulip,susansls/zulip,tdr130/zulip,MariaFaBella85/zulip,technicalpickles/zulip,LeeRisk/zulip,yuvipanda/zulip,mdavid/zulip,tommyip/zulip,natanovia/zulip,Juanvulcano/zulip,swinghu/zulip,cosmicAsymmetry/zulip,he15his/zulip,punchagan/zulip,jrowan/zulip,amallia/zulip,blaze225/zulip,MayB/zulip,noroot/zulip,Drooids/zulip,blaze225/zulip,rht/zulip,adnanh/zulip,adnanh/zulip,babbage/zulip,schatt/zulip,peiwei/zulip,yuvipanda/zulip,avastu/zulip,calvinleenyc/zulip,wdaher/zulip,zachallaun/zulip,DazWorrall/zulip,dawran6/zulip,he15his/zulip,ipernet/zulip,krtkmj/zulip,amanharitsh123/zulip,zofuthan/zulip,aakash-cr7/zulip,stamhe/zulip,karamcnair/zulip,so0k/zulip,codeKonami/zulip,seapasulli/zulip,pradiptad/zulip,tiansiyuan/zulip,mahim97/zulip,paxapy/zulip,hustlzp/zulip,pradiptad/zulip,KJin99/zulip,luyifan/zulip,mansilladev/zulip,atomic-labs/zulip,ryansnowboarder/zulip,noroot/zulip,tommyip/zulip,atomic-labs/zulip,JPJPJPOPOP/zulip,easyfmxu/zulip,proliming/zulip,ryanbackman/zulip,suxinde2009/zulip,joshisa/zulip,lfranchi/zulip,rht/zulip,karamcnair/zulip,tiansiyuan/zulip,tiansiyuan/zulip,rishig/zulip,kaiyuanheshang/zulip,bowlofstew/zulip,tiansiyuan/zulip,praveenaki/zulip,shubhamdhama/zulip,ipernet/zulip,schatt/zulip,tbutter/zulip,Batterfii/zulip,babbage/zulip,hafeez3000/zulip,MayB/zulip,willingc/zulip,nicholasbs/zulip,krtkmj/zulip,firstblade/zulip,jessedhillon/zulip,synicalsyntax/zulip,bluesea/zu
lip,hj3938/zulip,bitemyapp/zulip,KingxBanana/zulip,hayderimran7/zulip,aakash-cr7/zulip,yuvipanda/zulip,reyha/zulip,itnihao/zulip,tdr130/zulip,vabs22/zulip,Batterfii/zulip,bastianh/zulip,aliceriot/zulip,ryansnowboarder/zulip,Frouk/zulip,tommyip/zulip,thomasboyt/zulip,mansilladev/zulip,punchagan/zulip,zachallaun/zulip,reyha/zulip,DazWorrall/zulip,zacps/zulip,ikasumiwt/zulip,brockwhittaker/zulip,amyliu345/zulip,hengqujushi/zulip,showell/zulip,jeffcao/zulip,Juanvulcano/zulip,gigawhitlocks/zulip,MariaFaBella85/zulip,glovebx/zulip,aps-sids/zulip,luyifan/zulip,peiwei/zulip,praveenaki/zulip,xuxiao/zulip,showell/zulip,saitodisse/zulip,j831/zulip,hengqujushi/zulip,armooo/zulip,tommyip/zulip,kaiyuanheshang/zulip,joshisa/zulip,rishig/zulip,voidException/zulip,LeeRisk/zulip,alliejones/zulip,akuseru/zulip,tdr130/zulip,wweiradio/zulip,bowlofstew/zulip,ahmadassaf/zulip,xuxiao/zulip,codeKonami/zulip,bowlofstew/zulip,firstblade/zulip,thomasboyt/zulip,KingxBanana/zulip,sup95/zulip,kou/zulip,he15his/zulip,wavelets/zulip,Qgap/zulip,kou/zulip,tommyip/zulip,j831/zulip,thomasboyt/zulip,swinghu/zulip,themass/zulip,dotcool/zulip,MayB/zulip,levixie/zulip,verma-varsha/zulip,hustlzp/zulip,luyifan/zulip,johnny9/zulip,codeKonami/zulip,voidException/zulip,moria/zulip,firstblade/zulip,RobotCaleb/zulip,j831/zulip,jainayush975/zulip,amyliu345/zulip,hackerkid/zulip,mansilladev/zulip,gkotian/zulip,qq1012803704/zulip,fw1121/zulip,jackrzhang/zulip,eeshangarg/zulip,susansls/zulip,Batterfii/zulip,amanharitsh123/zulip,karamcnair/zulip,kou/zulip,themass/zulip,shrikrishnaholla/zulip,jrowan/zulip,dawran6/zulip,dattatreya303/zulip,pradiptad/zulip,joyhchen/zulip,zulip/zulip,saitodisse/zulip,PhilSk/zulip,Suninus/zulip,armooo/zulip,zwily/zulip,hengqujushi/zulip,vabs22/zulip,nicholasbs/zulip,Vallher/zulip,Cheppers/zulip,joyhchen/zulip,RobotCaleb/zulip,RobotCaleb/zulip,so0k/zulip,johnny9/zulip,Juanvulcano/zulip,zofuthan/zulip,akuseru/zulip,yocome/zulip,jerryge/zulip,zhaoweigg/zulip,MayB/zulip,littledogboy/zulip,Frouk/zulip,jrowan/zulip,zorojean/zulip,rishig/zulip,andersk/zulip,wavelets/zulip,tommyip/zulip,gigawhitlocks/zulip,bastianh/zulip,susansls/zulip,eeshangarg/zulip,glovebx/zulip,schatt/zulip,amanharitsh123/zulip,Galexrt/zulip,yocome/zulip,gigawhitlocks/zulip,jeffcao/zulip,Suninus/zulip,hackerkid/zulip,esander91/zulip,tiansiyuan/zulip,huangkebo/zulip,avastu/zulip,eeshangarg/zulip,m1ssou/zulip,niftynei/zulip,timabbott/zulip,christi3k/zulip,arpith/zulip,cosmicAsymmetry/zulip,schatt/zulip,dawran6/zulip,jainayush975/zulip,lfranchi/zulip,TigorC/zulip,zofuthan/zulip,mansilladev/zulip,niftynei/zulip,KJin99/zulip,j831/zulip,seapasulli/zulip,stamhe/zulip,krtkmj/zulip,adnanh/zulip,jeffcao/zulip,vaidap/zulip,isht3/zulip,luyifan/zulip,punchagan/zulip,glovebx/zulip,tbutter/zulip,vakila/zulip,verma-varsha/zulip,swinghu/zulip,JPJPJPOPOP/zulip,niftynei/zulip,so0k/zulip,cosmicAsymmetry/zulip,dotcool/zulip,jonesgithub/zulip,ApsOps/zulip,themass/zulip,Galexrt/zulip,jessedhillon/zulip,hustlzp/zulip,ufosky-server/zulip,sharmaeklavya2/zulip,xuanhan863/zulip,joyhchen/zulip,zwily/zulip,kaiyuanheshang/zulip,ryanbackman/zulip,timabbott/zulip,dxq-git/zulip,easyfmxu/zulip,shrikrishnaholla/zulip,developerfm/zulip,isht3/zulip,dotcool/zulip,eeshangarg/zulip,jackrzhang/zulip,ahmadassaf/zulip,brainwane/zulip,aakash-cr7/zulip,jimmy54/zulip,wweiradio/zulip,tbutter/zulip,timabbott/zulip,deer-hope/zulip,swinghu/zulip,bastianh/zulip,dwrpayne/zulip,Vallher/zulip,Suninus/zulip,sup95/zulip,wdaher/zulip,arpith/zulip,alliejones/zulip,samatdav/zulip,verma-varsha/zulip,punchagan/
zulip,fw1121/zulip,gigawhitlocks/zulip,rishig/zulip,developerfm/zulip,JPJPJPOPOP/zulip,ahmadassaf/zulip,Gabriel0402/zulip,synicalsyntax/zulip,vakila/zulip,babbage/zulip,ericzhou2008/zulip,jainayush975/zulip,bssrdf/zulip,Frouk/zulip,dotcool/zulip,dwrpayne/zulip,wavelets/zulip,esander91/zulip,mdavid/zulip,dxq-git/zulip,showell/zulip,kokoar/zulip,jerryge/zulip,vikas-parashar/zulip,calvinleenyc/zulip,KJin99/zulip,dxq-git/zulip,armooo/zulip,johnny9/zulip,hackerkid/zulip,samatdav/zulip,m1ssou/zulip,so0k/zulip,sonali0901/zulip,ryanbackman/zulip,Jianchun1/zulip,akuseru/zulip,saitodisse/zulip,grave-w-grave/zulip,littledogboy/zulip,guiquanz/zulip,bssrdf/zulip,MayB/zulip,umkay/zulip,fw1121/zulip,ryanbackman/zulip,dhcrzf/zulip,yocome/zulip,jphilipsen05/zulip,eeshangarg/zulip,zacps/zulip,MayB/zulip,Drooids/zulip,schatt/zulip,amyliu345/zulip,ApsOps/zulip,suxinde2009/zulip,zacps/zulip,praveenaki/zulip,vaidap/zulip,esander91/zulip,mdavid/zulip,Suninus/zulip,hackerkid/zulip,wdaher/zulip,ericzhou2008/zulip,sharmaeklavya2/zulip,ipernet/zulip,SmartPeople/zulip,vaidap/zulip,j831/zulip,PhilSk/zulip,saitodisse/zulip,DazWorrall/zulip,Batterfii/zulip,willingc/zulip,wangdeshui/zulip,esander91/zulip,KJin99/zulip,firstblade/zulip,Gabriel0402/zulip,dattatreya303/zulip,EasonYi/zulip,amyliu345/zulip,stamhe/zulip,Diptanshu8/zulip,Gabriel0402/zulip,ericzhou2008/zulip,atomic-labs/zulip,DazWorrall/zulip,umkay/zulip,alliejones/zulip,vakila/zulip,amallia/zulip,so0k/zulip,LAndreas/zulip,dhcrzf/zulip,LeeRisk/zulip,ashwinirudrappa/zulip,mdavid/zulip,technicalpickles/zulip,praveenaki/zulip,joyhchen/zulip,eastlhu/zulip,saitodisse/zulip,ipernet/zulip,Drooids/zulip,arpitpanwar/zulip,shubhamdhama/zulip,ipernet/zulip,shubhamdhama/zulip,wangdeshui/zulip,fw1121/zulip,Gabriel0402/zulip,Cheppers/zulip,umkay/zulip,krtkmj/zulip,lfranchi/zulip,suxinde2009/zulip,schatt/zulip,shubhamdhama/zulip,Qgap/zulip,jerryge/zulip,bowlofstew/zulip,jrowan/zulip,vabs22/zulip,jainayush975/zulip,AZtheAsian/zulip,TigorC/zulip,RobotCaleb/zulip,paxapy/zulip,brockwhittaker/zulip,gkotian/zulip,avastu/zulip,vakila/zulip,punchagan/zulip,seapasulli/zulip,ikasumiwt/zulip,susansls/zulip,ashwinirudrappa/zulip,natanovia/zulip,sonali0901/zulip,codeKonami/zulip,MariaFaBella85/zulip,Qgap/zulip,MariaFaBella85/zulip,zachallaun/zulip,jimmy54/zulip,stamhe/zulip,hj3938/zulip,DazWorrall/zulip,vikas-parashar/zulip,andersk/zulip,natanovia/zulip,mansilladev/zulip,udxxabp/zulip,dhcrzf/zulip,LAndreas/zulip,voidException/zulip,littledogboy/zulip,bssrdf/zulip,peguin40/zulip,wavelets/zulip,dxq-git/zulip,bastianh/zulip,Vallher/zulip,pradiptad/zulip,alliejones/zulip,alliejones/zulip,aliceriot/zulip,jessedhillon/zulip,zachallaun/zulip,natanovia/zulip,hafeez3000/zulip,akuseru/zulip,LAndreas/zulip,udxxabp/zulip,brainwane/zulip,littledogboy/zulip,ahmadassaf/zulip,m1ssou/zulip,souravbadami/zulip,vakila/zulip,umkay/zulip,samatdav/zulip,MariaFaBella85/zulip,dwrpayne/zulip,itnihao/zulip,PhilSk/zulip,synicalsyntax/zulip,zhaoweigg/zulip,codeKonami/zulip,gkotian/zulip,bssrdf/zulip,jphilipsen05/zulip,johnnygaddarr/zulip,bastianh/zulip,LAndreas/zulip,zhaoweigg/zulip,zacps/zulip,suxinde2009/zulip,swinghu/zulip,zwily/zulip,natanovia/zulip,TigorC/zulip,ApsOps/zulip,yocome/zulip,jonesgithub/zulip,ufosky-server/zulip,jessedhillon/zulip,zwily/zulip,tdr130/zulip,Cheppers/zulip,avastu/zulip,udxxabp/zulip,he15his/zulip,JanzTam/zulip,gkotian/zulip,easyfmxu/zulip,TigorC/zulip,aakash-cr7/zulip,nicholasbs/zulip,deer-hope/zulip,dawran6/zulip,noroot/zulip,Diptanshu8/zulip,Frouk/zulip,zorojean/zulip,sharmaeklavya2/zuli
p,saitodisse/zulip,timabbott/zulip,LeeRisk/zulip,peguin40/zulip,jessedhillon/zulip,KJin99/zulip,alliejones/zulip,atomic-labs/zulip,shubhamdhama/zulip,samatdav/zulip,krtkmj/zulip,kaiyuanheshang/zulip,themass/zulip,Qgap/zulip,johnny9/zulip,nicholasbs/zulip,moria/zulip,Galexrt/zulip,Suninus/zulip,ipernet/zulip,technicalpickles/zulip,vabs22/zulip,bitemyapp/zulip,praveenaki/zulip,joyhchen/zulip,avastu/zulip,qq1012803704/zulip,rishig/zulip,ufosky-server/zulip,proliming/zulip,mdavid/zulip,jackrzhang/zulip,jimmy54/zulip,jeffcao/zulip,ipernet/zulip,m1ssou/zulip,PhilSk/zulip,Cheppers/zulip,Cheppers/zulip,PaulPetring/zulip,tiansiyuan/zulip,jerryge/zulip,qq1012803704/zulip,nicholasbs/zulip,bowlofstew/zulip,shrikrishnaholla/zulip,ApsOps/zulip,zofuthan/zulip,bssrdf/zulip,Cheppers/zulip,AZtheAsian/zulip,vabs22/zulip,dwrpayne/zulip,calvinleenyc/zulip,wangdeshui/zulip,hengqujushi/zulip,mohsenSy/zulip,huangkebo/zulip,mahim97/zulip,bitemyapp/zulip,kou/zulip,zachallaun/zulip,lfranchi/zulip,mdavid/zulip,he15his/zulip,Frouk/zulip,johnnygaddarr/zulip,zorojean/zulip,arpitpanwar/zulip,showell/zulip,gkotian/zulip,zorojean/zulip,sharmaeklavya2/zulip,Jianchun1/zulip,yocome/zulip,babbage/zulip,krtkmj/zulip,nicholasbs/zulip,Vallher/zulip,stamhe/zulip,mohsenSy/zulip,Suninus/zulip,ryansnowboarder/zulip,huangkebo/zulip,adnanh/zulip,jerryge/zulip,wweiradio/zulip,amyliu345/zulip,xuxiao/zulip,hackerkid/zulip,shrikrishnaholla/zulip,LAndreas/zulip,itnihao/zulip,glovebx/zulip,seapasulli/zulip,RobotCaleb/zulip,vaidap/zulip,joshisa/zulip,samatdav/zulip,karamcnair/zulip,wangdeshui/zulip,johnny9/zulip,sharmaeklavya2/zulip,avastu/zulip,rht/zulip,johnnygaddarr/zulip,technicalpickles/zulip,xuanhan863/zulip,arpith/zulip,shrikrishnaholla/zulip,aakash-cr7/zulip,dnmfarrell/zulip,moria/zulip,seapasulli/zulip,proliming/zulip,sup95/zulip,arpitpanwar/zulip,tdr130/zulip,jackrzhang/zulip,xuanhan863/zulip,aliceriot/zulip,bitemyapp/zulip,bitemyapp/zulip,synicalsyntax/zulip,vikas-parashar/zulip,isht3/zulip,wweiradio/zulip,pradiptad/zulip,codeKonami/zulip,wavelets/zulip,lfranchi/zulip,shaunstanislaus/zulip,sharmaeklavya2/zulip,souravbadami/zulip,grave-w-grave/zulip,udxxabp/zulip,natanovia/zulip,armooo/zulip,aps-sids/zulip,tdr130/zulip,LAndreas/zulip,johnny9/zulip,ufosky-server/zulip,ericzhou2008/zulip,jackrzhang/zulip,zorojean/zulip,vakila/zulip,JanzTam/zulip,mohsenSy/zulip,dattatreya303/zulip,PaulPetring/zulip,deer-hope/zulip,willingc/zulip,qq1012803704/zulip,ikasumiwt/zulip,jonesgithub/zulip,blaze225/zulip,KingxBanana/zulip,firstblade/zulip,shaunstanislaus/zulip,hayderimran7/zulip,joyhchen/zulip,ApsOps/zulip,tbutter/zulip,fw1121/zulip,hj3938/zulip,wangdeshui/zulip,thomasboyt/zulip,hustlzp/zulip,peiwei/zulip,cosmicAsymmetry/zulip,ashwinirudrappa/zulip,wdaher/zulip,Drooids/zulip,Qgap/zulip,wweiradio/zulip,yuvipanda/zulip,Juanvulcano/zulip,zofuthan/zulip,KJin99/zulip,paxapy/zulip,niftynei/zulip,Jianchun1/zulip,brainwane/zulip,aliceriot/zulip,KingxBanana/zulip,suxinde2009/zulip,EasonYi/zulip,jessedhillon/zulip,dhcrzf/zulip,levixie/zulip,nicholasbs/zulip,proliming/zulip,brainwane/zulip,amallia/zulip,tbutter/zulip,shubhamdhama/zulip,brockwhittaker/zulip,proliming/zulip,hackerkid/zulip,bitemyapp/zulip,KingxBanana/zulip,ApsOps/zulip,zhaoweigg/zulip,guiquanz/zulip,wdaher/zulip,pradiptad/zulip,umkay/zulip,qq1012803704/zulip,Suninus/zulip,hafeez3000/zulip,dotcool/zulip,mansilladev/zulip,moria/zulip,mohsenSy/zulip,zhaoweigg/zulip,jerryge/zulip,babbage/zulip,hayderimran7/zulip,PaulPetring/zulip,jimmy54/zulip,peiwei/zulip,SmartPeople/zulip,verma-varsha/zulip,Ji
anchun1/zulip,grave-w-grave/zulip,Batterfii/zulip,ikasumiwt/zulip,AZtheAsian/zulip,akuseru/zulip,peguin40/zulip,armooo/zulip,xuanhan863/zulip,AZtheAsian/zulip,andersk/zulip,ryanbackman/zulip,moria/zulip,ericzhou2008/zulip,SmartPeople/zulip,reyha/zulip,MariaFaBella85/zulip,timabbott/zulip,schatt/zulip,zulip/zulip,Drooids/zulip,karamcnair/zulip,noroot/zulip,easyfmxu/zulip,qq1012803704/zulip,sonali0901/zulip,jrowan/zulip,bssrdf/zulip,LeeRisk/zulip,armooo/zulip,KJin99/zulip,Juanvulcano/zulip,timabbott/zulip,dattatreya303/zulip,praveenaki/zulip,xuanhan863/zulip,zofuthan/zulip,atomic-labs/zulip,rht/zulip,hengqujushi/zulip,m1ssou/zulip,qq1012803704/zulip,EasonYi/zulip,hayderimran7/zulip,brainwane/zulip,j831/zulip,kou/zulip,huangkebo/zulip,TigorC/zulip,dxq-git/zulip,samatdav/zulip,ikasumiwt/zulip,ikasumiwt/zulip,SmartPeople/zulip,alliejones/zulip,JanzTam/zulip,Batterfii/zulip,DazWorrall/zulip,AZtheAsian/zulip,ikasumiwt/zulip,johnnygaddarr/zulip,brockwhittaker/zulip,bluesea/zulip,ryansnowboarder/zulip,jonesgithub/zulip,JPJPJPOPOP/zulip,eastlhu/zulip,eastlhu/zulip,gigawhitlocks/zulip,zwily/zulip,armooo/zulip,mahim97/zulip,jimmy54/zulip,reyha/zulip,JanzTam/zulip,tiansiyuan/zulip,dhcrzf/zulip,KingxBanana/zulip,jonesgithub/zulip,developerfm/zulip,dnmfarrell/zulip,ericzhou2008/zulip,christi3k/zulip,eastlhu/zulip,kokoar/zulip,hj3938/zulip,dnmfarrell/zulip,deer-hope/zulip,verma-varsha/zulip,zorojean/zulip,peiwei/zulip,cosmicAsymmetry/zulip,bluesea/zulip,guiquanz/zulip,umkay/zulip,RobotCaleb/zulip,hengqujushi/zulip,levixie/zulip,shrikrishnaholla/zulip,adnanh/zulip,technicalpickles/zulip,JanzTam/zulip,hafeez3000/zulip,gkotian/zulip,esander91/zulip,kaiyuanheshang/zulip,Gabriel0402/zulip,themass/zulip,jphilipsen05/zulip,EasonYi/zulip,mahim97/zulip,susansls/zulip,Galexrt/zulip,AZtheAsian/zulip,brockwhittaker/zulip,zorojean/zulip,bastianh/zulip,themass/zulip,jphilipsen05/zulip,brainwane/zulip,arpitpanwar/zulip,kaiyuanheshang/zulip,LAndreas/zulip,xuanhan863/zulip,kokoar/zulip,Jianchun1/zulip,aliceriot/zulip,suxinde2009/zulip,glovebx/zulip,kokoar/zulip,zachallaun/zulip,ApsOps/zulip,grave-w-grave/zulip,DazWorrall/zulip,paxapy/zulip,zulip/zulip,zachallaun/zulip,zhaoweigg/zulip,rht/zulip,punchagan/zulip,andersk/zulip,joshisa/zulip,jackrzhang/zulip,levixie/zulip,thomasboyt/zulip,luyifan/zulip,PaulPetring/zulip,johnny9/zulip,dawran6/zulip,bluesea/zulip,suxinde2009/zulip,PhilSk/zulip,niftynei/zulip,showell/zulip,fw1121/zulip,tbutter/zulip,lfranchi/zulip,dnmfarrell/zulip,blaze225/zulip,niftynei/zulip,amanharitsh123/zulip,amallia/zulip,dhcrzf/zulip,souravbadami/zulip,vikas-parashar/zulip,dattatreya303/zulip,adnanh/zulip,Frouk/zulip,blaze225/zulip,hafeez3000/zulip,deer-hope/zulip,hayderimran7/zulip,hayderimran7/zulip,atomic-labs/zulip,Diptanshu8/zulip,kou/zulip,levixie/zulip,pradiptad/zulip,firstblade/zulip,littledogboy/zulip,voidException/zulip,easyfmxu/zulip,developerfm/zulip,akuseru/zulip,jonesgithub/zulip,Juanvulcano/zulip,dnmfarrell/zulip,zwily/zulip,mansilladev/zulip,MariaFaBella85/zulip,vikas-parashar/zulip,ufosky-server/zulip,TigorC/zulip,kou/zulip,bowlofstew/zulip,shaunstanislaus/zulip,littledogboy/zulip,peguin40/zulip,bluesea/zulip,shaunstanislaus/zulip,hafeez3000/zulip,avastu/zulip,SmartPeople/zulip,gigawhitlocks/zulip,Vallher/zulip,hustlzp/zulip,hj3938/zulip,dwrpayne/zulip,moria/zulip,gkotian/zulip,Gabriel0402/zulip,showell/zulip,dattatreya303/zulip,yuvipanda/zulip,PhilSk/zulip,MayB/zulip,proliming/zulip,amanharitsh123/zulip,xuanhan863/zulip,developerfm/zulip,luyifan/zulip,jainayush975/zulip,so0k/zulip,technica
lpickles/zulip,joshisa/zulip,guiquanz/zulip,aliceriot/zulip,ryansnowboarder/zulip,yuvipanda/zulip,dawran6/zulip,huangkebo/zulip,karamcnair/zulip,karamcnair/zulip,seapasulli/zulip,Cheppers/zulip,reyha/zulip,johnnygaddarr/zulip,moria/zulip,mahim97/zulip,voidException/zulip,hustlzp/zulip,guiquanz/zulip,Frouk/zulip,paxapy/zulip,zulip/zulip,RobotCaleb/zulip,dxq-git/zulip,levixie/zulip,wangdeshui/zulip,Drooids/zulip,paxapy/zulip,udxxabp/zulip,Batterfii/zulip,brainwane/zulip,zacps/zulip,dnmfarrell/zulip,EasonYi/zulip,christi3k/zulip,aakash-cr7/zulip,codeKonami/zulip,ashwinirudrappa/zulip,SmartPeople/zulip,sup95/zulip,LeeRisk/zulip,themass/zulip,jerryge/zulip,wavelets/zulip,vaidap/zulip,arpith/zulip,jeffcao/zulip,dnmfarrell/zulip,Galexrt/zulip,itnihao/zulip,JanzTam/zulip,bluesea/zulip,andersk/zulip,stamhe/zulip,stamhe/zulip,jeffcao/zulip,voidException/zulip,adnanh/zulip,bitemyapp/zulip,itnihao/zulip,natanovia/zulip,andersk/zulip,Diptanshu8/zulip,luyifan/zulip,dwrpayne/zulip,Qgap/zulip,esander91/zulip,yuvipanda/zulip,johnnygaddarr/zulip,rht/zulip,hengqujushi/zulip,jonesgithub/zulip,peiwei/zulip,rishig/zulip,JPJPJPOPOP/zulip,shaunstanislaus/zulip,littledogboy/zulip,vaidap/zulip,thomasboyt/zulip,zwily/zulip,wweiradio/zulip,peiwei/zulip,Drooids/zulip,souravbadami/zulip,noroot/zulip,he15his/zulip,developerfm/zulip,bssrdf/zulip,willingc/zulip,PaulPetring/zulip,firstblade/zulip,eastlhu/zulip,joshisa/zulip,m1ssou/zulip,babbage/zulip,jackrzhang/zulip,sup95/zulip,kokoar/zulip,brockwhittaker/zulip,hj3938/zulip,ryansnowboarder/zulip,grave-w-grave/zulip,huangkebo/zulip,mdavid/zulip,bowlofstew/zulip,jrowan/zulip,aps-sids/zulip,calvinleenyc/zulip,thomasboyt/zulip,Jianchun1/zulip,wdaher/zulip,Galexrt/zulip,peguin40/zulip,jeffcao/zulip,willingc/zulip,deer-hope/zulip,udxxabp/zulip,mohsenSy/zulip,gigawhitlocks/zulip,ufosky-server/zulip,PaulPetring/zulip,isht3/zulip,shaunstanislaus/zulip,atomic-labs/zulip,mohsenSy/zulip,ashwinirudrappa/zulip,jimmy54/zulip,hafeez3000/zulip,Galexrt/zulip,easyfmxu/zulip,zulip/zulip,guiquanz/zulip,wweiradio/zulip,xuxiao/zulip,guiquanz/zulip,isht3/zulip,aps-sids/zulip,zulip/zulip,peguin40/zulip,swinghu/zulip,xuxiao/zulip,technicalpickles/zulip,aps-sids/zulip,glovebx/zulip,developerfm/zulip,christi3k/zulip,jphilipsen05/zulip,timabbott/zulip | zerver/management/commands/import_realm_filters.py | zerver/management/commands/import_realm_filters.py | from __future__ import absolute_import
from django.core.management.base import BaseCommand
from zerver.models import RealmFilter, get_realm
import logging

class Command(BaseCommand):
    help = """Imports realm filters to database"""

    def handle(self, *args, **options):
        realm_filters = {
            "zulip.com": [
                ("#(?P<id>[0-9]{2,8})", "https://trac.zulip.net/ticket/%(id)s"),
            ],
            "mit.edu/zephyr_mirror": [],
        }
        for domain, filters in realm_filters.iteritems():
            realm = get_realm(domain)
            if realm is None:
                logging.error("Failed to get realm for domain %s" % (domain,))
                continue
            for filter in filters:
                RealmFilter(realm=realm, pattern=filter[0], url_format_string=filter[1]).save()
                logging.info("Created realm filter %s for %s" % (filter[0], domain))
| apache-2.0 | Python |
|
c7fa4500b22104b34b50bbcacc3b64923d6da294 | Add a parser for plain text | bjoernricks/trex,bjoernricks/trex | trex/parsers.py | trex/parsers.py | # -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE coming with the source of 'trex' for details.
#

from io import TextIOWrapper

from rest_framework.parsers import BaseParser


class PlainTextParser(BaseParser):

    media_type = "text/plain"

    def parse(self, stream, media_type=None, parser_context=None):
        print "Running PlainTextParser"
        charset = self.get_charset(media_type)
        if charset:
            stream = TextIOWrapper(stream, encoding=charset)
        return stream

    def get_charset(self, media_type):
        if not media_type:
            return None
        charset = None
        msplit = media_type.split(" ")
        for m in msplit:
            m = m.strip()
            if "charset" in m:
                csplit = m.split("=")
                if len(csplit) > 1:
                    charset = csplit[1]
                    return charset.strip().lower()
        return None
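# Illustrative example (editor's assumption, not in the original source): for a
# media type of "text/plain; charset=UTF-8", get_charset() splits on spaces,
# finds the "charset=UTF-8" token and returns "utf-8".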
| mit | Python |
|
72d7e2a37bec5f7ae904ed2119dd8c30c22801fb | Add clinvar bot users | usajusaj/gennotes,usajusaj/gennotes,PersonalGenomesOrg/gennotes,PersonalGenomesOrg/gennotes,PersonalGenomesOrg/gennotes,usajusaj/gennotes | gennotes_server/migrations/0002_add_clinvar_bot_users.py | gennotes_server/migrations/0002_add_clinvar_bot_users.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.db import migrations
def add_clinvar_bot_users(apps, schema_editor):
    usernames = ['clinvar-variant-importer', 'clinvar-data-importer']
    for username in usernames:
        get_user_model().objects.get_or_create(username=username)


class Migration(migrations.Migration):

    dependencies = [
        ('gennotes_server', '0001_initial'),
    ]

    operations = [
        migrations.RunPython(add_clinvar_bot_users),
    ]
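# Editor's note: RunPython is given no reverse function here, so Django treats
# this data migration as irreversible when migrating backwards.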
| mit | Python |
|
d56515878d4a1d4d56a10426fe5d6c45de97a671 | Create servos.py | somchaisomph/RPI.GPIO.TH | gadgets/motors/servos.py | gadgets/motors/servos.py | from gadgets.th_gpio import TH_GPIO
import time
class Servo5V():
    def __init__(self, pin_number=12, freq=100):
        self.pin_number = pin_number
        self.freq = freq
        self.pwm = TH_GPIO().pwm_create(self.pin_number, freq=self.freq)
        self.width = float(1000 / self.freq)

    def set_freq(self, freq):
        self.freq = freq
        self.pwm.set_freq(freq)
        self.width = float(1000 / self.freq)

    def write(self, angle):
        duty = float(angle) / self.width + 2.5
        self.pwm.change(duty)

    def cleanup(self):
        TH_GPIO().disable_pin(self.pin_number)
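# Worked example (editor's note): with the default freq=100 the pulse window is
# 1000/100 = 10 ms, so write(0) requests a 2.5% duty cycle and write(180)
# requests 180/10 + 2.5 = 20.5%.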
| mit | Python |
|
0a074f3af770f049cf6f112bdc7fa5ae35c4a6dc | Create Run.py | SkinnyRat/Benchmark-MNIST | ImageNet/Run.py | ImageNet/Run.py | # From https://groups.google.com/a/tensorflow.org/forum/#!topic/discuss/4xjc7tSrb18
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, math, time
import cv2, csv
import numpy as np
import tensorflow as tf
import CIFAR10
from datetime import datetime
from PIL import Image
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
HOME = '/HOME/' # /HOME/DATA/
width = 24
height = 24
categories = []
with open(HOME + "DATA/LABELS", 'r') as csvfile:
    Labels = csv.reader(csvfile, delimiter=' ', quotechar='|')
    for L in Labels:
        categories.append(L)  # L[0]
filename = HOME + "DATA/0000.png"
#im = Image.open(filename)
#im.save(filename, format='PNG', subsampling=0, quality=100)
with tf.Session() as sess:
    input_img = tf.image.decode_png(tf.read_file(filename), channels=3)
    tf_cast = tf.cast(input_img, tf.float32)
    float_image = tf.image.resize_image_with_crop_or_pad(tf_cast, height, width)
    float_image = tf.image.per_image_standardization(float_image)
    images = tf.expand_dims(float_image, 0)

    logits = CIFAR10.inference(images)
    _, top_k_pred = tf.nn.top_k(logits, k=5)

    variable_averages = tf.train.ExponentialMovingAverage(CIFAR10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    ckpt = tf.train.get_checkpoint_state(HOME + 'MODEL')
    if ckpt and ckpt.model_checkpoint_path:
        print("Model path = ", ckpt.model_checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        print('No checkpoint file found.')
        exit(0)

    #init_op = tf.initialize_all_variables()
    #sess.run(init_op)

    _, top_indices = sess.run([_, top_k_pred])
    for key, value in enumerate(top_indices[0]):
        print("Type %20s" % categories[value] + "\t\t" + str(_[0][key]))
| apache-2.0 | Python |
|
4fb80eede37a2af23c165cb0997989c039f8166e | Add utterance extraction script | MontrealCorpusTools/SPADE,MontrealCorpusTools/SPADE | utterances.py | utterances.py | #######################################
## SPADE utterance extraction script ##
#######################################
## Processes and extracts start-times and end-times for all speaker utterances.
## Used for extracting data collected as part of the SPeech Across Dialects of English
## (SPADE) project.
## Input:
## - corpus name (e.g., Buckeye, SOTC)
## - corpus metadata (stored in a YAML file), which
## specifies the path to the audio, transcripts and metadata
## Output:
## - CSV of utterance metadata for the corpus
import yaml
import time
from datetime import datetime
import sys
import os
import argparse
base_dir = os.path.dirname(os.path.abspath(__file__))
script_dir = os.path.join(base_dir, 'Common')
sys.path.insert(0, script_dir)
import common
from polyglotdb import CorpusContext
from polyglotdb.utils import ensure_local_database_running
from polyglotdb.config import CorpusConfig
from polyglotdb.io.enrichment import enrich_lexicon_from_csv
def utterance_export(config, corpus_name, corpus_directory, dialect_code, speakers, ignored_speakers=None):
    ## Main utterance export function. Collects the start and end times of each
    ## utterance into query format and outputs a CSV file of utterance metadata.
    csv_path = os.path.join(base_dir, corpus_name, '{}_utterances.csv'.format(corpus_name))

    with CorpusContext(config) as c:
        print("Beginning utterance export")
        beg = time.time()

        ## Query every utterance, excluding those produced by ignored speakers,
        ## and export the speaker name, utterance id, begin/end times and
        ## discourse, plus any extra speaker metadata properties below.
        q = c.query_graph(c.utterance).filter(c.utterance.speaker.name.not_in_(ignored_speakers))
        q = q.columns(c.utterance.speaker.name.column_name('speaker'),
                      c.utterance.id.column_name('utterance_label'),
                      c.utterance.begin.column_name('utterance_begin'),
                      c.utterance.end.column_name('utterance_end'),
                      c.utterance.discourse.name.column_name('discourse'))

        for sp, _ in c.hierarchy.speaker_properties:
            if sp == 'name':
                continue
            q = q.columns(getattr(c.utterance.speaker, sp).column_name(sp))

        ## Write the query to a CSV file
        print("Writing CSV")
        q.to_csv(csv_path)
        end = time.time()
        time_taken = time.time() - beg
        print('Query took: {}'.format(end - beg))
        print("Results for query written to " + csv_path)
        common.save_performance_benchmark(config, 'utterance_export', time_taken)
## Process command-line arguments (corpus metadata, corpus reset, etc).
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('corpus_name', help='Name of the corpus')
    parser.add_argument('-r', '--reset', help="Reset the corpus", action='store_true')
    parser.add_argument('-d', '--docker', help="This script is being called from Docker", action='store_true')

    args = parser.parse_args()
    corpus_name = args.corpus_name
    reset = args.reset
    docker = args.docker
    directories = [x for x in os.listdir(base_dir) if os.path.isdir(x) and x != 'Common']

    if args.corpus_name not in directories:
        print(
            'The corpus {0} does not have a directory (available: {1}). Please make it with a {0}.yaml file inside.'.format(
                args.corpus_name, ', '.join(directories)))
        sys.exit(1)

    corpus_conf = common.load_config(corpus_name)

    print('Processing...')

    # sanity check database access
    common.check_database(corpus_name)

    ignored_speakers = corpus_conf.get('ignore_speakers', [])
    stressed_vowels = corpus_conf.get('stressed_vowels', [])

    if reset:
        common.reset(corpus_name)

    ip = common.server_ip
    if docker:
        ip = common.docker_ip

    ## start processing the corpus
    with ensure_local_database_running(corpus_name, port=common.server_port, ip=ip, token=common.load_token()) as params:
        config = CorpusConfig(corpus_name, **params)
        config.formant_source = 'praat'

        # Common set up
        ## Check if the corpus already exists as a database: if not, import the
        ## audio and transcripts and store in graph format
        common.loading(config, corpus_conf['corpus_directory'], corpus_conf['input_format'])

        ## Add information to the corpus regarding lexical, speaker, and linguistic information
        common.lexicon_enrichment(config, corpus_conf['unisyn_spade_directory'], corpus_conf['dialect_code'])
        common.speaker_enrichment(config, corpus_conf['speaker_enrichment_file'])
        common.basic_enrichment(config, corpus_conf['vowel_inventory'] + corpus_conf['extra_syllabic_segments'], corpus_conf['pauses'])

        ## Call the utterance export function, as defined above
        utterance_export(config, corpus_name, corpus_conf['corpus_directory'], corpus_conf['dialect_code'], corpus_conf['speakers'], ignored_speakers=ignored_speakers)

        print('Finishing up!')
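# Hypothetical invocation (editor's example; corpus name and flag are
# illustrative):
#     python utterances.py SOTC -r
# resets and re-processes the SOTC corpus, writing SOTC/SOTC_utterances.csv.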
| mit | Python |
|
b453943f86f97e38e52af3a1b048ee93b0177df8 | add a test to make sure we don't have any more missing migrations | django-pci/django-axes,jazzband/django-axes | axes/tests/test_models.py | axes/tests/test_models.py | from django.test import TestCase
class MigrationsCheck(TestCase):
    def setUp(self):
        from django.utils import translation
        self.saved_locale = translation.get_language()
        translation.deactivate_all()

    def tearDown(self):
        if self.saved_locale is not None:
            from django.utils import translation
            translation.activate(self.saved_locale)

    def test_missing_migrations(self):
        from django.db import connection
        from django.apps.registry import apps
        from django.db.migrations.executor import MigrationExecutor
        executor = MigrationExecutor(connection)

        from django.db.migrations.autodetector import MigrationAutodetector
        from django.db.migrations.state import ProjectState

        autodetector = MigrationAutodetector(
            executor.loader.project_state(),
            ProjectState.from_apps(apps),
        )
        changes = autodetector.changes(graph=executor.loader.graph)
        self.assertEqual({}, changes)
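# Editor's note: MigrationAutodetector.changes() maps app labels to migrations
# that would still need to be generated, so asserting an empty dict checks that
# the models and the checked-in migration files are in sync.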
| mit | Python |
|
1e7aee8c5597a7ccd9f2bc8f4e05e3ae489c3bfd | Add bot.py to run as an actual bot, via pywikibot | dissemin/oabot,dissemin/oabot,dissemin/oabot,CristianCantoro/oabot,CristianCantoro/oabot | src/bot.py | src/bot.py | from app import *
from time import sleep
import pywikibot
def run_bot(template_param, access_token=None, site=None, max_edits=100000):
    cached_pages = list_cache_contents('bot_cache/')
    edits_made = 0
    for page_name in cached_pages:
        print(page_name)
        cache_fname = 'bot_cache/' + to_cache_name(page_name)
        with open(cache_fname, 'r') as f:
            page_json = json.load(f)
        if run_bot_on_page(page_json, template_param, access_token=access_token, site=site):
            edits_made += 1
            sleep(3)
        if edits_made >= max_edits:
            return


def run_bot_on_page(proposed_edits, template_param, access_token=None, site=None):
    page_name = proposed_edits['page_name']
    for edit in proposed_edits['proposed_edits']:
        edit_hash = edit['orig_hash']
        change = edit['proposed_change']
        match = re.findall(r'^' + template_param, change)
        if match:
            try:
                app.logger.info('Attempting change on {}: {}'.format(page_name, change))
                change_made = perform_bot_edit(page_name, '[[Wikipedia:OABOT|Open access bot]]: add %s identifier to citation with #oabot.' % match[0], edit_hash, change, access_token=access_token, site=site)
                if change_made:
                    return True
            except ValueError:
                app.logger.exception('perform_bot_edit failed on {}'.format(page_name))
    return False


def perform_bot_edit(page_name, summary, template_hash, proposed_addition, access_token=None, site=None):
    # Get the page
    text = main.get_page_over_api(page_name)

    # Perform each edit
    new_text, change_made = make_new_wikicode_for_bot(text, template_hash, proposed_addition, page_name)

    # Save the page
    if change_made:
        if site:
            page = pywikibot.Page(site, page_name)
            page.text = new_text
            page.save(summary)
        else:
            edit_wiki_page(page_name, new_text, access_token, summary, bot='yes')

    # Remove the cache
    cache_fname = "bot_cache/" + to_cache_name(page_name)
    if os.path.isfile(cache_fname):
        os.remove(cache_fname)

    return change_made


def make_new_wikicode_for_bot(text, template_hash, proposed_addition, page_name):
    wikicode = mwparserfromhell.parse(text)
    change_made = False
    for template in wikicode.filter_templates():
        edit = main.TemplateEdit(template, page_name)
        if edit.orig_hash == template_hash:
            try:
                edit.update_template(proposed_addition)
                change_made = True
            except ValueError:
                app.logger.exception('update_template failed on {}'.format(page_name))
                pass  # TODO report to the user
    return unicode(wikicode), change_made


if __name__ == '__main__':
    import sys
    template_param = sys.argv[1]
    app.logger.info("Starting additions for parameter: {}".format(template_param))
    site = pywikibot.Site()
    site.login()
    run_bot(template_param, site=site)
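# Hypothetical usage (editor's example; the parameter name is an assumption):
#     python bot.py doi
# would process every page cached under bot_cache/, apply only the proposed
# edits whose text starts with "doi", and sleep 3 seconds between saves.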
| mit | Python |
|
eb517e5b323ea183571b9d4967f46821729dd3e7 | add part 6 | f0lie/basic_python_workshop | part_6.py | part_6.py | # Let's try to draw a point moving on a line
# To make things simple, we are going back to 1D
pos = 1
velo = 1
# Since there are multiple positions at a time, we can represent as a list
line_1 = [' ', ' ', ' ']
line_2 = 3*[' ']
# Note how the two statements are equivalent
print(line_1, line_2)
# If we wanted to print the line without the list notation
# We can join an empty string with a list
print("".join(line_1)) | mit | Python |
|
98a6fd1b1d095d6babc55c5d415c2450743fdba6 | Add antibody audits | ENCODE-DCC/encoded,ENCODE-DCC/encoded,ENCODE-DCC/snovault,ENCODE-DCC/encoded,ENCODE-DCC/encoded,T2DREAM/t2dream-portal,T2DREAM/t2dream-portal,ENCODE-DCC/snovault,T2DREAM/t2dream-portal,ENCODE-DCC/snovault,ENCODE-DCC/snovault,ENCODE-DCC/snovault,T2DREAM/t2dream-portal | src/encoded/audit/antibody_lot.py | src/encoded/audit/antibody_lot.py | from snovault import (
    AuditFailure,
    audit_checker,
)
from .conditions import rfa
@audit_checker('antibody_lot', frame=['characterizations'],
               condition=rfa('ENCODE3', 'modERN'))
def audit_antibody_missing_characterizations(value, system):
    '''
    Check to see what characterizations are lacking for each antibody,
    for the cell lines we know about.
    '''
    if not value['characterizations']:
        detail = '{} '.format(value['@id']) + \
            'does not have any supporting characterizations submitted.'
        yield AuditFailure('no characterizations submitted', detail,
                           level='NOT_COMPLIANT')
        return

    primary_chars = []
    secondary_chars = []
    num_compliant_primary = 0
    compliant_secondary = False
    for char in value['characterizations']:
        if 'primary_characterization_method' in char:
            primary_chars.append(char)
            if char['status'] in ['compliant', 'exempt from standards']:
                num_compliant_primary += 1
        if 'secondary_characterization_method' in char:
            secondary_chars.append(char)
            if char['status'] in ['compliant', 'exempt from standards']:
                compliant_secondary = True

    if not primary_chars:
        detail = '{} '.format(value['@id']) + \
            'does not have any primary characterizations submitted.'
        yield AuditFailure('no primary characterizations', detail,
                           level='NOT_COMPLIANT')

    if not secondary_chars:
        detail = '{} '.format(value['@id']) + \
            'does not have any secondary characterizations submitted.'
        yield AuditFailure('no secondary characterizations', detail,
                           level='NOT_COMPLIANT')

    if len(primary_chars) != num_compliant_primary:
        detail = '{} '.format(value['@id']) + \
            'needs compliant primary in one or more cell types.'
        yield AuditFailure('need compliant primaries', detail,
                           level='NOT_COMPLIANT')

    if secondary_chars and not compliant_secondary:
        detail = '{} '.format(value['@id']) + \
            'needs a compliant secondary characterization.'
        yield AuditFailure('need compliant secondary', detail,
                           level='NOT_COMPLIANT')
    return
| mit | Python |
|
e8560c42e3ae73f1753073b8ad6aef7d564e6d65 | Implement basic active monitoring algorithm | kaushikSarma/VM-Load-balancing,kaushikSarma/VM-Load-balancing,kaushikSarma/VM-Load-balancing,kaushikSarma/VM-Load-balancing | Host/original.py | Host/original.py | import sys
from functools import reduce
tempVmId = -1
def enhancedActiveVMLoadBalancer(vmStateList, currentAllocationCounts):
    '''
    vmStateList: Dict<vmId, vmState>
    currentAllocationCounts: Dict<vmId, currentActiveAllocationCount>
    '''
    global tempVmId
    vmId = -1
    totalAllocations = reduce(lambda x, y: x + y, currentAllocationCounts)
    if(totalAllocations < len(vmStateList)):
        for i, vm in enumerate(vmStateList):
            if(currentAllocationCounts[i] == 0):
                vmId = i
                break
    else:
        minCount = sys.maxint
        for i, vm in enumerate(vmStateList):
            curCount = currentAllocationCounts[i]
            if(curCount < minCount):
                if(i != tempVmId):
                    vmId = i
                    break
    tempVmId = vmId
    print("Returning, ", vmId)
    return vmId
enhancedActiveVMLoadBalancer([
    {'cpu': 10, 'mem': 10},
    {'cpu': 17, 'mem': 40},
    {'cpu': 40, 'mem': 20},
    {'cpu': 80, 'mem': 15}
], [1, 4, 1, 1])
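# Expected behaviour for this call (editor's trace): the allocation counts sum
# to 7, which is not less than the 4 VMs, so the else-branch runs; the first VM
# whose count is below sys.maxint and whose id differs from tempVmId (-1) is
# id 0, so the function selects and returns vm id 0.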
| mit | Python |
|
0a5e419dd91317d3a9d755cc5e8ee32c3a68d4af | Fix dates in show pending notifications | andrei-karalionak/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,vladan-m/ggrc-core,kr41/ggrc-core,hyperNURb/ggrc-core,hasanalom/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,uskudnik/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,uskudnik/ggrc-core,jmakov/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,vladan-m/ggrc-core,selahssea/ggrc-core,vladan-m/ggrc-core,AleksNeStu/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,uskudnik/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,hasanalom/ggrc-core,hasanalom/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,vladan-m/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,uskudnik/ggrc-core,josthkko/ggrc-core,hyperNURb/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,vladan-m/ggrc-core,prasannav7/ggrc-core,uskudnik/ggrc-core,hyperNURb/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,hyperNURb/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,edofic/ggrc-core | src/ggrc/notification/__init__.py | src/ggrc/notification/__init__.py | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: mouli@meics.org
# Maintained By: miha@reciprocitylabs.com
from collections import defaultdict
from freezegun import freeze_time
from datetime import date, datetime
from ggrc.extensions import get_extension_modules
from ggrc.models import Notification
from ggrc.utils import merge_dict
from ggrc import db
from sqlalchemy import and_
class NotificationServices():
    def __init__(self):
        self.services = self.all_notifications()

    def all_notifications(self):
        services = {}
        for extension_module in get_extension_modules():
            contributions = getattr(
                extension_module, 'contributed_notifications', None)
            if contributions:
                if callable(contributions):
                    contributions = contributions()
                services.update(contributions)
        return services

    def get_service_function(self, name):
        if name not in self.services:
            raise ValueError("unknown service name: %s" % name)
        return self.services[name]

    def call_service(self, name, pn):
        service = self.get_service_function(name)
        return service(pn)
services = NotificationServices()
def get_notification_data(notifications):
    if not notifications:
        return {}
    aggregate_data = {}

    def merge_into(destination, source):
        if destination is None:
            return source

    for pn in notifications:
        data = services.call_service(pn.object_type.name, pn)
        aggregate_data = merge_dict(aggregate_data, data)
    return aggregate_data
def get_pending_notifications():
    notifications = db.session.query(Notification).filter(
        Notification.sent_at == None).all()  # noqa

    notif_by_day = defaultdict(list)
    for notification in notifications:
        notif_by_day[notification.send_on].append(notification)

    data = {}
    today = datetime.combine(date.today(), datetime.min.time())
    for day, notif in notif_by_day.iteritems():
        current_day = max(day, today)
        with freeze_time(current_day):
            data[current_day] = get_notification_data(notif)

    return notifications, data
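# Editor's note on the fix in this commit: notifications whose send_on date is
# already in the past are now rendered as if sent today (max(day, today)),
# instead of freezing time at the stale past date as the old version below did.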
def get_todays_notifications():
    notifications = db.session.query(Notification).filter(
        and_(Notification.send_on <= date.today(),
             Notification.sent_at == None  # noqa
             )).all()
    return notifications, get_notification_data(notifications)
def generate_notification_email(data):
    pass


def dispatch_notifications():
    pass
| # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: mouli@meics.org
# Maintained By: miha@reciprocitylabs.com
from collections import defaultdict
from freezegun import freeze_time
from datetime import date
from ggrc.extensions import get_extension_modules
from ggrc.models import Notification
from ggrc.utils import merge_dict
from ggrc import db
from sqlalchemy import and_
class NotificationServices():
    def __init__(self):
        self.services = self.all_notifications()

    def all_notifications(self):
        services = {}
        for extension_module in get_extension_modules():
            contributions = getattr(
                extension_module, 'contributed_notifications', None)
            if contributions:
                if callable(contributions):
                    contributions = contributions()
                services.update(contributions)
        return services

    def get_service_function(self, name):
        if name not in self.services:
            raise ValueError("unknown service name: %s" % name)
        return self.services[name]

    def call_service(self, name, pn):
        service = self.get_service_function(name)
        return service(pn)
services = NotificationServices()
def get_notification_data(notifications):
    if not notifications:
        return {}
    aggregate_data = {}

    def merge_into(destination, source):
        if destination is None:
            return source

    for pn in notifications:
        data = services.call_service(pn.object_type.name, pn)
        aggregate_data = merge_dict(aggregate_data, data)
    return aggregate_data
def get_pending_notifications():
    notifications = db.session.query(Notification).filter(
        Notification.sent_at == None).all()  # noqa

    notif_by_day = defaultdict(list)
    for notification in notifications:
        notif_by_day[notification.send_on].append(notification)

    data = {}
    for day, notif in notif_by_day.iteritems():
        with freeze_time(day):
            data[day] = get_notification_data(notif)

    return notifications, data
def get_todays_notifications():
    notifications = db.session.query(Notification).filter(
        and_(Notification.send_on <= date.today(),
             Notification.sent_at == None  # noqa
             )).all()
    return notifications, get_notification_data(notifications)
def generate_notification_email(data):
    pass


def dispatch_notifications():
    pass
| apache-2.0 | Python |
b7e09bb39aa6161215799960bd5fda33a882e40f | fix docstring | yepengxj/theano_exercises,DeercoderResearch/theano_exercises,mathrho/theano_exercises,hamukazu/theano_exercises,xinmei9322/theano_exercises,OlafLee/theano_exercises,voidException/theano_exercises,yt752/theano_exercises,MarvinBertin/theano_exercises,goodfeli/theano_exercises,IQ17/theano_exercises,juampatronics/theano_exercises | 01_basics/03_advanced_expressions/01_basic_indexing_soln.py | 01_basics/03_advanced_expressions/01_basic_indexing_soln.py | # Fill in the TODOs in this exercise, then run the script to see if your
# solution works.
import numpy as np
import theano.tensor as T
def increment_odd(x):
"""
x: a Theano vector
Returns:
y: a Theano vector equal to x, but with all odd-numbered elements
incremented by 1.
"""
raise NotImplementedError("TODO: implement the function.")
if __name__ == "__main__":
    x = T.vector()
    xv = np.zeros((4,), dtype=x.dtype)
    yv = increment_odd(x).eval({x: xv})
    assert np.allclose(yv, np.array([0., 1., 0., 1.]))
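# One possible solution (editor's sketch, not the repository's answer):
#     return T.inc_subtensor(x[1::2], 1.)
# increments every odd-indexed element and satisfies the assertion above.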
| bsd-3-clause | Python |
|
787b46749a26f8078c1ac4e914aea7fbd0ced8c6 | Add test for checking if privatecode in journey's is unique per day | bliksemlabs/bliksemintegration,bliksemlabs/bliksemintegration | bin/test.py | bin/test.py | import helper
import logging
import psycopg2
from settings.const import database_connect
conn = psycopg2.connect(database_connect)
cur = conn.cursor()
cur.execute("""
SELECT j.id,jp.operator_id,j.operator_id FROM
(select journeypatternref,count(distinct pointorder) as points from pointinjourneypattern group by journeypatternref) as pattern,
(select timedemandgroupref,count(distinct pointorder) as timepoints from pointintimedemandgroup group by timedemandgroupref) as timepattern,
journey as j LEFT JOIN journeypattern as jp ON (j.journeypatternref = jp.id)
WHERE
j.journeypatternref = pattern.journeypatternref AND
j.timedemandgroupref = timepattern.timedemandgroupref AND
points != timepoints;
""")
rows = cur.fetchall()
cur.close()
timegroupsValid = len(rows) == 0
assert timegroupsValid

cur = conn.cursor()
cur.execute("""
SELECT links.operator_id,rechts.operator_id FROM
(SELECT j.id,j.operator_id,j.privatecode,validdate FROM journey as j LEFT JOIN availabilityconditionday USING (availabilityconditionref) where
isavailable = true) as links,
(SELECT j.id,j.operator_id,j.privatecode,validdate FROM journey as j LEFT JOIN availabilityconditionday USING (availabilityconditionref) where
isavailable = true) as rechts
WHERE links.id != rechts.id AND links.validdate = rechts.validdate AND links.privatecode = rechts.privatecode
""")
rows = cur.fetchall()
cur.close()
uniqueTripidentifiers = len(rows) == 0
assert uniqueTripidentifiers
| bsd-2-clause | Python |
|
3693b1aea769af1e0fbe31007a00f3e33bcec622 | Add function to solve two pair sum | ueg1990/aids | aids/sorting_and_searching/pair_sum.py | aids/sorting_and_searching/pair_sum.py | '''
Given an integer array, output all pairs that sum up to a specific value k
'''
from binary_search import binary_search_iterative
def pair_sum_sorting(arr, k):
    '''
    Using sorting - O(n log n)
    '''
    number_of_items = len(arr)
    if number_of_items < 2:
        return
    arr.sort()
    for index, item in enumerate(arr):
        index_pair = binary_search_iterative(arr, index, number_of_items - 1, k - item)
        if index_pair and index_pair > index:
            print item, arr[index_pair]
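# Illustrative trace (editor's note, assuming binary_search_iterative(arr, lo,
# hi, target) returns the index of target within arr[lo..hi], or a falsy value
# on a miss): pair_sum_sorting([1, 3, 2, 4], 5) sorts the list to [1, 2, 3, 4]
# and prints the pairs "1 4" and "2 3".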
def pair_sum_set(arr, k):
    '''
    Using set - O(n) (time - average case), O(n) (space)
    '''
    if len(arr) < 2:
        return
    seen = set()
    output = set()
    for item in arr:
        target = k - item
        if target not in seen:
            seen.add(target)
        else:
            output.add((item, target))  # print item, target
            # for output with non-duplicate pairs, i.e. (1,3) and (3,1) are the
            # same thing:
            # output.add((min(item, target), max(item, target)))
    print '\n'.join([str(item) for item in output]) | mit | Python |
|
72cfd9b52e860aaaca05e7ef7941d0b4e17ad95f | Add vocab_word.py | acsalu/streethunt-matcher,hsiaoching/streethunt-matcher | vocab_word.py | vocab_word.py | import cv2
import numpy as np
from os import listdir
from os.path import isfile, join
from numpy import *
from scipy.cluster.vq import kmeans,vq
def buildVocabulary(path,k,grid_m,grid_n):
    files = [f for f in listdir(path) if isfile(join(path, f))]
    dict_vocab = array([])
    for i in range(0, grid_m):
        for j in range(0, grid_n):
            # accumulate descriptors for this grid cell over all images before
            # clustering (resetting total_desc inside the file loop would keep
            # only the last image's descriptors)
            total_desc = array([])
            for f in files:
                img = cv2.imread(path + f)
                desc = localFeature(img, grid_m, grid_n, i, j)
                if len(desc.shape) == 1:
                    desc = array([desc])
                if len(total_desc) == 0:
                    total_desc = desc
                else:
                    total_desc = np.append(total_desc, desc, axis=0)
            vocab, dist = kmeans(total_desc, k)  # k is the seed number
            if len(dict_vocab) == 0:
                dict_vocab = [vocab]
            else:
                dict_vocab = np.append(dict_vocab, [vocab], axis=0)
    return dict_vocab
def findWord(dict_vocab,path,grid_m,grid_n):
    files = [f for f in listdir(path) if isfile(join(path, f))]
    word_hist = array([])
    for f in files:
        img = cv2.imread(path + f)
        line_hist = array([])
        for i in range(0, grid_m):
            for j in range(0, grid_n):
                desc = localFeature(img, grid_m, grid_n, i, j)
                hist = buildWordHist(desc, dict_vocab[grid_n * i + j])
                if len(line_hist) == 0:
                    line_hist = hist
                else:
                    line_hist = np.hstack((line_hist, hist))
        if len(word_hist) == 0:
            word_hist = line_hist
        else:
            word_hist = np.vstack((word_hist, line_hist))
    return word_hist
def buildWordHist(desc,dict_part):
    index, temp = vq(desc, dict_part)
    k = dict_part.shape[0]
    hist, bucket = np.histogram(index, bins=range(k + 1))
    return hist
def main():
    path = '/home/alicelee0606/helloflask/'
    d_path = path + 'database/'
    t_path = path + 'testcase/'
    k = 180
    grid_m = 1
    grid_n = 1
    dict_vocab = buildVocabulary(d_path, k, grid_m, grid_n)
    d_hist = findWord(dict_vocab, d_path, grid_m, grid_n)
    t_hist = findWord(dict_vocab, t_path, grid_m, grid_n)
| mit | Python |
|
9a67d63650b751c7b876f248bb3d82e619b37725 | Add new script to create a list of words from frequencies | lucach/spellcorrect,lucach/spellcorrect,lucach/spellcorrect,lucach/spellcorrect | frequenciesToWords.py | frequenciesToWords.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Spell corrector - http://www.chiodini.org/
# Copyright © 2015 Luca Chiodini <luca@chiodini.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import codecs
import sys
def main():
    parser = argparse.ArgumentParser(
        description="Script to get pure words from unigrams frequencies.")
    parser.add_argument("-f", "--file", help="source file to be processed",
                        required=True)
    parser.add_argument("-o", "--output", help="output file with results",
                        required=True)
    args = parser.parse_args()

    words = set()

    # Process input file and save keys.
    with codecs.open(args.file, 'r', 'utf8') as f:
        idx = 0
        for line in f:
            if idx > 0:  # skip first line (header)
                vals = line.rsplit(' ', 1)
                words.add(vals[0])
            idx += 1

    # Write keys to output file.
    with codecs.open(args.output, 'w', 'utf8') as f:
        for w in words:
            f.write("%s\n" % w)


if __name__ == '__main__':
    sys.exit(main())
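# Hypothetical invocation (editor's example; file names are illustrative):
#     python3 frequenciesToWords.py -f unigrams.txt -o words.txt
# skips the header line, splits each remaining line on its last space, and
# writes the de-duplicated words to words.txt.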
| agpl-3.0 | Python |
|
875fd0f57b1cbead04bd60b7d8c19cd1f106595a | add example python server | appunite/discovery-ios,appunite/discovery-ios | Server/server.py | Server/server.py | #!/usr/bin/env python
import tornado.ioloop
import tornado.web
import tornado.websocket
from tornado.options import define, options, parse_command_line
import os
import json
import uuid
define("port", default=8888, help="run on the given port", type=int)
clients = set()
metadatas = dict()
class DiscoveryClient():
    connection = None

    def __init__(self, c):
        self.connection = c
        self.relations = set()  # per-instance, not shared across clients
class WebSocketHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        clients.add(DiscoveryClient(self))
        return None

    def on_close(self):
        for client in clients:
            if client.connection == self:
                clients.remove(client)
                break

    def on_message(self, msg):
        payload = json.loads(msg)

        # decompose json
        body = payload["body"]
        header = payload["header"]

        # handle `absence`
        if header["type"] == "absence":
            print "Received `absence` message: %s" % (body["id"])
            for client in clients:
                if client.connection == self:
                    client.relations.remove(body["id"])

        # handle `presence`
        if header["type"] == "presence":
            print "Received `presence` message: %s" % (body["id"])
            payload = json.dumps({"header": {"type": "metadata"}, "body": metadatas[body["id"]]})
            for client in clients:
                if client.connection == self:
                    client.relations.add(body["id"])
                    # send metadata user to client
                    client.connection.write_message(payload, binary=True)

        # handle `metadata`
        if header["type"] == "metadata":
            print "Received `metadata` message: %s" % (body)
            metadatas[body["id"]] = body
            payload = json.dumps({"header": {"type": "metadata"}, "body": body})
            for client in clients:
                client.connection.ws_connection.write_message(payload, binary=True)
app = tornado.web.Application([
    (r'/chat', WebSocketHandler)
])
if __name__ == '__main__':
    parse_command_line()
    print "Listening on port %i" % (options.port)
    app.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
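# Illustrative protocol example (editor's note, derived from on_message above;
# body fields other than "id" are assumptions): a client sends
#     {"header": {"type": "metadata"}, "body": {"id": "<uuid>", ...}}
# and the server stores the body in `metadatas` and broadcasts it to every
# connected client; "presence"/"absence" messages subscribe or unsubscribe the
# sender from another client's id.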
| mit | Python |
|
4535d6c41e17031b943e7016fc7de6f76b890f17 | Put the test into the correct directory. | zepheira/amara,zepheira/amara,zepheira/amara,zepheira/amara,zepheira/amara,zepheira/amara | test/lib/test_inputsource.py | test/lib/test_inputsource.py | ########################################################################
# test/xslt/test_inputsource.py
import os
from amara.lib import inputsource, iri, treecompare
module_dir = os.path.dirname(os.path.abspath(__file__))
rlimit_nofile = 300
try:
    import resource
except ImportError:
    pass
else:
    rlimit_nofile = resource.getrlimit(resource.RLIMIT_NOFILE)[0] + 10
def test_many_inputsources():
    assert rlimit_nofile < 20000, "is your file limit really that large?"
    # Amara's inputsource consumes a filehandle, in the 'stream' attribute
    # See what happens if we run out of file handles.
    sources = []
    filename = os.path.join(module_dir, "borrowed", "da_20000714_02.xslt")
    for i in range(rlimit_nofile):
        try:
            sources.append(inputsource(filename))
        except:
            print "Failed after", i, "files"
            raise
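# Editor's note: RLIMIT_NOFILE is the per-process open-file-descriptor limit,
# so opening limit + 10 inputsources is expected to exhaust it if each
# inputsource keeps its underlying file handle open.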
| apache-2.0 | Python |
|
5bbb2a994397374356964b1db4c23b6b8ff5c848 | Add the 'version' variable. | jeremiedecock/tictactoe-py,jdhp-sap/data-pipeline-standalone-scripts,jdhp-sap/sap-cta-data-pipeline,jeremiedecock/tictactoe-py,jdhp-sap/sap-cta-data-pipeline,jeremiedecock/pyai,jeremiedecock/podcast-manager,jeremiedecock/pyai,jeremiedecock/podcast-manager,jdhp-sap/data-pipeline-standalone-scripts | TODO/__init__.py | TODO/__init__.py | # The MIT License
#
# Copyright (c) 2016 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.1.dev0'
__all__ = ['TODO']
| mit | Python |
|
fcd96cb766f3211a185a3aadbd7c8dde795134ca | Add ILCommand class | ShivamSarodia/ShivyC,ShivamSarodia/ShivyC,ShivamSarodia/ShivyC | il_commands.py | il_commands.py | """Classes representing IL commands, including procedures to generate asm code
from a given IL command.
"""
import spots
class ILCommand:
"""Base interface for all IL commands"""
def __init__(self):
raise NotImplementedError
def input_values(self):
"""Return set of values read by this command."""
raise NotImplementedError
def output_values(self):
"""Return set of values modified by this command."""
raise NotImplementedError
def clobber_spots(self):
"""Return set of spots that are clobbered by this command."""
raise NotImplementedError
def make_asm(self, spotmap, asm_code):
"""Generate assembly code for this command. Generated assembly can read
any of the values returned from input_values, may overwrite any values
returned from output_values, and may change the value of any spots
returned from clobber_spots without worry.
asm_code (ASMCode) - Object to which to save generated code.
spotmap - Dictionary mapping each input/output value to a spot.
"""
raise NotImplementedError
class AddCommand:
"""ADD - adds arg1 and arg2, then saves to output"""
def __init__(self, output, arg1, arg2):
self.output = output
self.arg1 = arg1
self.arg2 = arg2
def input_values(self):
return {self.arg1, self.arg2}
def output_values(self):
return {self.output}
def clobber_spots(self):
# Current implementation lazily clobbers RAX always.
        # set(spots.RAX) would try to iterate the spot; use a one-element set literal
        return {spots.RAX}
def make_asm(self, spotmap, asm_code):
arg1_asm = spotmap[self.arg1].asm_str(self.arg1.ctype.size)
arg2_asm = spotmap[self.arg2].asm_str(self.arg2.ctype.size)
output_asm = spotmap[self.output].asm_str(self.output.ctype.size)
rax_asm = spots.RAX.asm_str(self.arg1.ctype.size)
asm_code.add_command("mov", rax_asm, arg1_asm)
asm_code.add_command("add", rax_asm, arg2_asm)
asm_code.add_command("mov", output_asm, rax_asm)
| mit | Python |
|
f51bccaebdf0992a708ac96d329b3218df23c3d0 | Create welcome.py | SiarheiGribov/pyBot,SiarheiGribov/pyBot | welcome.py | welcome.py | # coding=utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
sys.path.append('pyBot/ext_libs')
import re
import ast
import json
import time
import login
import datetime
import requests
from urllib2 import urlopen
from random import randrange
from sseclient import SSEClient as EventSource
minutes = 33
signUrl = 'https://ru.wikipedia.org/w/?action=raw&utf8=1&title=User:LatitudeBot/Sign'
token, cookies = login.login()
signList = []
signData = urlopen(signUrl).readlines()
for line in signData:
line = str(line.decode('UTF-8').rstrip('\n'))
if re.match(r'^\*', line):
signList.append('{{Hello}} ' + re.sub(r'^\*\s', '', line) + ' ~~~~~')
r_users = []
payload = {'action': 'query', 'format': 'json', 'list': 'recentchanges',
'rcprop': 'user|timestamp', 'rcshow': '!bot|!anon', 'rctype': 'new|edit', 'rcend': (datetime.datetime.now() - datetime.timedelta(minutes = minutes)).strftime("%Y-%m-%d %H:%M:%S"), 'rclimit': 5000, 'token': token}
r_changes = json.loads(requests.post('https://ru.wikipedia.org/w/api.php', data=payload, cookies=cookies).text)
users = ast.literal_eval('{query}'.format(**r_changes))
users = ast.literal_eval('{recentchanges}'.format(**users))
usersCheck = []
usersList = ''
for user in users:
if user['user'] not in usersCheck:
usersCheck.append(user['user'])
usersList += user['user'] + '|'
payload = {'action': 'query', 'format': 'json', 'utf8': '', 'list': 'users', 'ususers': usersList.rstrip('|'), 'usprop': 'blockinfo|editcount|groups', 'token': token}
r_userinfo = json.loads(requests.post('https://ru.wikipedia.org/w/api.php', data=payload, cookies=cookies).text)
userinfo = ast.literal_eval('{query}'.format(**r_userinfo))
userinfo = ast.literal_eval('{users}'.format(**userinfo))
for user in userinfo:
if ('blockid' not in user) and ('invalid' not in user):
if (user['editcount'] > 0) and (user['editcount'] < 25):
payload = {'action': 'query', 'format': 'json', 'utf8': '', 'list': 'logevents', 'letype': 'delete', 'letitle': 'User talk:' + user['name'], 'token': token}
r_isdelete = json.loads(requests.post('https://ru.wikipedia.org/w/api.php', data=payload, cookies=cookies).text)
isdelete = ast.literal_eval('{query}'.format(**r_isdelete))
isdelete = ast.literal_eval('{logevents}'.format(**isdelete))
if len(isdelete) == 0:
r_users.append(user['name'])
for r_user in r_users:
random_index = randrange(0, len(signList))
sign = signList[random_index]
payload = {'action': 'edit', 'format': 'json', 'title': 'User talk:' + r_user, 'utf8': '', 'createonly': '', 'notminor': '', 'text': sign, 'summary': u'Добро пожаловать!', 'token': token}
r_edit = requests.post('https://ru.wikipedia.org/w/api.php', data=payload, cookies=cookies)
| mit | Python |
|
732898dc4858ae5cfc7eac3e470069ac702f6c12 | Add a command for deactivating a generation | Sinar/mapit,chris48s/mapit,New-Bamboo/mapit,Sinar/mapit,opencorato/mapit,chris48s/mapit,chris48s/mapit,Code4SA/mapit,opencorato/mapit,opencorato/mapit,Code4SA/mapit,Code4SA/mapit,New-Bamboo/mapit | mapit/management/commands/mapit_generation_deactivate.py | mapit/management/commands/mapit_generation_deactivate.py | # This script deactivates a particular generation
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from mapit.models import Generation
class Command(BaseCommand):
help = 'Deactivate a generation'
args = '<GENERATION-ID>'
option_list = BaseCommand.option_list + (
make_option('--commit', action='store_true', dest='commit',
help='Actually update the database'),
make_option('--force', action='store_true', dest='force',
help='Force deactivation, even if it would leave no active generations'))
def handle(self, generation_id, **options):
generation_to_deactivate = Generation.objects.get(id=int(generation_id, 10))
if not generation_to_deactivate.active:
raise CommandError, "The generation %s wasn't active" % (generation_id,)
active_generations = Generation.objects.filter(active=True).count()
if active_generations <= 1 and not options['force']:
raise CommandError, "You're trying to deactivate the only active generation. If this is what you intended, please re-run the command with --force"
generation_to_deactivate.active = False
if options['commit']:
generation_to_deactivate.save()
print "%s - deactivated" % generation_to_deactivate
else:
print "%s - not deactivated, dry run" % generation_to_deactivate
| agpl-3.0 | Python |
|
ffdf48c758877dd869f4fb4ce598635ff6545d5d | add script for just building the *.zip | opendns/dynamicipupdate,opendns/dynamicipupdate,opendns/dynamicipupdate,opendns/dynamicipupdate,opendns/dynamicipupdate | mac/scripts/build-temp.py | mac/scripts/build-temp.py | #!/usr/bin/env python
import sys
import os
import os.path
import re
import time
import subprocess
import stat
import shutil
"""
Release build script designed to automate as much of the proces as possible
and minimize errors.
Pushing an update to mac client is involved. Files that must be changed:
* Info.plist
* conf.php and mac-ipupdater-relnotes-$ver.html
* IpUpdaterAppCast.xml
(update pubDate, sparkle:version and sparkle:shortVersionString)
Checklist for pushing a new release:
* edit Info.plist to set new version
* create mac-ipupdater-relnotes-$ver.html, check it in and deploy it
* run this script
* verify it made the right changes to IpUpdaterAppCast.xml
* checkin and deploy the binary to the website
* update conf.php to account for new version, check it in and deploy to website
* checkin and deploy IpUpdaterCast.xml
"""
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
SRC_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, ".."))
RELEASE_BUILD_DIR = os.path.join(SRC_DIR, "build", "Release")
INFO_PLIST_PATH = os.path.realpath(os.path.join(SCRIPT_DIR, "..", "Info.plist"))
WEBSITE_DESKTOP_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, "..", "..", "..", "website", "desktop"))
APPENGINE_SRC_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, "..", "..", "..", "appengine-opendnsupdate"))
APP_CAST_PATH = os.path.join(APPENGINE_SRC_DIR, "IpUpdaterAppCast.xml")
def exit_with_error(s):
print(s)
sys.exit(1)
def ensure_dir_exists(path):
if not os.path.exists(path) or not os.path.isdir(path):
exit_with_error("Directory '%s' desn't exist" % path)
def ensure_file_exists(path):
if not os.path.exists(path) or not os.path.isfile(path):
exit_with_error("File '%s' desn't exist" % path)
def ensure_file_doesnt_exist(path):
if os.path.exists(path):
exit_with_error("File '%s' already exists and shouldn't. Forgot to update version in Info.plist?" % path)
def readfile(path):
fo = open(path)
data = fo.read()
fo.close()
return data
def writefile(path, data):
fo = open(path, "w")
fo.write(data)
fo.close()
def get_file_size(filename):
st = os.stat(filename)
return st[stat.ST_SIZE]
def run_cmd_throw(*args):
cmd = " ".join(args)
print("Running '%s'" % cmd)
cmdproc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
res = cmdproc.communicate()
errcode = cmdproc.returncode
if 0 != errcode:
print "Failed with error code %d" % errcode
print "Stdout:"
print res[0]
print "Stderr:"
print res[1]
raise Exception("'%s' failed with error code %d" % (cmd, errcode))
return (res[0], res[1])
# a really ugly way to extract version from Info.plist
def extract_version_from_plist(plist_path):
plist = readfile(plist_path)
#print(plist)
regex = re.compile("CFBundleVersion</key>(.+?)<key>", re.DOTALL | re.MULTILINE)
m = regex.search(plist)
version_element = m.group(1)
#print("version_element: '%s'" % version_element)
regex2 = re.compile("<string>(.+?)</string>")
m = regex2.search(version_element)
version = m.group(1)
version = version.strip()
#print("version: '%s'" % version)
return version
# build version is either x.y or x.y.z
def ensure_valid_version(version):
m = re.match("\d+\.\d+", version)
if m: return
m = re.match("\d+\.\d+\.\d+", version)
if m: return
print("version ('%s') should be in format: x.y or x.y.z" % version)
sys.exit(1)
def zip_name(version):
return "OpenDNS-Updater-Mac-%s.zip" % version
def zip_path(version):
return os.path.join(RELEASE_BUILD_DIR, zip_name(version))
def build_and_zip(version):
os.chdir(SRC_DIR)
print("Cleaning release target...")
xcodeproj = "OpenDNS Updater.xcodeproj"
run_cmd_throw("xcodebuild", "-project", xcodeproj, "-configuration", "Release", "clean");
print("Building release target...")
(out, err) = run_cmd_throw("xcodebuild", "-project", xcodeproj, "-configuration", "Release", "-target", "OpenDNS Updater")
ensure_dir_exists(RELEASE_BUILD_DIR)
os.chdir(RELEASE_BUILD_DIR)
(out, err) = run_cmd_throw("zip", "-9", "-r", zip_name(version), "OpenDNS Updater.app")
def main():
version = extract_version_from_plist(INFO_PLIST_PATH)
print("Building mac updater version '%s'" % version)
ensure_valid_version(version)
build_and_zip(version)
ensure_file_exists(zip_path(version))
print("Built '%s'" % zip_path(version))
if __name__ == "__main__":
main()
| bsd-3-clause | Python |
|
f04d683d44507a53be39a2db54d545d2f2a1361b | Add example settings module | AustralianAntarcticDataCentre/save_emails_to_files,AustralianAntarcticDataCentre/save_emails_to_files | settings_example.py | settings_example.py | import os
import re
from imap import EmailCheckError, EmailServer
from postgresql import DatabaseServer
CSV_FOLDER = os.getcwd()
# Restrict emails by sender.
EMAIL_FROM = 'sender@example.com'
# Restrict emails by subject.
EMAIL_SUBJECT_RE = re.compile(''.join([
r'(?P<year>\d{4})',
r'(?P<month>\d{2})',
r'(?P<day>\d{2})',
r'(?P<hour>\d{2})',
r'(?P<minute>\d{2})',
r'\.csv',
]))
TABLE_NAME_FORMAT = 'data_{year}{month}'
def get_database_client():
con = 'my_username/my_password@database.example.com:5432/my_database'
return DatabaseServer(con)
def get_email_client():
return EmailServer('mail.example.com', 'my_username', 'my_password')
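# For reference, EMAIL_SUBJECT_RE matches subjects such as '201607251430.csv';
# the captured groups feed TABLE_NAME_FORMAT, e.g.
# TABLE_NAME_FORMAT.format(year='2016', month='07') -> 'data_201607'.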
| mit | Python |
|
29c20a662f347e720c5228f0853eaa6ac0164379 | Create ScreenSocket.py | flyinactor91/EV3-Remote,flyinactor91/EV3-Remote | ScreenSocket.py | ScreenSocket.py | #!/usr/bin/env python
##--Zachary Trette
##-- accepts commands for screen responses
##-- EV3- Remote - https://github.com/flyinactor91/EV3-Remote
## 2013-12-1
from socket import *
import sys, os
import pygame
from pygame.locals import *
def setup():
pygame.init()
w = 640
h = 480
size=(w,h)
screen = pygame.display.set_mode(size,HWSURFACE|DOUBLEBUF|RESIZABLE)
return screen, size
def runCue(SorI, strOrImage):
if SorI == "I":
im = pygame.image.load(strOrImage)
scrn.blit(pygame.transform.scale(im,size),(0,0))
pygame.display.flip()
elif SorI == "T":
basicfont = pygame.font.SysFont(None, 48)
text = basicfont.render(strOrImage, True, (255, 0, 0), (0, 0, 0))
textrect = text.get_rect()
textrect.centerx = scrn.get_rect().centerx
textrect.centery = scrn.get_rect().centery
scrn.fill((0,0,0))
scrn.blit(text, textrect)
pygame.display.flip()
elif SorI == "C":
basicfont = pygame.font.SysFont(None, 48)
text = basicfont.render("", True, (0, 0, 0), (0, 0, 0))
textrect = text.get_rect()
textrect.centerx = scrn.get_rect().centerx
textrect.centery = scrn.get_rect().centery
scrn.fill((0,0,0))
scrn.blit(text, textrect)
pygame.display.flip()
TCP_PORT = 5678
defaultTimeout = 5
if len(sys.argv) == 2:
TCP_IP = sys.argv[1]
BUFFER_SIZE = 1024
screenSocket = socket(AF_INET, SOCK_STREAM)
screenSocket.bind(('' , TCP_PORT))
screenSocket.listen(1)
dne = False
scrn, size = setup()
while not dne:
connectionSocket , addr = screenSocket.accept()
connectionSocket.settimeout(defaultTimeout)
msg = connectionSocket.recv(BUFFER_SIZE)
msg = msg.strip()
if msg == 'QUIT':
print "DONE"
dne = True
else:
t = msg[0]
s = msg[1:].strip()
runCue(t,s)
#connectionSocket.send()
connectionSocket.close()
screenSocket.close()
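# Minimal client sketch (wire format inferred from the loop above: the first
# byte picks the cue -- 'T' text, 'I' image path, 'C' clear -- and the literal
# message 'QUIT' stops the server):
#
#     from socket import socket, AF_INET, SOCK_STREAM
#     s = socket(AF_INET, SOCK_STREAM)
#     s.connect(('localhost', 5678))
#     s.send('T Hello from the controller')
#     s.close()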
| mit | Python |
|
d79ed2b4aa8315579688f4c6e9bfc8980e9717e3 | Create chghost.py | merc-devel/merc | merc/features/ircv32/chghost.py | merc/features/ircv32/chghost.py | from merc import capability
from merc import feature
from merc import message
class ChgHostFeature(feature.Feature):
NAME = __name__
install = ChgHostFeature.install
@ChgHostFeature.register_user_capability
class ChgHostCapability(capability.Capability):
NAME = "chghost"
class _ChgHost(message.Command):
def handle_for(self, app, user, prefix):
user.check_is_irc_operator()
target = self.get_target(app, user)
old_hostmask = target.hostmask
target.username = self.username
target.host = self.host
app.network.user_broadcast(target, old_hostmask,
ChgHost(self.username, self.host))
@ChgHostFeature.register_user_command
class ChgHost(_ChgHost):
NAME = "CHGHOST"
MIN_ARITY = 2
def __init__(self, username, host, *args):
self.username = username
self.host = host
def as_command_params(self):
return [self.username, self.host]
def can_send_to(self, user):
return ChgHostCapability(user).get()
def get_target(self, app, user):
return user
@ChgHostFeature.register_user_command
class SAChgHost(_ChgHost):
NAME = "SACHGHOST"
MIN_ARITY = 3
def __init__(self, target, username, host, *args):
self.target = target
self.username = username
self.host = host
def get_target(self, app, user):
return app.users.get(self.target)
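# On the wire this relays an IRCv3.2 host change, e.g.:
#     :nick!olduser@oldhost CHGHOST newuser newhost
# delivered only to clients that negotiated the "chghost" capability above.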
| mit | Python |
|
f11f5cf946c61f45d5059ecdd828018cf0bb7a55 | Add pygments based lexical output processing | remibergsma/cloudstack-cloudmonkey,apache/cloudstack-cloudmonkey,remibergsma/cloudstack-cloudmonkey,apache/cloudstack-cloudmonkey,Interoute/cloudmonkey-interoute,Interoute/cloudmonkey-interoute | cloudmonkey/lexer.py | cloudmonkey/lexer.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
from pygments import highlight
from pygments.console import ansiformat
from pygments.formatter import Formatter
from pygments.formatters import Terminal256Formatter
from pygments.lexer import bygroups, include, RegexLexer
from pygments.token import *
import sys
except ImportError, e:
print e
MONKEY_COLORS = {
Token: '',
Whitespace: 'reset',
Text: 'reset',
Name: 'green',
Operator: 'teal',
Operator.Word: 'lightgray',
String: 'purple',
Keyword: '_red_',
Error: 'red',
Literal: 'yellow',
Number: 'blue',
}
def get_colorscheme():
return MONKEY_COLORS
class MonkeyLexer(RegexLexer):
keywords = ['[a-z]*id', '[a-zA-Z]*:']
attributes = ['[Tt]rue', '[Ff]alse']
params = ['[a-z]*[Nn]ame', 'type', '[Ss]tate']
uuid_rgx = r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
date_rgx = r'[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9:]{8}-[0-9]{4}'
def makelistre(lis):
return r'(' + r'|'.join(lis) + r')'
tokens = {
'root': [
(r' ', Whitespace),
(date_rgx, Number),
(uuid_rgx, Literal),
(r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
(r'^[-=]*\n', Operator.Word),
(r'Error', Error),
(makelistre(keywords), Keyword),
(makelistre(attributes), Literal),
(makelistre(params) + r'( = )(.*)', bygroups(Name, Operator,
String)),
(makelistre(params), Name),
(r'(^[a-zA-Z]* )(=)', bygroups(Name, Operator)),
(r'\S+', Text),
]
}
def analyse_text(text):
npos = text.find('\n')
if npos < 3:
return False
return text[0] == '[' and text[npos - 1] == ']'
class MonkeyFormatter(Formatter):
def __init__(self, **options):
Formatter.__init__(self, **options)
self.colorscheme = get_colorscheme()
def format(self, tokensource, outfile):
self.encoding = outfile.encoding
return Formatter.format(self, tokensource, outfile)
def format_unencoded(self, tokensource, outfile):
for ttype, value in tokensource:
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(ansiformat(color, line))
outfile.write('\n')
if spl[-1]:
outfile.write(ansiformat(color, spl[-1]))
else:
outfile.write(value)
def monkeyprint(text):
fmter = MonkeyFormatter()
lexer = MonkeyLexer()
lexer.encoding = 'utf-8'
fmter.encoding = 'utf-8'
highlight(text, lexer, fmter, sys.stdout)
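# Example: colorize typical cloudmonkey tabular output (UUIDs, params and
# keywords each pick up their MONKEY_COLORS entry):
#     monkeyprint(u'id = 6d1eda81-2d57-4c6c-951f-b037b0051031\nstate = Running')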
| apache-2.0 | Python |
|
e5055344001b8fa313ff7bcde3284d90bb6c2d62 | add preprocess program | JamisHoo/Distributed-Image-Search-Engine,JamisHoo/Distributed-Image-Search-Engine,JamisHoo/Distributed-Image-Search-Engine,JamisHoo/Distributed-Image-Search-Engine | compress.py | compress.py | #!/usr/bin/env python3
###############################################################################
# Copyright (c) 2015 Jamis Hoo
# Distributed under the MIT license
# (See accompanying file LICENSE or copy at http://opensource.org/licenses/MIT)
#
# Project:
# Filename: compress.py
# Version: 1.0
# Author: Jamis Hoo
# E-mail: hjm211324@gmail.com
# Date: Jul 26, 2015
# Time: 13:48:16
# Description:
###############################################################################
import os
import tarfile
TAR_DIR = "TAR/"
WORDS_DICTIONARY = "words"
INVERTED_INDEX = "index"
COMPRESS_DIR = "COMPRESS/"
BLOCK_SIZE = int(1024 * 1024 * 1024 * 10) # 10 GiB
# tolerate 1 GiB empty space at the end of each block
MIN_BLOCK_SIZE = int(BLOCK_SIZE * 0.9) # 9 GiB
# check files and dirs
if not os.path.isfile(INVERTED_INDEX):
print(INVERTED_INDEX, "does not exist. ")
exit(1)
if not os.path.isfile(WORDS_DICTIONARY):
print(WORDS_DICTIONARY, "does not exist. ")
exit(1)
if not os.path.isdir(TAR_DIR):
print(TAR_DIR, "does not exist or isn't a directory. ")
exit(1)
if os.path.exists(COMPRESS_DIR) and not os.path.isdir(COMPRESS_DIR):
print(COMPRESS_DIR, "exists and is not directory. ")
exit(1)
if not os.path.exists(COMPRESS_DIR):
os.mkdir(COMPRESS_DIR)
# load words dictionary
# words dictionary: tar_filename -> keywords splited with comma
words_dictionary = dict()
words_dict_file = open(WORDS_DICTIONARY)
for l in words_dict_file:
index = l[: l.find('\t')]
keywords = ",".join([ x.strip() for x in l[l.find('\t') + 1: -1].split(", ") ])
words_dictionary[index] = keywords
words_dict_file.close()
# find the next compress block
compress_block_counter = 0
block_filename = format(compress_block_counter, "08x")
existing_compress_blocks = sorted(os.listdir(COMPRESS_DIR))
if len(existing_compress_blocks):
last_block_filename = existing_compress_blocks[-1]
last_block_size = os.path.getsize(COMPRESS_DIR + "/" + last_block_filename)
compress_block_counter = int(last_block_filename, 16)
if last_block_size > MIN_BLOCK_SIZE:
compress_block_counter += 1
# we use 8 digit hex number as filename, in the range of uint32
block_filename = format(compress_block_counter, "08x")
block_handler = open(COMPRESS_DIR + "/" + block_filename, "ab")
print("Append at", COMPRESS_DIR + block_filename, hex(block_handler.tell()))
# append content to block handler
# return (block index, offset, size)
def append_to_block(content):
global block_handler
global compress_block_counter
global block_filename
if block_handler.tell() + len(content) > BLOCK_SIZE:
block_handler.close()
compress_block_counter += 1
block_filename = format(compress_block_counter, "08x")
block_handler = open(COMPRESS_DIR + "/" + block_filename, "ab")
offset = block_handler.tell()
block_index = compress_block_counter
block_handler.write(content)
assert block_handler.tell() - offset == len(content)
return (block_index, offset, len(content))
inverted_index = dict()
# traverse each tar archive
for tar in os.listdir(TAR_DIR):
if tar[: -4] not in words_dictionary:
print("WARN: TAR", tar[: -4], "not in words dictionary. ")
continue
keywords = words_dictionary[tar[: -4]]
print(tar, ":", keywords)
tar_handler = tarfile.open(TAR_DIR + "/" + tar)
for tar_mem in tar_handler.getmembers():
content = tar_handler.extractfile(tar_mem).read()
file_info = append_to_block(content)
if keywords not in inverted_index:
inverted_index[keywords] = [file_info]
else:
inverted_index[keywords].append(file_info)
# append inverted index
index_handler = open(INVERTED_INDEX, "a")
for keywords, positions in inverted_index.items():
output_str = keywords + "\t"
for pos in positions:
for i in pos:
output_str += format(i, "x") + ","
output_str = output_str[: -1] + "\n"
index_handler.write(output_str)
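# Each appended index line has the layout (hex fields, as written above):
#     keyword1,keyword2<TAB>block,offset,size[,block,offset,size ...]
# e.g. "cat,animal\t0,1a2b,4f00" locates one member file inside COMPRESS/00000000.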
| mit | Python |
|
d4a7cdd400fe29458cc584455c7b082efed99e2b | Add files via upload | bengjerstad/multiuselogserver,bengjerstad/multiuselogserver,bengjerstad/multiuselogserver | timedscripts/rollupwinlog.py | timedscripts/rollupwinlog.py | import requests
import json
import pandas as pd
import sqlite3
from datetime import date
from datetime import datetime
from dateutil import parser
import time
SERVER = '10.24.25.130:8000'
conn = sqlite3.connect('usersrollup.db')
c = conn.cursor()
def makedb():
c.execute('''Create Table users (username text,compname text,stat text,time text)''')
conn.commit()
now = str(datetime.now())
r = requests.get('http://'+SERVER+'/get_dup')
newtxt = json.loads(r.text)
if newtxt == {}:
    print("Returned nothing.")
else:
#print(newtxt,now)
for x in newtxt:
time.sleep(5)
r = requests.get('http://'+SERVER+'/get_log?username='+x+'&compname=all')
thisreturn = json.loads(r.text)
#print(x,thisreturn)
for key,value in thisreturn.items():
data2 = (value['username'],value['compname'],value['stat'],now)
try:
c.execute("INSERT INTO users VALUES "+str(data2))
except sqlite3.OperationalError:
makedb()
c.execute("INSERT INTO users VALUES "+str(data2))
conn.commit()
#need to request to clear the log
#r = requests.get('http://'+SERVER+'/db?action=clearlog')
| mit | Python |
|
98fbfe6e65c4cb32ea0f4f6ce6cba77f7fadcb7b | Add test for vendor object creation | teamtaverna/core | app/api/tests/test_vendor_api.py | app/api/tests/test_vendor_api.py | from django.test import Client, TestCase
from .utils import obtain_api_key, create_admin_account
class VendorApiTest(TestCase):
"""Test for Vendor API."""
def setUp(self):
self.client = Client()
self.endpoint = '/api'
self.admin_test_credentials = ('admin', 'admin@taverna.com', 'qwerty123')
create_admin_account(*self.admin_test_credentials)
self.header = {
'HTTP_X_TAVERNATOKEN': obtain_api_key(
self.client, *self.admin_test_credentials
)
}
self.vendors = (
('vendor1', 'info1'),
('vendor2', 'info2')
)
def make_request(self, query, method='GET'):
if method == 'GET':
return self.client.get(self.endpoint,
data={'query': query},
**self.header
).json()
if method == 'POST':
return self.client.post(self.endpoint,
data={'query': query},
**self.header
).json()
def create_vendor(self, name, info):
query = '''
mutation{
createVendor(input: {name: "%s", info: "%s"}){
vendor{
id,
originalId,
name,
info
}
}
}
''' % (name, info)
return self.make_request(query, 'POST')
def retrieve_vendor(self, vendor_id):
query = 'query {vendor(id: "%s") {name}}' % (vendor_id)
return self.make_request(query)
def create_multiple_vendors(self):
return [self.create_vendor(name, info) for name, info in self.vendors]
def test_creation_of_vendor_object(self):
# For new vendor record
response = self.create_vendor('vendor4', 'info4')
created_vendor = response['vendor']
expected = {
'vendor': {
'id': created_vendor['id'],
'originalId': created_vendor['originalId'],
'name': 'vendor4',
'info': 'info4'
}
}
self.assertEqual(expected, response)
| mit | Python |
|
2cd2d7a20f2d19221b40aac9bfa1303dbfd97459 | create metashare.wsgi | MiltosD/CEF-ELRC,MiltosD/CEFELRC,MiltosD/CEFELRC,MiltosD/CEF-ELRC,MiltosD/CEF-ELRC,MiltosD/CEFELRC,MiltosD/CEFELRC,MiltosD/CEF-ELRC,MiltosD/CEF-ELRC,MiltosD/CEF-ELRC,MiltosD/CEFELRC,MiltosD/CEFELRC,MiltosD/CEFELRC,MiltosD/CEF-ELRC | metashare/apache/metashare.wsgi | metashare/apache/metashare.wsgi | import os
import sys
path = '/var/www/CEF-ELRC'
if path not in sys.path:
sys.path.insert(0, path)
sys.path.insert(0, '{0}/metashare'.format(path))
sys.path.append('{0}/lib/python2.7/site-packages'.format(path))
os.environ['DJANGO_SETTINGS_MODULE'] = 'metashare.settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler() | bsd-3-clause | Python |
|
123959bc3594299c2f1d4c54b11a996e92147347 | Add missing migration | mfcovington/django-system-maintenance,mfcovington/django-system-maintenance,mfcovington/django-system-maintenance | system_maintenance/migrations/0002_auto_20181214_2122.py | system_maintenance/migrations/0002_auto_20181214_2122.py | # Generated by Django 2.1.4 on 2018-12-14 21:22
from django.db import migrations
import markupfield_helpers.helpers
class Migration(migrations.Migration):
dependencies = [
('system_maintenance', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='documentationrecord',
name='documentation',
field=markupfield_helpers.helpers.MarkupField(blank=True, help_text='Document how to perform a task.', null=True, rendered_field=True),
),
migrations.AlterField(
model_name='maintenancerecord',
name='description',
field=markupfield_helpers.helpers.MarkupField(blank=True, help_text='Enter a description of the system maintenance performed.', null=True, rendered_field=True),
),
migrations.AlterField(
model_name='maintenancerecord',
name='problems',
field=markupfield_helpers.helpers.MarkupField(blank=True, help_text='Describe problems that arose during system maintenance.', null=True, rendered_field=True),
),
migrations.AlterField(
model_name='maintenancerecord',
name='procedure',
field=markupfield_helpers.helpers.MarkupField(blank=True, help_text='Enter details of how the system maintenance was performed.', null=True, rendered_field=True),
),
]
| bsd-3-clause | Python |
|
71b97c202373ee127d57bcdb53ef3f6f4e8d7d57 | Revert "Revert "Added a wrapper script for buildOnServer that merges certain files"" | AndroidX/androidx,androidx/androidx,androidx/androidx,AndroidX/androidx,androidx/androidx,androidx/androidx,androidx/androidx,AndroidX/androidx,androidx/androidx,AndroidX/androidx,AndroidX/androidx,androidx/androidx,AndroidX/androidx,AndroidX/androidx,androidx/androidx,androidx/androidx,AndroidX/androidx,AndroidX/androidx,AndroidX/androidx,androidx/androidx | busytown/doAllTheBuild.py | busytown/doAllTheBuild.py | #!/usr/bin/python
import os,sys,json
# SAMPLE USE CASE: python doAllTheBuild.py --no-daemon -PuseMaxDepVersions
#
# Runs both gradle builds, then merges the output that needs merging
# Arguments to this script are passed to both gradle builds without modification
# Exceptions to this policy:
# if DIST_DIR=path/to/dir is not passed, it is assumed to be out/dist(/ui)
# build variables OUT_DIR=out(/ui), ANDROID_HOME=prebuilts/fullsdk-linux
# are set before each gradle build
# -p frameworks/support(/ui) is passed by default
# arguments with '=' in them (e.g. SNAPSHOT=true) are prefixed to the gradle runs
# If you want to run only one gradle build, you do not want to merge.
# So do not run this; instead run the gradle build directly
os.chdir(os.path.dirname(os.path.abspath(__file__)))
os.chdir("../../../")
projectDirArg = "-p frameworks/support"
ui = "/ui"
tasks = "buildOnServer"
gradlew = "frameworks/support/gradlew"
gradlewC = "frameworks/support/ui/gradlew"
outDirArg = "OUT_DIR=out"
androidHomeArg = "ANDROID_HOME=prebuilts/fullsdk-linux"
androidxGradleCommand = " ".join([outDirArg, androidHomeArg, gradlew, tasks, projectDirArg])
composeGradleCommand = " ".join([outDirArg + ui, androidHomeArg, gradlewC, tasks, projectDirArg + ui])
distargs = [arg for arg in sys.argv if "DIST_DIR=" in arg]
distDir = "out/dist" if len(distargs) == 0 else distargs[0][8:]
distarg = "" if len(distargs) == 0 else " " + distargs[0]
distargC = "" if len(distargs) == 0 else " " + distargs[0] + ui
preargs = " ".join([arg for arg in sys.argv if '=' in arg and arg not in distargs]) # args of the form VAR=thing
postargs = " ".join([arg for arg in sys.argv if ".py" not in arg and arg not in distargs and arg not in preargs])
# remove "doAllTheBuild.py"
def runGradleBuilds():
os.system(" ".join([preargs + distarg, androidxGradleCommand, postargs]))
os.system(" ".join([preargs + distargC, composeGradleCommand, postargs]))
def mergeAggregateBuildInfoFiles() :
N_COMMON_ARTIFACTS = 2 #the number of artifacts in both androidx and compose
#benchmark-common and benchmark-junit4
androidxBuildInfo = json.load(open("androidx_aggregate_build_info.txt"))["artifacts"]
nitemsA = len(androidxBuildInfo)
composeBuildInfo = json.load(open("ui/androidx_aggregate_build_info.txt"))["artifacts"]
nitemsC = len(composeBuildInfo)
resultJson = {"artifacts":androidxBuildInfo + composeBuildInfo}
#assert len(androidxBuildInfo) == nitemsA + nitemsC - N_COMMON_ARTIFACTS
#TODO: make this actually work, and properly
with open("all_aggregate_build_info.txt", 'w') as outfile:
json.dump(resultJson, outfile, sort_keys=True, indent=4, separators=(',', ': '))
def mergeBuildInfoFolders():
os.system("cp -au ui/build-info/. build-info/")
# -a = all in directory; -u = overwrite iff newer
def doThingsInDistDir():
os.chdir(distDir)
mergeAggregateBuildInfoFiles()
mergeBuildInfoFolders()
runGradleBuilds()
doThingsInDistDir() | apache-2.0 | Python |
|
7c33e8c7a386e911d835f81e637515d40dfc4e62 | Add a Laplace equation solving benchmark (from Numpy) | numba/numba-benchmark | benchmarks/bench_laplace.py | benchmarks/bench_laplace.py | """
Benchmark Laplace equation solving.
From the Numpy benchmark suite, original code at
https://github.com/yarikoptic/numpy-vbench/commit/a192bfd43043d413cc5d27526a9b28ad343b2499
"""
import numpy as np
from numba import jit
dx = 0.1
dy = 0.1
dx2 = (dx * dx)
dy2 = (dy * dy)
@jit(nopython=True)
def laplace(N, Niter):
u = np.zeros((N, N))
u[0] = 1
for i in range(Niter):
u[1:(-1), 1:(-1)] = ((((u[2:, 1:(-1)] + u[:(-2), 1:(-1)]) * dy2) +
((u[1:(-1), 2:] + u[1:(-1), :(-2)]) * dx2))
/ (2 * (dx2 + dy2)))
return u
class Laplace:
N = 150
Niter = 200
def setup(self):
# Warm up
self.run_laplace(10, 10)
def run_laplace(self, N, Niter):
u = laplace(N, Niter)
def time_laplace(self):
self.run_laplace(self.N, self.Niter)
| bsd-2-clause | Python |
|
8956ee3bd89b12da20ebb1946d41c4133467ae79 | Add py-pure-eval (#19180) | iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/py-pure-eval/package.py | var/spack/repos/builtin/packages/py-pure-eval/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPureEval(PythonPackage):
"""This is a Python package that lets you safely evaluate certain AST nodes
without triggering arbitrary code that may have unwanted side effects."""
homepage = "https://github.com/alexmojaki/pure_eval"
url = "https://github.com/alexmojaki/pure_eval/archive/master.zip"
git = "https://github.com/alexmojaki/pure_eval.git"
version('master', branch='master')
depends_on('python@3.5:3.9', type=('build', 'run'))
depends_on('py-setuptools@44:', type='build')
| lgpl-2.1 | Python |
|
6d1eda812d57c6c251fb037b005103172de886af | Update __init__.py | geekroot/erpnext,njmube/erpnext,gsnbng/erpnext,geekroot/erpnext,indictranstech/erpnext,indictranstech/erpnext,geekroot/erpnext,gsnbng/erpnext,indictranstech/erpnext,geekroot/erpnext,njmube/erpnext,njmube/erpnext,gsnbng/erpnext,Aptitudetech/ERPNext,indictranstech/erpnext,gsnbng/erpnext,njmube/erpnext | erpnext/__init__.py | erpnext/__init__.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
__version__ = '7.0.63'
def get_default_company(user=None):
'''Get default company for user'''
from frappe.defaults import get_user_default_as_list
if not user:
user = frappe.session.user
companies = get_user_default_as_list(user, 'company')
if companies:
default_company = companies[0]
else:
default_company = frappe.db.get_single_value('Global Defaults', 'default_company')
return default_company
def get_default_currency():
'''Returns the currency of the default company'''
company = get_default_company()
if company:
return frappe.db.get_value('Company', company, 'default_currency')
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
__version__ = '7.1.0-beta'
def get_default_company(user=None):
'''Get default company for user'''
from frappe.defaults import get_user_default_as_list
if not user:
user = frappe.session.user
companies = get_user_default_as_list(user, 'company')
if companies:
default_company = companies[0]
else:
default_company = frappe.db.get_single_value('Global Defaults', 'default_company')
return default_company
def get_default_currency():
'''Returns the currency of the default company'''
company = get_default_company()
if company:
return frappe.db.get_value('Company', company, 'default_currency')
| agpl-3.0 | Python |
f0bd64992b05b0e7edd4b1ac6e99e1cd9db213d6 | Create search.py | bm993a/ptwitch | search.py | search.py | apache-2.0 | Python |
||
c74a9943bbd9e7908ba884e0fea5b3390e8d668e | add migration | parksandwildlife/wastd,parksandwildlife/wastd,parksandwildlife/wastd,parksandwildlife/wastd | wastd/observations/migrations/0004_auto_20160905_1943.py | wastd/observations/migrations/0004_auto_20160905_1943.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-05 11:43
from __future__ import unicode_literals
from django.db import migrations, models
import wastd.observations.models
class Migration(migrations.Migration):
dependencies = [
('observations', '0003_auto_20160902_1206'),
]
operations = [
migrations.AlterField(
model_name='distinguishingfeatureobservation',
name='algal_growth',
field=models.CharField(choices=[('na', 'Not observed'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Algal growth on carapace'),
),
migrations.AlterField(
model_name='distinguishingfeatureobservation',
name='barnacles',
field=models.CharField(choices=[('na', 'Not observed'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Barnacles'),
),
migrations.AlterField(
model_name='distinguishingfeatureobservation',
name='damage_injury',
field=models.CharField(choices=[('na', 'Not observed'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Obvious damage or injuries'),
),
migrations.AlterField(
model_name='distinguishingfeatureobservation',
name='missing_limbs',
field=models.CharField(choices=[('na', 'Not observed'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Missing limbs'),
),
migrations.AlterField(
model_name='distinguishingfeatureobservation',
name='propeller_damage',
field=models.CharField(choices=[('na', 'Not observed'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Propeller strike damage'),
),
migrations.AlterField(
model_name='distinguishingfeatureobservation',
name='scanned_for_pit_tags',
field=models.CharField(choices=[('na', 'Not observed'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Scanned for PIT tags'),
),
migrations.AlterField(
model_name='distinguishingfeatureobservation',
name='tagging_scars',
field=models.CharField(choices=[('na', 'Not observed'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Tagging scars'),
),
migrations.AlterField(
model_name='encounter',
name='location_accuracy',
field=models.CharField(choices=[('10', 'GPS reading at exact location (10 m)'), ('1000', 'Site centroid or place name (1 km)'), ('10000', 'Rough estimate (10 km)')], default='1000', help_text='The accuracy of the supplied location.', max_length=300, verbose_name='Location accuracy (m)'),
),
migrations.AlterField(
model_name='mediaattachment',
name='attachment',
field=models.FileField(help_text='Upload the file', max_length=500, upload_to=wastd.observations.models.encounter_media, verbose_name='File attachment'),
),
]
| mit | Python |
|
9115628cf10e194f1975e01142d8ae08ab5c4b06 | Add test for pandas dataframe loading | fangohr/oommf-python,ryanpepper/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python | joommf/test_odtreader.py | joommf/test_odtreader.py | def test_odtreader_dynamics_example():
from joommf.sim import Sim
from joommf.mesh import Mesh
from joommf.energies.exchange import Exchange
from joommf.energies.demag import Demag
from joommf.energies.zeeman import FixedZeeman
from joommf.drivers import evolver
# Mesh specification.
lx = ly = lz = 50e-9 # x, y, and z dimensions (m)
dx = dy = dz = 5e-9 # x, y, and z cell dimensions (m)
Ms = 8e5 # saturation magnetisation (A/m)
A = 1e-11 # exchange energy constant (J/m)
H = (1e3, 0, 0) # external magnetic field (A/m)
m_init = (0, 0, 1) # initial magnetisation
t_sim = 0.5e-9 # simulation time (s)
gamma = 2.21e5
alpha = 0.1
# Create a mesh.
mesh = Mesh((lx, ly, lz), (dx, dy, dz))
# Create a simulation object.
sim = Sim(mesh, Ms, name='dynamics_example', debug=True)
# Add energies.
sim.add_energy(Exchange(A))
sim.add_energy(Demag())
sim.add_energy(FixedZeeman(H))
sim.set_evolver(
evolver.LLG(t_sim, m_init, Ms, alpha, gamma, name='evolver'))
# Set initial magnetisation.
sim.set_m(m_init)
# Run simulation.
sim.run()
assert sim.df.time.values[-1] == 0.5e-09
| bsd-2-clause | Python |
|
64d8f45e1868fd73415e8f1fe6acc21868d45fa7 | Add rfreceiver mode selector | EndPointCorp/appctl,EndPointCorp/appctl | catkin/src/appctl/scripts/rfreceiver_mode_select.py | catkin/src/appctl/scripts/rfreceiver_mode_select.py | #!/usr/bin/env python
"""
This node listens for keyfob button presses and changes the mode accordingly.
"""
import rospy
from appctl.msg import Mode
from std_msgs.msg import Byte
class ButtonHandler:
def __init__(self, modes, mode_pub):
self.modes = modes
self.mode_pub = mode_pub
def handle_msg(self, msg):
if msg.data in self.modes:
self.mode_pub.publish(mode=self.modes[msg.data])
def main():
rospy.init_node('rfreceiver_mode_select')
modes = {
1: 'tactile',
2: 'attended'
}
mode_pub = rospy.Publisher(
'/appctl/mode',
Mode,
queue_size = 1
)
button_handler = ButtonHandler(modes, mode_pub)
mode_sub = rospy.Subscriber(
'/rfreceiver/buttondown',
Byte,
button_handler.handle_msg
)
rospy.spin()
if __name__=='__main__':
main()
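# A keyfob press can be simulated from the shell while testing, e.g.:
#     rostopic pub -1 /rfreceiver/buttondown std_msgs/Byte "data: 1"
# which switches the mode to 'tactile' via the modes mapping above.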
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| apache-2.0 | Python |
|
9e58f5507ba0a128c696bdec218d244df27feb87 | add list_graspability script | pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc | jsk_arc2017_common/scripts/list_graspability.py | jsk_arc2017_common/scripts/list_graspability.py | #!/usr/bin/env python
import jsk_arc2017_common
graspability = jsk_arc2017_common.get_object_graspability()
for obj_id, obj in enumerate(graspability):
print('{:02}: {}'.format(obj_id+1, obj))
for style in graspability[obj]:
print(' {}: {}'.format(style, graspability[obj][style]))
| bsd-3-clause | Python |
|
bdc04453938366e28ff91b6e16c536eca84d8bef | add summary generator | Mozilla-TWQA/Hasal_analysis,Mozilla-TWQA/Hasal_analysis | summary.py | summary.py | #!/usr/bin/env python
import os
import json
import argparse
from argparse import ArgumentDefaultsHelpFormatter
from time import gmtime, strftime, mktime
import datetime
class DatetimeConverter(object):
TIME_STR_FORMAT = '%Y-%m-%dT%H:%M:%S'
@staticmethod
def get_UTC():
return gmtime()
@staticmethod
def get_string_UTC():
return strftime(DatetimeConverter.TIME_STR_FORMAT, gmtime())
@staticmethod
def get_datetime_from_string(input_time_string):
        # 'datetime' here is the module, so reach the datetime class explicitly
        return datetime.datetime.strptime(input_time_string, DatetimeConverter.TIME_STR_FORMAT)
@staticmethod
def get_timestamp_from_string(input_time_string):
return mktime(DatetimeConverter.get_datetime_from_string(input_time_string).timetuple())
class SummaryGenerator(object):
def __init__(self, root_folder):
self.root_folder = root_folder
def list_to_hierarchy_dict(self, dict_root, input_list):
if input_list:
node = input_list[0]
if type(input_list[0]) is not str:
node = str(input_list[0])
current_node = dict_root.setdefault(node, {})
self.list_to_hierarchy_dict(current_node, input_list[1:])
def generate_summary_dict(self):
ret_dict = {}
for root, dirs, files in os.walk(self.root_folder):
has_time = False
time_list = []
time_sum = 0
time_counter = 0
for f in files:
if f.endswith('time'):
has_time = True
try:
t = int(f.replace('.time', ''))
time_list.append(t)
time_sum += t
time_counter += 1
except Exception:
pass
if has_time:
# generate hierarchy dir dict from list
dir_structure = root.split(os.sep)
self.list_to_hierarchy_dict(ret_dict, dir_structure)
# go to the inner dir
cur_dir = ret_dict
for next_dir in dir_structure:
cur_dir = cur_dir[next_dir]
cur_dir[str(time_sum / time_counter)] = time_list
return ret_dict
def run(self):
summary_dict = self.generate_summary_dict()
utc_time = DatetimeConverter.get_string_UTC()
ret_dict = {
'summary': summary_dict,
'UTC': utc_time
}
print(json.dumps(ret_dict, indent=4))
def main():
arg_parser = argparse.ArgumentParser(description='Summary Generator',
formatter_class=ArgumentDefaultsHelpFormatter)
arg_parser.add_argument('-d', '--dir', dest='root_folder', action='store', default='.',
help='the root folder', required=True)
args = arg_parser.parse_args()
sg = SummaryGenerator(args.root_folder)
sg.run()
if __name__ == '__main__':
main()
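# Shape of the output for a tree like ./a/b/{100.time,200.time} (the innermost
# key is the integer-averaged duration of the collected .time values):
#     {"summary": {".": {"a": {"b": {"150": [100, 200]}}}}, "UTC": "..."}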
| mpl-2.0 | Python |
|
d75c519eb4c3b276f04ba58277d03801c8568ff0 | Create 4.py | utsavnarayan/projecteuler | solutions/4.py | solutions/4.py | # A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
# Find the largest palindrome made from the product of two 3-digit numbers.
def main():
    # track the best product as an int so the comparison stays numeric
    # (assigning the string back would break the int-vs-str comparison)
    largest = 0
    for i in range(999, 900, -1):
        for j in range(999, 900, -1):
            product = i * j
            if product > largest and str(product) == str(product)[::-1]:
                largest = product
    print largest
main()
| mit | Python |
|
21ddecb7804501476d35290b0b0cb2b7311728ab | add hello world tornado | guillaumevincent/tornado-zeromq | server.py | server.py | import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def data_received(self, chunk):
pass
def get(self):
self.write("Hello, world")
def make_app():
return tornado.web.Application([
(r"/", MainHandler),
])
if __name__ == "__main__":
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
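# Quick smoke test once running: `curl http://localhost:8888/` -> "Hello, world"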
| mit | Python |
|
19cfaf8534626e5c6b5193da40a17cc092b24758 | Use tuple instead of a list for DEFAULT_VIEWPORT_VIRTUAL_TAGS | Spirotot/taskwiki,phha/taskwiki | taskwiki/constants.py | taskwiki/constants.py | DEFAULT_VIEWPORT_VIRTUAL_TAGS = ("-DELETED", "-PARENT")
DEFAULT_SORT_ORDER = "due+,pri-,project+"
| DEFAULT_VIEWPORT_VIRTUAL_TAGS = ["-DELETED", "-PARENT"]
DEFAULT_SORT_ORDER = "due+,pri-,project+"
| mit | Python |
d93dada0fe434cd736d11b9cfb1635146130f24a | Add 031 | alexprengere/euler | 031/main.py | 031/main.py | # Integers avoid having to rely on decimal.Decimal
# to handle rounding errors
COINS = 1, 2, 5, 10, 20, 50, 100, 200
TARGET = 200
visited = set()
solutions = []
stack = [(0, (0,) * len(COINS))]
while stack:
total, state = stack.pop()
for cn, coin in enumerate(COINS):
new_total = total + coin
if new_total > TARGET:
continue
new_state = list(state)
new_state[cn] += 1
new_state = tuple(new_state)
if new_state not in visited:
visited.add(new_state)
if new_total == TARGET:
solutions.append(new_state)
else: # < TARGET
stack.append((new_total, new_state))
print(len(solutions))
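# Sanity check: with TARGET = 5 the walk finds 4 states -- (5), (2+2+1),
# (2+1+1+1) and (1+1+1+1+1); for the full TARGET = 200 it prints 73682.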
| apache-2.0 | Python |
|
eaace54d5e7d8d2ebad42cf31cf071a9cf9d3e50 | test case for creating a new story | mohrm/umklapp_site,mohrm/umklapp_site,mohrm/umklapp_site | umklapp/test.py | umklapp/test.py | from django.test import TestCase
from django.test.utils import override_settings
from umklapp.models import *
class UmklappTestCase(TestCase):
def addUsers(self):
self.users = []
for i in range(0,7):
u = User.objects.create_user(
"user%d" % i,
"test@example.com",
"p455w0rd"
)
self.users.append(u)
class NewStoryTest(UmklappTestCase):
def setUp(self):
self.addUsers()
def testNewStory(self):
Story.create_new_story(self.users[0], self.users, "first")
| mit | Python |
|
d20e468a32d1f476196525848688ae64845c4dce | Add Python solution | baudm/sg-ski | sg-ski.py | sg-ski.py | #!/usr/bin/env python
import sys
def parse_map_file(path):
map_grid = []
with open(path, 'r') as f:
width, height = map(int, f.readline().split())
for line in f:
row = map(int, line.split())
map_grid.append(row)
assert height == len(map_grid)
assert width == len(map_grid[0])
return width, height, map_grid
def make_grid(width, height, initial_value):
return [width*[initial_value] for i in range(height)]
def get_length_and_elevation(x, y, map_grid, path_lengths, final_elevations):
path_length = path_lengths[y][x]
if path_length != -1:
return path_length, final_elevations[y][x]
current_elevation = map_grid[y][x]
longest_path = 0
lowest_elevation = current_elevation
neighbors = [
(x, y - 1), # up
(x, y + 1), # down
(x - 1, y), # left
(x + 1, y), # right
]
    for xn, yn in neighbors:
        if xn < 0 or yn < 0:
            # negative indices would silently wrap to the far edge of the grid
            continue
        try:
            neighbor = map_grid[yn][xn]
        except IndexError:
            continue
if neighbor < current_elevation:
path_length, final_elevation = get_length_and_elevation(xn, yn, map_grid, path_lengths, final_elevations)
if path_length > longest_path or (path_length == longest_path and final_elevation < lowest_elevation):
longest_path = path_length
lowest_elevation = final_elevation
path_length = longest_path + 1
path_lengths[y][x] = path_length
final_elevations[y][x] = lowest_elevation
return path_length, lowest_elevation
def main():
sys.stdout.write('Processing...')
sys.stdout.flush()
try:
width, height, map_grid = parse_map_file(sys.argv[1])
except IOError as e:
sys.exit('Unable to read map file: {}'.format(e))
except ValueError as e:
sys.exit('Invalid map file: {}'.format(sys.argv[1]))
# Initialize corresponding grids for path lengths and final elevations
path_lengths = make_grid(width, height, -1)
final_elevations = make_grid(width, height, -1)
longest_path = -1
steepest_drop = -1
for y, row in enumerate(map_grid):
for x, initial_elevation in enumerate(row):
path_length, final_elevation = get_length_and_elevation(x, y, map_grid, path_lengths, final_elevations)
drop = initial_elevation - final_elevation
if path_length > longest_path or (path_length == longest_path and drop > steepest_drop):
longest_path = path_length
steepest_drop = drop
print '\rProcessing... DONE.'
print '\nlength = {}, drop = {}\n'.format(longest_path, steepest_drop)
if __name__ == '__main__':
main()
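# Worked example, assuming a map file holding the well-known 4x4 sample:
#     4 4
#     4 8 7 3
#     2 5 9 3
#     6 3 2 5
#     4 4 1 6
# the best run is 9-5-3-2-1, so the program reports length = 5, drop = 8.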
| mit | Python |
|
78e0135169d2c53b0b99c7811109eb1da040f14d | add bin2h.py | littlekernel/lk,littlekernel/lk,travisg/lk,skabet/lk,sndnvaps/lk,minglun-tsai/lk,srodrig1/lk,sndnvaps/lk,srodrig1/lk,sndnvaps/lk,sndnvaps/lk,nvll/lk,hollanderic/lkstuff,minglun-tsai/lk,travisg/lk,skabet/lk,sndnvaps/lk,ErikCorryGoogle/lk,nvll/lk,hollanderic/lkstuff,nvll/lk,nvll/lk,skabet/lk,srodrig1/lk,hollanderic/lkstuff,srodrig1/lk,srodrig1/lk,nvll/lk,ErikCorryGoogle/lk,hollanderic/lkstuff,littlekernel/lk,travisg/lk,ErikCorryGoogle/lk,travisg/lk,minglun-tsai/lk,littlekernel/lk,ErikCorryGoogle/lk,minglun-tsai/lk,skabet/lk,skabet/lk | tools/bin2h.py | tools/bin2h.py | #!/usr/bin/env python
# vim: set expandtab ts=4 sw=4 tw=100:
import sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-b", "--before", dest="before", action="append",
help="text to put before, may be specified more than once")
parser.add_option("-a", "--after", dest="after", action="append",
help="text to put after, may be specified more than once")
(options, args) = parser.parse_args()
if options.before and len(options.before) > 0:
for b in options.before:
print b
offset = 0
f = bytearray(sys.stdin.read())
for c in f:
if offset != 0 and offset % 16 == 0:
print ""
print "%#04x," % c,
offset = offset + 1
print ""
if options.after and len(options.after) > 0:
for a in options.after:
print a
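# Example invocation (the binary comes in on stdin):
#     ./bin2h.py -b 'static const unsigned char data[] = {' -a '};' < logo.bin > logo.h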
| mit | Python |
|
d68cfae0cac869d6676643e33479383bc11b086a | Add najeto.py script. | kmzbrnoI/hJOPserver | utils/najeto.py | utils/najeto.py | """
Calculate elapsed distance based on hJOP HV csv output.
"""
import sys
import csv
class HV:
def __init__(self, addr: int, name: str, owner: str) -> None:
self.address: int = addr
self.name: str = name
self.owner: str = owner
self.start_forward: float = 0
self.start_backward: float = 0
self.end_forward: float = 0
self.end_backward: float = 0
if __name__ == '__main__':
    if len(sys.argv) != 4:
        sys.stderr.write(f'Usage: {sys.argv[0]} start.csv end.csv output.csv\n')
sys.exit(1)
start_fn, end_fn, out_fn = sys.argv[1:]
hvs = {}
with open(start_fn, encoding='cp1250') as start_file:
start_reader = csv.reader(start_file, delimiter=';')
for i, line in enumerate(start_reader):
if i == 0:
continue
addr, name, owner, forward, backward = int(line[0]), line[1], line[2], float(line[3]), float(line[4])
assert addr not in hvs, f'{addr} duplicity'
hvs[addr] = HV(addr, name, owner)
hvs[addr].start_forward = forward
hvs[addr].start_backward = backward
with open(end_fn, encoding='utf8') as end_file:
end_reader = csv.reader(end_file, delimiter=',')
for i, line in enumerate(end_reader):
if i == 0:
continue
addr, name, owner, forward, backward = int(line[0]), line[1], line[2], float(line[3]), float(line[4])
if addr in hvs:
if hvs[addr].name != name:
print(f'Warning: {addr}: name[begin] = {hvs[addr].name} != {name} = name[end]')
if hvs[addr].owner != owner:
print(f'Warning: {addr}: owner[begin] = {hvs[addr].owner} != {owner} = owner[end]')
hvs[addr].end_forward = forward
hvs[addr].end_backward = backward
else:
hvs[addr] = HV(addr, name, owner)
hvs[addr].end_forward = forward
hvs[addr].end_backward = backward
with open(out_fn, 'w', encoding='utf-8', newline='') as out_file:
writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['adresa', 'nazev', 'majitel', 'najeto_metru_vpred', 'najeto_metru_vzad'])
for hv in hvs.values():
forward: float = round(hv.end_forward - hv.start_forward, 2)
backward: float = round(hv.end_backward - hv.start_backward, 2)
assert (forward*backward) >= 0, f'HV {hv.address} has different signs for directions!'
if forward > 0 and backward > 0:
writer.writerow([hv.address, hv.name, hv.owner, forward, backward])
elif forward < 0 and backward < 0:
print(f'Omitting {hv.address} ({hv.name}) - negative diff')
| apache-2.0 | Python |
|
cf8a0105e0c4fc6af04ede6c7ae4fe4f4dac048e | add migrations | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/migrations/0103_update_startupupdate_model.py | accelerator/migrations/0103_update_startupupdate_model.py | # Generated by Django 2.2.28 on 2022-05-09 11:20
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0102_update_program_model'),
]
operations = [
migrations.AddField(
model_name='startupupdate',
name='acquired_valuation_usd',
field=models.DecimalField(
blank=True,
decimal_places=2,
max_digits=13,
null=True,
verbose_name='Valuation (in US dollars)'),
),
migrations.AddField(
model_name='startupupdate',
name='active_annualized_revenue_usd',
field=models.DecimalField(
blank=True,
decimal_places=2,
max_digits=13,
null=True,
verbose_name='Annualized revenue (in US dollars)'),
),
migrations.AddField(
model_name='startupupdate',
name='active_total_funding_usd',
field=models.DecimalField(
blank=True,
decimal_places=2,
max_digits=13,
null=True,
verbose_name='Total Funding Raised (in US dollars)'),
),
migrations.AddField(
model_name='startupupdate',
name='active_valuation_usd',
field=models.DecimalField(
blank=True,
decimal_places=2,
max_digits=13,
null=True,
verbose_name='Valuation (in US dollars)'),
),
migrations.AddField(
model_name='startupupdate',
name='currency_type',
field=models.CharField(
choices=[
('USD', 'USD'), ('GBP', 'GBP'),
('EUR', 'EUR'), ('JPY', 'JPY'),
('AUD', 'AUD'), ('CAD', 'CAD'),
('CHF', 'CHF'), ('NZD', 'NZD'),
('NGN', 'NGN'), ('MXN', 'MXN')],
default='USD',
max_length=5,
verbose_name='Status Currency'),
),
migrations.AddField(
model_name='startupupdate',
name='ipo_valuation_usd',
field=models.DecimalField(
blank=True,
decimal_places=2,
max_digits=13,
null=True,
verbose_name='Valuation (in US dollars)'),
),
migrations.AlterField(
model_name='startupupdate',
name='active_annualized_revenue',
field=models.DecimalField(
blank=True,
decimal_places=2,
max_digits=13,
null=True,
verbose_name='Annualized revenue'),
),
migrations.AlterField(
model_name='startupupdate',
name='active_total_funding',
field=models.DecimalField(
blank=True,
decimal_places=2,
max_digits=13,
null=True,
verbose_name='Total Funding Raised'),
),
migrations.AlterField(
model_name='startupupdate',
name='active_valuation',
field=models.DecimalField(
blank=True,
decimal_places=2,
max_digits=13,
null=True,
verbose_name='Valuation'),
),
]
| mit | Python |
|
7014c5affa780044fd46911287d883024bae3fae | Create ipy_custom_hbox.py | satishgoda/learningqt,satishgoda/learningqt | basics/layout/ipy_custom_hbox.py | basics/layout/ipy_custom_hbox.py |
from PySide import QtCore
from PySide import QtGui
class MyHBoxLayout(QtGui.QHBoxLayout):
def __init__(self, *args, **kwargs):
super(MyHBoxLayout, self).__init__(*args, **kwargs)
@property
def margins(self):
return self.contentsMargins()
@margins.setter
def margins(self, margins):
self.setContentsMargins(*margins)
class MyWidget(QtGui.QWidget):
def __init__(self, parent=None):
super(MyWidget, self).__init__(parent)
self.setLayout(MyHBoxLayout())
self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
@property
def lay(self):
return self.layout()
self = MyWidget()
self.show()
##
self.lay.addWidget(QtGui.QPushButton('1'))
self.lay.addWidget(QtGui.QPushButton('2'))
self.lay.margins = [0] * 4
self.lay.setSpacing(15)
self.lay.addStretch()
self.lay.addWidget(QtGui.QPushButton('3'))
self.lay.setSpacing(0)
self.lay.setSpacing(10)
| mit | Python |
|
17067d5d25c5ce755ba86505ffcf1dd6fd572deb | Initialize version.py | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | version.py | version.py | """python-tutorials version."""
__version__ = '1.0.1'
| mit | Python |
|
bc518732086795c699290d49f30ad5d449b79f9e | add template for settings.py | cgutierr3z/proyecto-eees,cgutierr3z/proyecto-eees,cgutierr3z/proyecto-eees | proyecto_eees/settings-template.py | proyecto_eees/settings-template.py | """
Django settings for proyecto_eees project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ADMINS = (
('Carlos Gutierrez', 'cgutierr3z@utp.edu.co'),
)
# Modelo de autenticacion de Usuarios
AUTH_USER_MODEL = 'eees.Usuario'
# Permite verificar si un usuario esta activo o no
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.AllowAllUsersModelBackend']
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x9sa47hq)w24g2xg!&+gvs$!w@h3ubjeif+a@b-@!3d5h4k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'eees.apps.EeesConfig',
'widget_tweaks',
]
#AUTH_USER_MODEL = 'eees.Usuario'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'proyecto_eees.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'proyecto_eees.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db-eees-AQWED.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'es-ES'
TIME_ZONE = 'America/Bogota'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
#Configuraciones para enviar mensajes usando gmail
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#EMAIL_USE_TLS = True
EMAIL_USE_SSL = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'email@gmail.com'
EMAIL_HOST_PASSWORD = 'pwd'
EMAIL_PORT = 465
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
SERVER_EMAIL = EMAIL_HOST_USER
| mit | Python |
|
c95ef4dc0771098e3589721851865f012b652136 | add requirements unit test | wwitzel3/awx,snahelou/awx,wwitzel3/awx,wwitzel3/awx,wwitzel3/awx,snahelou/awx,snahelou/awx,snahelou/awx | awx/main/tests/unit/test_python_requirements.py | awx/main/tests/unit/test_python_requirements.py | from pip.operations import freeze
from django.conf import settings
def test_req():
def check_is_in(src, dests):
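# requirements.txt may use '_' where pip freeze reports '-'; accept either spelling.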
if src not in dests:
src2 = [src[0].replace('_', '-'), src[1]]
if src2 not in dests:
print("%s not in" % src2)
return False
else:
print("%s not in" % src)
return False
return True
base_dir = settings.BASE_DIR
reqs_actual = []
xs = freeze.freeze(local_only=True, requirement=base_dir + "/../requirements/requirements.txt")
for x in xs:
if '## The following requirements were added by pip freeze' in x:
break
reqs_actual.append(x.split('=='))
reqs_expected = []
with open(base_dir + "/../requirements/requirements.txt") as f:
for line in f:
line.rstrip()
# TODO: process git requiremenst and use egg
if line.strip().startswith('#') or line.strip().startswith('git'):
continue
if line.startswith('-e'):
continue
line.rstrip()
reqs_expected.append(line.rstrip().split('=='))
for r in reqs_actual:
print(r)
not_found = []
for r in reqs_expected:
res = check_is_in(r, reqs_actual)
if res is False:
not_found.append(r)
if not_found:
raise RuntimeError("%s not found in\n\n%s" % (not_found, reqs_actual))
| apache-2.0 | Python |
|
5f47cf46c82d9a48a9efe5ad11c6c3a55896da12 | Implement abstract class for csc and csr matrix | cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy | cupy/sparse/compressed.py | cupy/sparse/compressed.py | from cupy import cusparse
from cupy.sparse import base
from cupy.sparse import data as sparse_data
class _compressed_sparse_matrix(sparse_data._data_matrix):
def __init__(self, arg1, shape=None, dtype=None, copy=False):
if isinstance(arg1, tuple) and len(arg1) == 3:
data, indices, indptr = arg1
if shape is not None and len(shape) != 2:
raise ValueError(
'Only two-dimensional sparse arrays are supported.')
if not(base.isdense(data) and data.ndim == 1 and
base.isdense(indices) and indices.ndim == 1 and
base.isdense(indptr) and indptr.ndim == 1):
raise ValueError(
'data, indices, and indptr should be 1-D')
if len(data) != len(indices):
raise ValueError('indices and data should have the same size')
if dtype is None:
dtype = data.dtype
if dtype != 'f' and dtype != 'd':
raise ValueError('Only float32 and float64 are supported')
sparse_data._data_matrix.__init__(self, data)
self.indices = indices.astype('i', copy=copy)
self.indptr = indptr.astype('i', copy=copy)
if shape is None:
shape = self._swap(len(indptr) - 1, int(indices.max()) + 1)
else:
raise ValueError(
'Only (data, indices, indptr) format is supported')
major, minor = self._swap(*shape)
if len(indptr) != major + 1:
raise ValueError('index pointer size (%d) should be (%d)'
% (len(indptr), major + 1))
self._descr = cusparse.MatDescriptor.create()
self._shape = shape
def _with_data(self, data):
return self.__class__(
(data, self.indices.copy(), self.indptr.copy()), shape=self.shape)
def _swap(self, x, y):
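# Overridden by the csr/csc subclasses to order the (major, minor) axes.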
raise NotImplementedError
def get_shape(self):
"""Shape of the matrix.
Returns:
tuple: Shape of the matrix.
"""
return self._shape
def getnnz(self, axis=None):
"""Number of stored values, including explicit zeros."""
if axis is None:
return self.data.size
else:
raise ValueError
def sorted_indices(self):
"""Returns a copy of the matrix with sorted indices."""
x = self.copy()
x.sort_indices()
return x
| mit | Python |
|
e99af02407edf1424733350a359264c2202b27c3 | Add unit test for appending with write_raster_netcdf. | landlab/landlab,cmshobe/landlab,cmshobe/landlab,landlab/landlab,amandersillinois/landlab,landlab/landlab,cmshobe/landlab,amandersillinois/landlab | landlab/io/netcdf/tests/test_write_raster_netcdf.py | landlab/io/netcdf/tests/test_write_raster_netcdf.py | import numpy as np
import pytest
import netCDF4 as nc
from numpy.testing import assert_array_equal
from landlab import RasterModelGrid
from landlab.io.netcdf import WITH_NETCDF4, NotRasterGridError, write_raster_netcdf
def test_append_with_time(tmpdir):
field = RasterModelGrid(4, 3)
field.add_field("node", "topographic__elevation", np.ones(12, dtype=np.int64))
with tmpdir.as_cwd():
write_raster_netcdf(
"test.nc", field, append=False, format="NETCDF4", with_time=True
)
field.at_node["topographic__elevation"] *= 2
write_raster_netcdf("test.nc", field, append=True, format="NETCDF4")
root = nc.Dataset("test.nc", "r", format="NETCDF4")
for name in ["topographic__elevation"]:
assert name in root.variables
assert_array_equal(
root.variables[name][:],
[
[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]],
[[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]],
],
)
assert root.variables[name][:].dtype == "int64"
assert "nt" in root.dimensions
assert len(root.dimensions["nt"]) == 2
root.close()
def test_without_time(tmpdir):
field = RasterModelGrid(4, 3)
field.add_field("node", "topographic__elevation", np.ones(12, dtype=np.int64))
with tmpdir.as_cwd():
write_raster_netcdf(
"test.nc", field, append=False, format="NETCDF4", with_time=False
)
root = nc.Dataset("test.nc", "r", format="NETCDF4")
for name in ["topographic__elevation"]:
assert name in root.variables
assert_array_equal(
root.variables[name][:], [[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]]
)
assert root.variables[name][:].dtype == "int64"
assert "nt" in root.dimensions
assert len(root.dimensions["nt"]) == 1
root.close()
def test_with_time(tmpdir):
field = RasterModelGrid(4, 3)
field.add_field("node", "topographic__elevation", np.ones(12, dtype=np.int64))
with tmpdir.as_cwd():
write_raster_netcdf(
"test.nc", field, append=False, format="NETCDF4", with_time=True
)
root = nc.Dataset("test.nc", "r", format="NETCDF4")
for name in ["topographic__elevation"]:
assert name in root.variables
assert_array_equal(
root.variables[name][:], [[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]]
)
assert root.variables[name][:].dtype == "int64"
assert "nt" in root.dimensions
assert len(root.dimensions["nt"]) == 1
root.close()
| mit | Python |
|
36333c275f4d3a66c8f14383c3ada5a42a197bea | Add module for displaying RAM usage | tobi-wan-kenobi/bumblebee-status,tobi-wan-kenobi/bumblebee-status | bumblebee/modules/memory.py | bumblebee/modules/memory.py | import bumblebee.module
import psutil
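# Human-readable byte formatter (B, KiB, MiB, GiB; anything larger falls through below).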
def fmt(num, suffix='B'):
for unit in [ "", "Ki", "Mi", "Gi" ]:
if num < 1024.0:
return "{:.2f}{}{}".format(num, unit, suffix)
num /= 1024.0
return "{:05.2f%}{}{}".format(num, "Gi", suffix)
class Module(bumblebee.module.Module):
def __init__(self, args):
super(Module, self).__init__(args)
self._mem = psutil.virtual_memory()
def data(self):
self._mem = psutil.virtual_memory()
free = self._mem.available
total = self._mem.total
return "{}/{} ({:05.02f}%)".format(fmt(self._mem.available), fmt(self._mem.total), 100.0 - self._mem.percent)
def warning(self):
return self._mem.percent < 20
def critical(self):
return self._mem.percent < 10
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit | Python |
|
06b536cdfd684d12ce64670bde50fdcbf7a71bd2 | Add a workspace_binary rule to run a binary from the workspace root | ixdy/kubernetes-repo-infra,kubernetes/repo-infra,kubernetes/repo-infra,ixdy/kubernetes-repo-infra,kubernetes/repo-infra,ixdy/kubernetes-repo-infra | defs/run_in_workspace.bzl | defs/run_in_workspace.bzl | # Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This technique was inspired by the gazelle rule implementation in bazelbuild/rules_go:
# https://github.com/bazelbuild/rules_go/blob/86ade29284ca11deeead86c061e9ba9bd0d157e0/go/private/tools/gazelle.bzl
# Writes out a script which saves the runfiles directory,
# changes to the workspace root, and then runs a command.
def _workspace_binary_script_impl(ctx):
content = """#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
BASE=$(pwd)
cd $(dirname $(readlink WORKSPACE))
"$BASE/{cmd}" $@
""".format(cmd=ctx.file.cmd.short_path)
ctx.actions.write(output=ctx.outputs.executable, content=content, is_executable=True)
runfiles = ctx.runfiles(
files = [ctx.file.cmd, ctx.file.workspace],
)
return [DefaultInfo(runfiles=runfiles)]
_workspace_binary_script = rule(
attrs = {
"cmd": attr.label(
mandatory = True,
allow_files = True,
single_file = True,
),
"workspace": attr.label(
mandatory = True,
allow_files = True,
single_file = True,
),
},
executable = True,
implementation = _workspace_binary_script_impl,
)
# Wraps a binary to be run in the workspace root via bazel run.
#
# For example, one might do something like
#
# workspace_binary(
# name = "dep",
# cmd = "//vendor/github.com/golang/dep/cmd/dep",
# )
#
# which would allow running dep with bazel run.
def workspace_binary(name, cmd, visibility=None):
script_name = name + "_script"
_workspace_binary_script(
name=script_name,
cmd=cmd,
workspace = "//:WORKSPACE",
)
native.sh_binary(
name = name,
srcs = [":" + script_name],
visibility = visibility,
)
| apache-2.0 | Python |
|
eda3e6c005c1115a039f394d6f00baabebd39fee | Add command for full daily build process | california-civic-data-coalition/django-calaccess-downloads-website,california-civic-data-coalition/django-calaccess-downloads-website,california-civic-data-coalition/django-calaccess-downloads-website | calaccess_website/management/commands/updatebuildpublish.py | calaccess_website/management/commands/updatebuildpublish.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Update to the latest available CAL-ACCESS snapshot and publish the files to the
website.
"""
import logging
from django.core.management import call_command
from calaccess_raw.management.commands.updatecalaccessrawdata import Command as updatecommand
logger = logging.getLogger(__name__)
class Command(updatecommand):
"""
Update to the latest available CAL-ACCESS snapshot and publish the files to
the website.
"""
help = 'Update to the latest available CAL-ACCESS snapshot and publish the\
files to the website.'
def handle(self, *args, **options):
"""
Make it happen.
"""
super(Command, self).handle(*args, **options)
self.header('Creating latest file links')
call_command('createlatestlinks')
self.header('Baking downloads-website content')
call_command('build')
self.header('Publishing backed content to S3 bucket.')
call_command('publish')
self.success("Done!")
| mit | Python |
|
056966052d0c23395a205511dce2e9577f376539 | Add Sequence | toslunar/chainerrl,toslunar/chainerrl | chainerrl/links/sequence.py | chainerrl/links/sequence.py | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import super
from future import standard_library
standard_library.install_aliases()
import inspect
import chainer
from chainerrl.recurrent import RecurrentChainMixin
class Sequence(chainer.ChainList, RecurrentChainMixin):
def __init__(self, *layers):
self.layers = layers
links = [layer for layer in layers if isinstance(layer, chainer.Link)]
super().__init__(*links)
def __call__(self, x, **kwargs):
h = x
for layer in self.layers:
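# Forward only the keyword arguments that appear in this layer's signature.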
layer_argnames = inspect.getargspec(layer)[0]
layer_kwargs = {k: v for k, v in kwargs.items()
if k in layer_argnames}
h = layer(h, **layer_kwargs)
return h
| mit | Python |
|
b8acaf64187f5626ef6755ef00d2b2a1471d4914 | Add closure type inference test | pombredanne/numba,pombredanne/numba,cpcloud/numba,seibert/numba,numba/numba,gmarkall/numba,gmarkall/numba,numba/numba,jriehl/numba,stefanseefeld/numba,sklam/numba,GaZ3ll3/numba,stefanseefeld/numba,gmarkall/numba,sklam/numba,stuartarchibald/numba,gdementen/numba,ssarangi/numba,GaZ3ll3/numba,GaZ3ll3/numba,sklam/numba,stonebig/numba,numba/numba,stuartarchibald/numba,ssarangi/numba,seibert/numba,numba/numba,shiquanwang/numba,stefanseefeld/numba,pitrou/numba,jriehl/numba,ssarangi/numba,seibert/numba,ssarangi/numba,jriehl/numba,stefanseefeld/numba,cpcloud/numba,pitrou/numba,sklam/numba,gmarkall/numba,IntelLabs/numba,shiquanwang/numba,gdementen/numba,seibert/numba,pitrou/numba,ssarangi/numba,sklam/numba,stuartarchibald/numba,seibert/numba,IntelLabs/numba,cpcloud/numba,GaZ3ll3/numba,gdementen/numba,stonebig/numba,stonebig/numba,IntelLabs/numba,numba/numba,pombredanne/numba,stuartarchibald/numba,cpcloud/numba,shiquanwang/numba,gdementen/numba,pitrou/numba,stonebig/numba,gmarkall/numba,IntelLabs/numba,stuartarchibald/numba,pombredanne/numba,gdementen/numba,cpcloud/numba,jriehl/numba,GaZ3ll3/numba,stefanseefeld/numba,IntelLabs/numba,pitrou/numba,stonebig/numba,jriehl/numba,pombredanne/numba | numba/tests/closures/test_closure_type_inference.py | numba/tests/closures/test_closure_type_inference.py | import numpy as np
from numba import *
from numba.tests.test_support import *
@autojit
def test_cellvar_promotion(a):
"""
>>> inner = test_cellvar_promotion(10)
200.0
>>> inner.__name__
'inner'
>>> inner()
1000.0
"""
b = int(a) * 2
@jit(void())
def inner():
print a * b
inner()
a = float(a)
b = a * a # + 1j # Promotion issue
return inner
testmod() | bsd-2-clause | Python |
|
79e3931d1a89fc1423e098b108a78302349c3f04 | Add a test scenario for establish of vpn connection | openstack/ec2-api,hayderimran7/ec2-api,stackforge/ec2-api,openstack/ec2-api,stackforge/ec2-api,hayderimran7/ec2-api | ec2api/tests/functional/scenario/test_vpn_routing.py | ec2api/tests/functional/scenario/test_vpn_routing.py | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ec2api.tests.functional import base
from ec2api.tests.functional import config
CONF = config.CONF
class VpnRoutingTest(base.EC2TestCase):
VPC_CIDR = '10.4.0.0/20'
CUSTOMER_GATEWAY_IP = '198.51.100.77'
CUSTOMER_VPN_CIDR = '172.16.25.0/24'
@classmethod
@base.safe_setup
def setUpClass(cls):
super(VpnRoutingTest, cls).setUpClass()
if not base.TesterStateHolder().get_vpc_enabled():
raise cls.skipException('VPC is disabled')
def test_vpn_routing(self):
vpc_id, _subnet_id = self.create_vpc_and_subnet(self.VPC_CIDR)
data = self.client.create_customer_gateway(
Type='ipsec.1', PublicIp=self.CUSTOMER_GATEWAY_IP, BgpAsn=65000)
cgw_id = data['CustomerGateway']['CustomerGatewayId']
self.addResourceCleanUpStatic(
self.client.delete_customer_gateway, CustomerGatewayId=cgw_id)
data = self.client.create_vpn_gateway(Type='ipsec.1')
vgw_id = data['VpnGateway']['VpnGatewayId']
self.addResourceCleanUpStatic(
self.client.delete_vpn_gateway, VpnGatewayId=vgw_id)
data = self.client.create_vpn_connection(
CustomerGatewayId=cgw_id, VpnGatewayId=vgw_id,
Options={'StaticRoutesOnly': True}, Type='ipsec.1')
vpn_id = data['VpnConnection']['VpnConnectionId']
self.addResourceCleanUp(self.client.delete_vpn_connection,
VpnConnectionId=vpn_id)
data = self.client.attach_vpn_gateway(VpnGatewayId=vgw_id,
VpcId=vpc_id)
self.addResourceCleanUp(self.client.detach_vpn_gateway,
VpnGatewayId=vgw_id, VpcId=vpc_id)
vpn_waiter = self.get_vpn_connection_waiter()
vpn_waiter.wait_available(vpn_id)
attach_waiter = self.get_vpn_gateway_attachment_waiter()
attach_waiter.wait_available(vgw_id, 'attached')
data = self.client.describe_route_tables(
Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])
rtb_id = data['RouteTables'][0]['RouteTableId']
data = self.client.enable_vgw_route_propagation(RouteTableId=rtb_id,
GatewayId=vgw_id)
data = self.client.create_vpn_connection_route(
VpnConnectionId=vpn_id,
DestinationCidrBlock=self.CUSTOMER_VPN_CIDR)
route_waiter = self.get_vpn_connection_route_waiter(
self.CUSTOMER_VPN_CIDR)
route_waiter.wait_available(vpn_id)
data = self.client.describe_route_tables(RouteTableIds=[rtb_id])
route = next((r for r in data['RouteTables'][0]['Routes']
if r['DestinationCidrBlock'] == self.CUSTOMER_VPN_CIDR),
None)
self.assertIsNotNone(route)
self.assertEqual('active', route['State'])
self.assertEqual('EnableVgwRoutePropagation', route['Origin'])
| apache-2.0 | Python |
|
3a04bff5a7940463d6429918215429700befb507 | add valid-number | ibigbug/leetcode | valid-number.py | valid-number.py | # Link: https://oj.leetcode.com/problems/valid-number/
class Solution:
"""
Notes please see https://blog.xiaoba.me/2014/11/10/leetcode-valid-number.html
"""
# @param s, a string
# @return a boolean
def isNumber(self, s):
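# DFA transition table: stateTable[input_type][state] gives the next state,
# where input_type is 0=digit, 1='.', 2=space, 3='e'/'E', 4='+'/'-'
# (see _getInputType below); -1 marks an invalid transition.
# Accepting states are 1, 3, 7 and 8 (checked in the return statement).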
stateTable = [
[ 1, 1, 1, 3, 3, 7, 7, 7,-1],
[ 4, 3, 4,-1,-1,-1,-1,-1,-1],
[ 0, 8,-1, 8,-1,-1,-1, 8, 8],
[-1, 5,-1, 5,-1,-1,-1,-1,-1],
[ 2,-1,-1,-1,-1, 6,-1,-1,-1]
]
i = 0
state = 0
while True:
if i == len(s):
break
c = s[i]
i += 1
inputType = self._getInputType(c)
if inputType is None:
return False
state = stateTable[inputType][state]
if state == -1:
return False
return state == 1 or state == 3 or state == 7 or state == 8
def _isDigit(self, c):
return c >= '0' and c <= '9'
def _getInputType(self, c):
if self._isDigit(c):
return 0
if c == '.':
return 1
if c == ' ':
return 2
if c.lower() == 'e':
return 3
if c == '+' or c == '-':
return 4
| mit | Python |
|
dec6ea168c68e267f15b74407f8745d242629d30 | Create tokens.py | supthunder/premeStock | tokens.py | tokens.py | C_KEY = ""
C_SECRET = ""
A_TOKEN = ""
A_TOKEN_SECRET = ""
| mit | Python |
|
d56c3528ad8058231910fd3d06895f39174eeb6c | Prepare v2.16.2.dev | Flexget/Flexget,Flexget/Flexget,tobinjt/Flexget,crawln45/Flexget,malkavi/Flexget,crawln45/Flexget,ianstalk/Flexget,Danfocus/Flexget,tobinjt/Flexget,tobinjt/Flexget,Danfocus/Flexget,malkavi/Flexget,crawln45/Flexget,Danfocus/Flexget,Flexget/Flexget,malkavi/Flexget,JorisDeRieck/Flexget,malkavi/Flexget,gazpachoking/Flexget,JorisDeRieck/Flexget,ianstalk/Flexget,JorisDeRieck/Flexget,gazpachoking/Flexget,ianstalk/Flexget,crawln45/Flexget,Flexget/Flexget,JorisDeRieck/Flexget,Danfocus/Flexget,tobinjt/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.16.2.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.16.1'
| mit | Python |
502beb022bb7d4e70f44e40411a1af2f7f08c14e | add test version | zhzhussupovkz/cex-io-api-python | CexioAPI.py | CexioAPI.py | #
# CexioAPI class
#
# @author zhzhussupovkz@gmail.com
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Zhussupov Zhassulan zhzhussupovkz@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import urllib
import urllib2
import hmac
import hashlib
import simplejson
import time
class CexioAPI(object):
def __init__(self, client_id, api_key, secret):
self.api_url = "https://cex.io/api/"
self.client_id = client_id
self.api_key = api_key
self.secret = secret
#for public requests
def __public_request(self, command, args={}):
args = urllib.urlencode(args)
url = self.api_url + command
req = urllib2.Request(url, args)
f = urllib2.urlopen(req)
res = simplejson.load(f)
return res
#for private requests
def __private_request(self, command, args=None):
args = args or {}
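# Authenticated calls require key, nonce and an uppercase-hex HMAC-SHA256
# signature computed over (nonce + client_id + api_key).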
nonce = str(time.time()).split('.')[0]
message = nonce + self.client_id + self.api_key
signature = hmac.new(self.secret, message, digestmod = hashlib.sha256).hexdigest().upper()
args.update({'key' : self.api_key, 'nonce' : nonce, 'signature' : signature})
args = urllib.urlencode(args)
url = self.api_url + command
req = urllib2.Request(url, args)
f = urllib2.urlopen(req)
res = simplejson.load(f)
return res
############### ticker ####################
#Returns JSON dictionary:
#last - last BTC price
#high - last 24 hours price high
#low - last 24 hours price low
#volume - last 24 hours volume
#bid - highest buy order
#ask - lowest sell order
def ticker(self):
return self.__public_request('ticker/GHS/BTC')
############### order_book ###############
#Returns JSON dictionary with "bids" and "asks".
#Each is a list of open orders and each order is
#represented as a list of price and amount.
def order_book(self):
return self.__public_request('order_book/GHS/BTC')
############### trade_history ###############
#Returns a list of recent trades, where each trade is a JSON dictionary:
#tid - trade id
#amount - trade amount
#price - price
#date - UNIX timestamp
def trade_history(self, since=1):
args = {'since' : since}
return self.__public_request('trade_history/GHS/BTC', args)
############## balance ################
#Returns JSON dictionary:
#available - available balance
#orders - balance in pending orders
#bonus - referral program bonus
def balance(self):
return self.__private_request('balance')
############## open orders #############
#Returns JSON list of open orders. Each order is represented as dictionary:
#id - order id
#time - timestamp
#type - buy or sell
#price - price
#amount - amount
#pending - pending amount (if partially executed)
def open_orders(self):
return self.__private_request('open_orders/GHS/BTC')
############## cancel order ############
#Returns 'true' if order has been found and canceled.
#Params:
#id - order ID
def cancel_order(self, order_id):
args = {'order_id' : order_id}
return self.__private_request('cancel_order/GHS/BTC', args)
############ place order #############
#Returns JSON dictionary representing order:
#id - order id
#time - timestamp
#type - buy or sell
#price - price
#amount - amount
#pending - pending amount (if partially executed)
#Params:
#type - 'buy' or 'sell'
#amount - amount
#price - price
def place_order(self, p_type='buy', amount=1, price=1):
args = {'type' : p_type, 'amount' : amount, 'price' : price}
return self.__private_request('place_order/GHS/BTC', args)
| mit | Python |
|
a475d50d2b7b9febe5fb01bb185b63cbbe25f4d1 | add migration to remove fields | hoover/search,hoover/search,hoover/search | hoover/search/migrations/0006_auto_20200303_1309.py | hoover/search/migrations/0006_auto_20200303_1309.py | # Generated by Django 2.2.7 on 2020-03-03 13:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('search', '0005_rename_user_hypens_to_dots'),
]
operations = [
migrations.RemoveField(
model_name='collection',
name='loader',
),
migrations.RemoveField(
model_name='collection',
name='loader_state',
),
migrations.RemoveField(
model_name='collection',
name='options',
),
]
| mit | Python |
|
32108ccab67a76a05150e8cfb5bbdf2ff3477346 | Create minesweeper.py | MrMansley/Minesweeper | game/minesweeper.py | game/minesweeper.py | from tkinter import *
root = Tk()
root.resizable(0, 0)
root.title("Minesweeper")
frame = Frame(root)
Grid.rowconfigure(root, 0, weight=1)
Grid.columnconfigure(root, 0, weight=1)
frame.grid(row=0, column=0)
class Tiles:
def __init__(self, frame, size):
self.size = size
self.frame = frame
self.tiles = []
for x in range(self.size):
self.tiles.append([])
for y in range(self.size):
self.tiles[x].append(Button(self.frame, text=' ', width=2, bd=3, command=lambda row=x, col=y: self.clicked(row, col)))
self.tiles[x][y].grid(row=x, column=y)
for x in range(self.size):
Grid.columnconfigure(frame, x, weight=1)
for y in range(self.size):
Grid.rowconfigure(frame, y, weight=1)
def clicked(self, x, y):
tiles[x][y]["text"] = '@'
tiles[x][y]["relief"] = SUNKEN
root.mainloop()
| mit | Python |
|
a75e87fd3b4fc3f370554227cefc4687593621ca | fix merge fup | fvbock/gDBPool | gdbpool/psyco_ge.py | gdbpool/psyco_ge.py | """A wait callback to allow psycopg2 cooperation with gevent.
Use `make_psycopg_green()` to enable gevent support in Psycopg.
"""
# Copyright (C) 2010 Daniele Varrazzo <daniele.varrazzo@gmail.com>
# and licensed under the MIT license:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import psycopg2
from psycopg2 import extensions
from gevent.socket import wait_read, wait_write
def make_psycopg_green():
"""Configure Psycopg to be used with gevent in non-blocking way."""
if not hasattr(extensions, 'set_wait_callback'):
raise ImportError(
"support for coroutines not available in this Psycopg version (%s)"
% psycopg2.__version__)
extensions.set_wait_callback(gevent_wait_callback)
def gevent_wait_callback(conn, timeout=None):
"""A wait callback useful to allow gevent to work with Psycopg."""
while 1:
state = conn.poll()
if state == extensions.POLL_OK:
break
elif state == extensions.POLL_READ:
wait_read(conn.fileno(), timeout=timeout)
elif state == extensions.POLL_WRITE:
wait_write(conn.fileno(), timeout=timeout)
else:
raise psycopg2.OperationalError(
"Bad result from poll: %r" % state)
| mit | Python |
|
5f503f0b9ab51ca2b1985fe88d5e84ff63b7d745 | Add sample playlists for testing features. | wuvt/wuvt-site,wuvt/wuvt-site,wuvt/wuvt-site,wuvt/wuvt-site | addplaylists.py | addplaylists.py | #!/usr/bin/env python2
from datetime import datetime
from datetime import timedelta
import random
from wuvt.trackman.lib import perdelta
from wuvt import db
from wuvt.trackman.models import DJSet, DJ
today = datetime.now()
print("adding dj")
dj = DJ(u"Johnny 5", u"John")
db.session.add(dj)
db.session.commit()
print("djadded")
for show in perdelta(today - timedelta(days=500), today, timedelta(hours=4)):
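# Give each 4-hour slot a 40% chance of getting a logged DJ set.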
if random.randint(0,99) < 40:
djset = DJSet(dj.id)
djset.dtstart = show
djset.dtend = show + timedelta(hours=4)
db.session.add(djset)
db.session.commit()
| agpl-3.0 | Python |
|
1905395783d5a0f5997e6e620ba09d41398840e0 | add test_vincia.py | deepjets/deepjets,deepjets/deepjets,deepjets/deepjets | test_vincia.py | test_vincia.py |
from deepjets.generate import generate_events
for event in generate_events('w_vincia.config', 1, vincia=True):
pass
| bsd-3-clause | Python |
|
4fab31eef9ad80230b36039b66c70d94456e5f9b | Add missing tests file from previous commit. | GrAndSE/lighty | tests/monad.py | tests/monad.py | '''Test case for monads and monoidic functions
'''
import unittest
from lighty import monads
class MonadTestCase(unittest.TestCase):
'''Test case for partial template execution
'''
def testNumberComparision(self):
monad = monads.ValueMonad(10)
assert monad == 10, 'Number __eq__ error: %s' % monad
assert monad > 9, 'Number __gt__ error: %s' % monad
assert monad >= 10, 'Number __ge__ error: %s' % monad
assert monad < 11, 'Number __lt__ error: %s' % monad
assert monad <= 10, 'Number __le__ error: %s' % monad
def testNumberActions(self):
monad = monads.ValueMonad(10)
assert monad + 10 == 20, 'Number + error: %s' % (monad + 10)
assert monad - 5 == 5, 'Number - error: %s' % (monad - 5)
assert monad / 2 == 5, 'Number / error: %s' % (monad / 2)
assert monad * 2 == 20, 'Number * error: %s' % (monad * 2)
assert monad ** 2 == 100, 'Number pow error: %s' % (monad ** 2)
assert monad << 1 == 10 << 1, 'Number << error: %s' % (monad << 1)
assert monad >> 1 == 10 >> 1, 'Number >> error: %s' % (monad >> 1)
def testNumberSeq(self):
monad = monads.ValueMonad(10)
assert len(monad) == 1, 'Number len error: %s' % len(monad)
assert monad[0] == 10, 'Number [0] error: %s' % monad[0]
assert isinstance(monad[1], monads.NoneMonad), ('Number [1] error: %s' %
monad[1])
assert not 10 in monad, 'Number in error: %s' % (10 in monad)
def test():
suite = unittest.TestSuite()
suite.addTest(MonadTestCase('testNumberComparision'))
suite.addTest(MonadTestCase('testNumberActions'))
suite.addTest(MonadTestCase('testNumberSeq'))
return suite
| bsd-3-clause | Python |
|
62207985d301dc9a47b0334f02a3f0c942e19d22 | Add packetStreamerClient example | alsmadi/CSCI-6617,similecat/floodlightsec,UdS-TelecommunicationsLab/floodlight,deepurple/floodlight,chinmaymhatre91/floodlight,rizard/geni-cinema,geddings/floodlight,alexreimers/floodlight,wallnerryan/FL_HAND,xph906/SDN,aprakash6/floodlight_video_cacher,onebsv1/floodlightworkbench,gfsantanna/firewall_SDN,duanjp8617/floodlight,rhoybeen/floodlightLB,onebsv1/floodlightworkbench,iluckydonkey/floodlight,ZhangMenghao/Floodlight,iluckydonkey/floodlight,teja-/floodlight,aprakash6/floodlight_video_cacher,Linerd/sdn_optimization,onebsv1/floodlight,rhoybeen/floodlightLB,thisthat/floodlight-controller,dhruvkakadiya/FloodlightLoadBalancer,woniu17/floodlight,Linerd/sdn_optimization,marymiller/floodlight,chris19891128/FloodlightSec,teja-/floodlight,rcchan/cs168-sdn-floodlight,marcbaetica/Floodlight-OVS-OF-Network-Solution-,duanjp8617/floodlight,UdS-TelecommunicationsLab/floodlight,rizard/fast-failover-demo,alberthitanaya/floodlight-dnscollector,baykovr/floodlight,aprakash6/floodlight_video_cacher,avbleverik/floodlight,CS-6617-Java/Floodlight,CS-6617-Java/Floodlight,iluckydonkey/floodlight,daniel666/multicastSDN,m1k3lin0/SDNProject,rizard/geni-cinema,StefanoSalsano/my-floodlight,pixuan/floodlight,similecat/floodlightsec,xph906/SDN-ec2,moisesber/floodlight,gfsantanna/firewall_SDN,netgroup/floodlight,niuqg/floodlight-test,xph906/SDN-NW,StefanoSalsano/my-floodlight,scofieldsoros/floodlight-0.9,alexreimers/floodlight,gfsantanna/firewall_SDN,egenevie/newnet,StefanoSalsano/my-floodlight,xph906/SDN,ZhangMenghao/Floodlight,jmiserez/floodlight,xph906/SDN,onebsv1/floodlightworkbench,alberthitanaya/floodlight-dnscollector,chechoRP/floodlight,omkale/myfloodlight,yeasy/floodlight-lc,wallnerryan/floodlight,TidyHuang/floodlight,akoshibe/fl-hhcp,srcvirus/floodlight,rizard/geni-cinema,nhelferty/sdn-project,wallnerryan/FL_HAND,daniel666/multicastSDN,chinmaymhatre91/floodlight,iluckydonkey/floodlight,CS-6617-Java/Floodlight,scofieldsoros/floodlight-0.9,cbarrin/EAGERFloodlight,Pengfei-Lu/floodlight,wallnerryan/FL_HAND,Wi5/odin-wi5-controller,chechoRP/floodlight,UdS-TelecommunicationsLab/floodlight,similecat/floodlightsec,thisthat/floodlight-controller,avbleverik/floodlight,nhelferty/sdn-project,pablotiburcio/AutoManIoT,pixuan/floodlight,riajkhu/floodlight,jmiserez/floodlight,egenevie/newnet,rsharo/floodlight,CS-6617-Java/Floodlight,floodlight/floodlight,kwanggithub/umfloodlight,TidyHuang/floodlight,UdS-TelecommunicationsLab/floodlight,chechoRP/floodlight,alsmadi/CSCI-6617,wallnerryan/floodlight,woniu17/floodlight,fazevedo86/floodlight,geddings/floodlight,baykovr/floodlight,SujithPandel/floodlight,gfsantanna/firewall_SDN,TKTL-SDN/SoftOffload-Master,AndreMantas/floodlight,akoshibe/fl-hhcp,Wi5/odin-wi5-controller,phisolani/floodlight,drinkwithwater/floodlightplus,chinmaymhatre91/floodlight,dhruvkakadiya/FloodlightLoadBalancer,rizard/SOSForFloodlight,SujithPandel/floodlight,daniel666/multicastSDN,wallnerryan/floodlight,schuza/odin-master,akoshibe/fl-hhcp,iluckydonkey/floodlight,m1k3lin0/SDNProject,09zwcbupt/floodlight,floodlight/floodlight,niuqg/floodlight-test,chris19891128/FloodlightSec,fazevedo86/floodlight,thisthat/floodlight-controller,daniel666/multicastSDN,kwanggithub/umfloodlight,akoshibe/fl-hhcp,marymiller/floodlight,Pengfei-Lu/floodlight,rizard/fast-failover-demo,riajkhu/floodlight,alexreimers/floodlight,drinkwithwater/floodlightplus,andi-bigswitch/floodlight-oss,kvm2116/floodlight,JinWenQiang/FloodlightController,xph906/SDN-ec2,omkale/myflood
light,avbleverik/floodlight,chinmaymhatre91/floodlight,TKTL-SDN/SoftOffload-Master,rizard/fast-failover-demo,phisolani/floodlight,andi-bigswitch/floodlight-oss,TKTL-SDN/SoftOffload-Master,rizard/floodlight,chinmaymhatre91/floodlight,m1k3lin0/SDNProject,lalithsuresh/odin-master,baykovr/floodlight,omkale/myfloodlight,SujithPandel/floodlight,SujithPandel/floodlight,alexreimers/floodlight,schuza/odin-master,xuraylei/floodlight,UdS-TelecommunicationsLab/floodlight,jmiserez/floodlight,moisesber/floodlight,alexreimers/floodlight,TidyHuang/floodlight,woniu17/floodlight,teja-/floodlight,xph906/SDN-ec2,rizard/SOSForFloodlight,09zwcbupt/floodlight,duanjp8617/floodlight,marcbaetica/Floodlight-OVS-OF-Network-Solution-,pablotiburcio/AutoManIoT,thisthat/floodlight-controller,rizard/SOSForFloodlight,ZhangMenghao/Floodlight,deepurple/floodlight,xuraylei/floodlight,pixuan/floodlight,marcbaetica/Floodlight-OVS-OF-Network-Solution-,rizard/floodlight,alsmadi/CSCI-6617,aprakash6/floodlight_video_cacher,rizard/fast-failover-demo,TKTL-SDN/SoftOffload-Master,netgroup/floodlight,dhruvkakadiya/FloodlightLoadBalancer,phisolani/floodlight,schuza/odin-master,smartnetworks/floodlight,TidyHuang/floodlight,wallnerryan/floodlight,smartnetworks/floodlight,alsmadi/CSCI-6617,egenevie/newnet,kvm2116/floodlight,xph906/SDN-ec2,netgroup/floodlight,phisolani/floodlight,cbarrin/EAGERFloodlight,Pengfei-Lu/floodlight,smartnetworks/floodlight,scofieldsoros/floodlight-0.9,rizard/fast-failover-demo,egenevie/newnet,CS-6617-Java/Floodlight,rizard/geni-cinema,netgroup/floodlight,riajkhu/floodlight,andiwundsam/floodlight-sync-proto,moisesber/floodlight,niuqg/floodlight-test,dhruvkakadiya/FloodlightLoadBalancer,marymiller/floodlight,swiatecki/DTUSDN,similecat/floodlightsec,marcbaetica/Floodlight-OVS-OF-Network-Solution-,andiwundsam/floodlight-sync-proto,pablotiburcio/AutoManIoT,onebsv1/floodlight,rcchan/cs168-sdn-floodlight,swiatecki/DTUSDN,rizard/floodlight,lalithsuresh/odin-master,fazevedo86/floodlight,xph906/SDN,chechoRP/floodlight,ZhangMenghao/Floodlight,similecat/floodlightsec,yeasy/floodlight-lc,srcvirus/floodlight,kwanggithub/umfloodlight,TKTL-SDN/SoftOffload-Master,avbleverik/floodlight,niuqg/floodlight-test,deepurple/floodlight,floodlight/floodlight,m1k3lin0/SDNProject,AndreMantas/floodlight,chris19891128/FloodlightSec,Linerd/sdn_optimization,riajkhu/floodlight,swiatecki/DTUSDN,duanjp8617/floodlight,xph906/SDN,xph906/SDN-NW,jmiserez/floodlight,woniu17/floodlight,Wi5/odin-wi5-controller,srcvirus/floodlight,m1k3lin0/SDNProject,wallnerryan/FL_HAND,09zwcbupt/floodlight,teja-/floodlight,CS-6617-Java/Floodlight,rcchan/cs168-sdn-floodlight,fazevedo86/floodlight,woniu17/floodlight,alberthitanaya/floodlight-dnscollector,Wi5/odin-wi5-controller,Linerd/sdn_optimization,onebsv1/floodlight,rsharo/floodlight,deepurple/floodlight,srcvirus/floodlight,duanjp8617/floodlight,dhruvkakadiya/FloodlightLoadBalancer,nhelferty/sdn-project,hgupta2/floodlight2,avbleverik/floodlight,lalithsuresh/odin-master,AndreMantas/floodlight,ZhangMenghao/Floodlight,drinkwithwater/floodlightplus,aprakash6/floodlight_video_cacher,Pengfei-Lu/floodlight,xph906/SDN-NW,rhoybeen/floodlightLB,alsmadi/CSCI-6617,StefanoSalsano/my-floodlight,scofieldsoros/floodlight-0.9,phisolani/floodlight,JinWenQiang/FloodlightController,TidyHuang/floodlight,chris19891128/FloodlightSec,rhoybeen/floodlightLB,andi-bigswitch/floodlight-oss,chechoRP/floodlight,moisesber/floodlight,yeasy/floodlight-lc,netgroup/floodlight,JinWenQiang/FloodlightController,CS-6617-Java/Floodlight,JinWenQiang/FloodlightCont
roller,Pengfei-Lu/floodlight,JinWenQiang/FloodlightController,xuraylei/floodlight,nhelferty/sdn-project,xph906/SDN-NW,09zwcbupt/floodlight,swiatecki/DTUSDN,smartnetworks/floodlight,rcchan/cs168-sdn-floodlight,drinkwithwater/floodlightplus,hgupta2/floodlight2,rizard/geni-cinema,rsharo/floodlight,lalithsuresh/odin-master,schuza/odin-master,thisthat/floodlight-controller,kvm2116/floodlight,xph906/SDN-NW,andi-bigswitch/floodlight-oss,pixuan/floodlight,deepurple/floodlight,swiatecki/DTUSDN,Wi5/odin-wi5-controller,xph906/SDN-ec2,baykovr/floodlight,geddings/floodlight,pixuan/floodlight,cbarrin/EAGERFloodlight,yeasy/floodlight-lc,moisesber/floodlight,marymiller/floodlight,andiwundsam/floodlight-sync-proto,rcchan/cs168-sdn-floodlight,andiwundsam/floodlight-sync-proto,rhoybeen/floodlightLB,jmiserez/floodlight,fazevedo86/floodlight,hgupta2/floodlight2,hgupta2/floodlight2,gfsantanna/firewall_SDN,alberthitanaya/floodlight-dnscollector,schuza/odin-master,alberthitanaya/floodlight-dnscollector,smartnetworks/floodlight,kwanggithub/umfloodlight,baykovr/floodlight | example/packetStreamerClientExample.py | example/packetStreamerClientExample.py | #!/usr/bin/python
import urllib2
import json
import re
import sys
from optparse import OptionParser
sys.path.append('~/floodlight/target/gen-py')
sys.path.append('~/floodlight/thrift/lib/py')
from packetstreamer import PacketStreamer
from packetstreamer.ttypes import *
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
SESSIONID = 'sessionId'
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage, version="%prog 1.0")
parser.add_option("-c", "--controller", dest="controller", metavar="CONTROLLER_IP",
default="127.0.0.1", help="controller's IP address")
parser.add_option("-m", "--mac", dest="mac", metavar="HOST_MAC",
help="The host mac address to trace the OF packets")
(options, args) = parser.parse_args()
def validateIp(ip):
ipReg = ("(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
"\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
"\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
"\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)")
m = re.compile(ipReg).match(ip)
if m:
return True
else :
return False
def validateMac(mac):
macReg = '([a-fA-F0-9]{2}:){5}[a-fA-F0-9]{2}' # matches xx:xx:xx:xx:xx:xx
m = re.compile(macReg).match(mac)
if m:
return True
else :
return False
if not validateIp(options.controller):
parser.error("Invalid format for ip address.")
if not options.mac:
parser.error("-m or --mac option is required.")
if not validateMac(options.mac):
parser.error("Invalid format for mac address. Format: xx:xx:xx:xx:xx:xx")
controller = options.controller
host = options.mac
url = 'http://%s:8080/wm/core/packettrace/json' % controller
filter = {'mac':host, 'direction':'both', 'period':1000}
post_data = json.dumps(filter)
request = urllib2.Request(url, post_data, {'Content-Type':'application/json'})
response_text = None
def terminateTrace(sid):
global controller
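# Re-post the session filter with period=-1, which tells the controller to end the trace.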
filter = {SESSIONID:sid, 'period':-1}
post_data = json.dumps(filter)
url = 'http://%s:8080/wm/core/packettrace/json' % controller
request = urllib2.Request(url, post_data, {'Content-Type':'application/json'})
try:
response = urllib2.urlopen(request)
response_text = response.read()
except Exception, e:
# Floodlight may not be running, but we don't want that to be a fatal
# error, so we just ignore the exception in that case.
print "Exception:", e
try:
response = urllib2.urlopen(request)
response_text = response.read()
except Exception, e:
# Floodlight may not be running, but we don't want that to be a fatal
# error, so we just ignore the exception in that case.
print "Exception:", e
sys.exit()
if not response_text:
print "Failed to start a packet trace session"
sys.exit()
response_text = json.loads(response_text)
sessionId = None
if SESSIONID in response_text:
sessionId = response_text[SESSIONID]
else:
print "Failed to start a packet trace session"
sys.exit()
try:
# Make socket
transport = TSocket.TSocket('localhost', 9090)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TFramedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = PacketStreamer.Client(protocol)
# Connect!
transport.open()
while 1:
packets = client.getPackets(sessionId)
for packet in packets:
print "Packet: %s"% packet
if "FilterTimeout" in packet:
sys.exit()
except Thrift.TException, e:
print '%s' % (e.message)
terminateTrace(sessionId)
except KeyboardInterrupt, e:
terminateTrace(sessionId)
# Close!
transport.close()
| apache-2.0 | Python |
|
50141a66831d080ecc0791f94d1bd3bfec0aeb65 | Add migration for #465 | Minkov/site,Minkov/site,Phoenix1369/site,DMOJ/site,Minkov/site,Minkov/site,DMOJ/site,DMOJ/site,monouno/site,DMOJ/site,Phoenix1369/site,Phoenix1369/site,monouno/site,monouno/site,monouno/site,Phoenix1369/site,monouno/site | judge/migrations/0046_blogpost_authors.py | judge/migrations/0046_blogpost_authors.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-09-08 16:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('judge', '0045_organization_access_code'),
]
operations = [
migrations.AddField(
model_name='blogpost',
name='authors',
field=models.ManyToManyField(blank=True, help_text='', to='judge.Profile', verbose_name='authors'),
),
]
| agpl-3.0 | Python |
|
364f8fedb492c1eedd317729b05d3bd37c0ea4ad | add andromercury tool | biddyweb/androguard,danielmohr/androguard,jxzhxch/androguard,natatapron/androguard,Ramble01/androguard,joeyt83/androguard,jxzhxch/androguard,liuyiVector/androguard,blokweb/androguard,bvanapriya/androguard,Iftekhar-ifti/androguard,pombreda/androguard,ohio813/androguard,IOsipov/androguard,woogi/androguard,natatapron/androguard,ccgreen13/androguard,natatapron/androguard,natatapron/androguard,blokweb/androguard,noushadali/androguard,tonyzzz/androguard,Alwnikrotikz/androguard,danielmohr/androguard,sychoi/androguard,Ramble01/androguard,xyzy/androguard,renndieG/androguard,Iftekhar-ifti/androguard,pombreda/androguard,cxmsmile/androguard,Alwnikrotikz/androguard,pombreda/androguard,erikrenz/androguard-1.9-old,IOsipov/androguard,jrgifford/androguard,renndieG/androguard,jrgifford/androguard,mei3am/androguard,sychoi/androguard,Ramble01/androguard,liuyiVector/androguard,edisona/androguard,bvanapriya/androguard,xyzy/androguard,mei3am/androguard,postfix/androguard,woogi/androguard,sychoi/androguard,siberider/androguard,biddyweb/androguard,blokweb/androguard,HwaAnnaLee/androguard,ccgreen13/androguard,IOsipov/androguard,edisona/androguard,IOsipov/androguard,tonyzzz/androguard,DaneZZenaD/androguard,erikrenz/androguard-1.9-old,edisona/androguard,noushadali/androguard,HwaAnnaLee/androguard,edisona/androguard,HwaAnnaLee/androguard,Iftekhar-ifti/androguard,blokweb/androguard,mei3am/androguard,ohio813/androguard,siberider/androguard,sychoi/androguard,blokweb/androguard,jakesyl/androguard,noushadali/androguard,Alwnikrotikz/androguard,joeyt83/androguard,pombreda/androguard,noushadali/androguard,jrgifford/androguard,danielmohr/androguard,biddyweb/androguard,jxzhxch/androguard,dandycheung/androguard,jxzhxch/androguard,pombreda/androguard,cxmsmile/androguard,noushadali/androguard,jakesyl/androguard,DaneZZenaD/androguard,IOsipov/androguard,ohio813/androguard,joeyt83/androguard,tonyzzz/androguard,biddyweb/androguard,siberider/androguard,erikrenz/androguard-1.9-old,edisona/androguard,dandycheung/androguard,liuyiVector/androguard,xyzy/androguard,postfix/androguard,dandycheung/androguard,biddyweb/androguard,ccgreen13/androguard,jxzhxch/androguard,liuyiVector/androguard,siberider/androguard,bvanapriya/androguard,natatapron/androguard,Ramble01/androguard,Ramble01/androguard,DaneZZenaD/androguard,jakesyl/androguard,tonyzzz/androguard,woogi/androguard,postfix/androguard,bvanapriya/androguard,sychoi/androguard,dandycheung/androguard,tonyzzz/androguard,siberider/androguard,dandycheung/androguard,bvanapriya/androguard,renndieG/androguard,cxmsmile/androguard,liuyiVector/androguard | andromercury.py | andromercury.py | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys, re, os
from optparse import OptionParser
from androguard.core.bytecodes import apk
sys.path.append("./elsim/")
from elsim.elsign import dalvik_elsign
sys.path.append("./mercury/client")
from merc.lib.common import Session
option_0 = { 'name' : ('-l', '--list'), 'help' : 'list all packages', 'nargs' : 1 }
option_1 = { 'name' : ('-i', '--input'), 'help' : 'get specific packages (a filter)', 'nargs' : 1 }
option_2 = { 'name' : ('-r', '--remotehost'), 'help' : 'specify ip of emulator/device', 'nargs' : 1 }
option_3 = { 'name' : ('-p', '--port'), 'help' : 'specify the port', 'nargs' : 1 }
option_4 = { 'name' : ('-o', '--output'), 'help' : 'output directory to write packages', 'nargs' : 1 }
option_5 = { 'name' : ('-b', '--database'), 'help' : 'database : use this database', 'nargs' : 1 }
option_6 = { 'name' : ('-c', '--config'), 'help' : 'use this configuration', 'nargs' : 1 }
option_7 = { 'name' : ('-v', '--verbose'), 'help' : 'display debug information', 'action' : 'count' }
options = [option_0, option_1, option_2, option_3, option_4, option_5, option_6, option_7]
def display(ret, debug) :
print "---->", ret[0],
def main(options, arguments) :
sessionip = "127.0.0.1"
sessionport = 31415
if options.remotehost :
sessionip = options.remotehost
if options.port :
sessionport = int(options.port)
newsession = Session(sessionip, sessionport, "bind")
# Check if connection can be established
if newsession.executeCommand("core", "ping", None).data == "pong":
if options.list :
request = {'filter': options.list, 'permissions': None }
apks_info = newsession.executeCommand("packages", "info", {}).getPaddedErrorOrData()
print apks_info
elif options.input and options.output :
s = None
if options.database != None or options.config != None :
s = dalvik_elsign.MSignature( options.database, options.config, options.verbose != None, ps = dalvik_elsign.PublicSignature)
request = {'filter': options.input, 'permissions': None }
apks_info = newsession.executeCommand("packages", "info", request).getPaddedErrorOrData()
print apks_info
for i in apks_info.split("\n") :
if re.match("APK path:", i) != None :
name_app = i.split(":")[1][1:]
print name_app,
response = newsession.downloadFile(name_app, options.output)
print response.data, response.error,
if s != None :
a = apk.APK( options.output + "/" + os.path.basename(name_app) )
if a.is_valid_APK() :
display( s.check_apk( a ), options.verbose )
print
else:
print "\n**Network Error** Could not connect to " + sessionip + ":" + str(sessionport) + "\n"
if __name__ == "__main__" :
parser = OptionParser()
for option in options :
param = option['name']
del option['name']
parser.add_option(*param, **option)
options, arguments = parser.parse_args()
sys.argv[:] = arguments
main(options, arguments)
| apache-2.0 | Python |
|
e5931a5837b1574681757e2c6fc7260122b48746 | Add minify util | stefanogualdi/grails-ckeditor,stefanogualdi/grails-ckeditor,julyantonicheva/grails-ckeditor,julyantonicheva/grails-ckeditor,stefanogualdi/grails-ckeditor,stefanogualdi/grails-ckeditor,stefanogualdi/grails-ckeditor,julyantonicheva/grails-ckeditor,julyantonicheva/grails-ckeditor | web-app/js/ofm/scripts/utils/minify.py | web-app/js/ofm/scripts/utils/minify.py | #!/usr/bin/python2.6
# Minify Filemanager javascript files
# Usage : $ python ./utils/minify.py
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def disable(self):
self.HEADER = ''
self.OKBLUE = ''
self.OKGREEN = ''
self.WARNING = ''
self.FAIL = ''
self.ENDC = ''
import httplib, urllib, sys, os
fmRootFolder = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + "/"
os.chdir(fmRootFolder) # set working directory
toMinify = ["filemanager.js"]
print bcolors.HEADER + "-------------------------------------" + bcolors.ENDC
# we loop on JS languages files
for index, item in enumerate(toMinify):
# print index, item
dir = os.path.dirname(item)
file = os.path.basename(item)
with open (fmRootFolder + item, "r") as myfile:
js_input=myfile.read()
# Define the parameters for the POST request and encode them in
# a URL-safe format.
params = urllib.urlencode([
('js_code', js_input),
# ('compilation_level', 'WHITESPACE_ONLY'),
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('output_format', 'text'),
('output_info', 'compiled_code'),
])
params2 = urllib.urlencode([
('js_code', js_input),
# ('compilation_level', 'WHITESPACE_ONLY'),
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('output_format', 'text'),
('output_info', 'errors'),
])
# Always use the following value for the Content-type header.
headers = { "Content-type": "application/x-www-form-urlencoded" }
conn = httplib.HTTPConnection('closure-compiler.appspot.com')
conn.request('POST', '/compile', params, headers)
response = conn.getresponse()
data = response.read()
# we write the minified file - os.path.splitext(file)[0] return filename without extension
with open(fmRootFolder + dir + '/' + os.path.splitext(file)[0] + ".min.js", "w") as text_file:
text_file.write(data)
# We retrieve errors
conn.request('POST', '/compile', params2, headers)
response = conn.getresponse()
errors = response.read()
if errors == "":
print bcolors.OKBLUE + file + " has been minified. No error found."
else:
print bcolors.FAIL + file + " : the code contains errors : "
print ""
print errors + bcolors.ENDC
conn.close()
print bcolors.HEADER + "-------------------------------------" + bcolors.ENDC
| apache-2.0 | Python |
|
348ffbf16fcb67768d72bd18167e6c70c99a27a1 | Add Homodyne node | nckz/bart,nckz/bart,nckz/bart,nckz/bart,nckz/bart | gpi/Homodyne_GPI.py | gpi/Homodyne_GPI.py | # Author: Ashley Anderson III <aganders3@gmail.com>
# Date: 2015-10-10 21:13
# Copyright (c) 2015 Dignity Health
from __future__ import absolute_import, division, print_function, unicode_literals
import os
# gpi, future
import gpi
from bart.gpi.borg import IFilePath, OFilePath, Command
# bart
import bart
base_path = bart.__path__[0] # library base for executables
import bart.python.cfl as cfl
class ExternalNode(gpi.NodeAPI):
'''Usage: homodyne dim fraction <input> <output>
Perform homodyne reconstruction along dimension dim.
'''
def initUI(self):
# Widgets
self.addWidget('SpinBox', 'dim', min=0)
self.addWidget('DoubleSpinBox', 'fraction', min=0.5, max=1.0,
decimals=3, singlestep=0.01)
# IO Ports
self.addInPort('kspace', 'NPYarray')
self.addOutPort('out', 'NPYarray')
return 0
def compute(self):
kspace = self.getData('kspace')
# load up arguments list
args = [base_path+'/homodyne']
args += [str(self.getVal('dim'))]
args += [str(self.getVal('fraction'))]
# setup file for passing data to external command
in1 = IFilePath(cfl.writecfl, kspace, asuffix=['.cfl','.hdr'])
args += [in1]
out1 = OFilePath(cfl.readcfl, asuffix=['.cfl','.hdr'])
args += [out1]
# run commandline
print(Command(*args))
self.setData('out', out1.data())
in1.close()
out1.close()
return 0
| bsd-3-clause | Python |
|
1a7fa8080d19909ccf8e8e89aa19c92c1413f1c1 | Add script to submite jobs again | lnls-fac/job_manager | apps/pyjob_submite_jobs_again.py | apps/pyjob_submite_jobs_again.py | #!/usr/bin/env python3
import os
import sys
import subprocess
right_inputs = False
if len(sys.argv) > 2:
    tp = sys.argv[1]
    rms = [int(x) for x in sys.argv[2:]]
    if tp in ['ma', 'ex', 'xy']:
        right_inputs = True

curdir = os.getcwd()
if right_inputs:
    if curdir.endswith('trackcpp'):
        flatfile = 'flatfile.txt'
        input_file = 'input_' + tp.lower() + '.py'
        exec_file = 'runjob_' + tp.lower() + '.sh'
        dirs = curdir.split(os.sep)
        label = '-'.join(dirs[-5:]) + '-submitting_again.'
        for m in rms:
            mlabel = 'rms%02i' % m
            os.chdir(os.path.join(curdir, mlabel))
            files = os.listdir(os.getcwd())
            kicktable_files = ','.join([f for f in files if f.endswith('_kicktable.txt')])
            if len(kicktable_files) != 0:
                inputs = ','.join([kicktable_files, flatfile, input_file])
            else:
                inputs = ','.join([flatfile, input_file])
            description = ': '.join([mlabel, tp.upper(), label])
            p = subprocess.Popen(['pyjob_qsub.py', '--inputFiles', inputs, '--exec', exec_file, '--description', description])
            p.wait()
            os.chdir(curdir)
    else:
        print('Change the current working directory to the trackcpp directory.')
else:
    print('Invalid inputs')
| mit | Python |
|
1bf7439c67e2206acb0c6d285014261eeb18097f | Add coverage as single execution | mi-schi/php-code-checker | coverage.py | coverage.py | from app import initialization
from app.configuration import add
from app.check import *
initialization.run()
add('phpunit-coverage', 'true')
phpunit.execute()
| mit | Python |
|
67f5e4a4ec4606d00fb94139b9c39c7abe0be33b | Add browse queue statistics sample | fbraem/mqweb,fbraem/mqweb,fbraem/mqweb | samples/python/queue_statistics.py | samples/python/queue_statistics.py | '''
This sample will read all queue statistic messages from SYSTEM.ADMIN.STATISTICS.QUEUE.
MQWeb runs on localhost and is listening on port 8081.
'''
import json
import httplib
import socket
import argparse
parser = argparse.ArgumentParser(
    description='MQWeb - Python sample - Browse statistic messages from SYSTEM.ADMIN.STATISTICS.QUEUE',
    epilog="For more information: http://www.mqweb.org"
)
parser.add_argument('-m', '--queuemanager', help='Name of the queuemanager', required=True)
args = parser.parse_args()
size = 1024 * 32
url = "/api/message/browse/" + args.queuemanager + '/SYSTEM.ADMIN.STATISTICS.QUEUE?size=' + str(size)
try:
    conn = httplib.HTTPConnection('localhost', 8081)
    conn.request('GET', url)
    res = conn.getresponse()
    result = json.loads(res.read())
    if 'error' in result:
        print('Received a WebSphere MQ error: ' +
              str(result['error']['reason']['code']))
    else:
        count = 0
        for message in result['data']:
            count += 1
            if 'admin' in message:
                parameters = message['admin']['parameters']
                print(str(parameters['IntervalStartDate']['value']) + ' ' +
                      str(parameters['IntervalStartTime']['value']) + ' ' +
                      str(parameters['IntervalEndDate']['value']) + ' ' +
                      str(parameters['IntervalEndTime']['value']))
                if 'QStatisticsData' in parameters:
                    queues = {}
                    for statistics in parameters['QStatisticsData']['value']:
                        queue = {
                            'depth': {
                                'min': statistics['QMinDepth']['value'],
                                'max': statistics['QMaxDepth']['value']
                            },
                            'get': {
                                'count': statistics['GetCount']['value'][0] + statistics['GetCount']['value'][1],
                                'bytes': statistics['GetBytes']['value'][0] + statistics['GetBytes']['value'][1],
                                'fail': statistics['GetFailCount']['value']
                            },
                            'put': {
                                'count': statistics['PutCount']['value'][0] + statistics['PutCount']['value'][1] + statistics['Put1Count']['value'][0] + statistics['Put1Count']['value'][1],
                                'bytes': statistics['PutBytes']['value'][0] + statistics['PutBytes']['value'][1],
                                'fail': statistics['PutFailCount']['value'] + statistics['Put1FailCount']['value']
                            },
                            'browse': {
                                'count': statistics['BrowseCount']['value'][0] + statistics['BrowseCount']['value'][1],
                                'bytes': statistics['BrowseBytes']['value'][0] + statistics['BrowseBytes']['value'][1],
                                'fail': statistics['BrowseFailCount']['value']
                            }
                        }
                        queues[statistics['QName']['value']] = queue
                    print(json.dumps(queues))
                else:
                    print(json.dumps(parameters))
        print('Number of messages: ' + str(count))
except httplib.HTTPException as e:
    print('An HTTP error occurred while inquiring the queue manager: ' + str(e))
except socket.error as e:
    print(e.strerror)
    print('Is the MQWeb daemon running?')
| mit | Python |
|
884ae74bb75e5a0c60da74791a2e6fad9e4b83e5 | Add py solution for 436. Find Right Interval | ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode | py/find-right-interval.py | py/find-right-interval.py | from operator import itemgetter
# Definition for an interval.
# class Interval(object):
#     def __init__(self, s=0, e=0):
#         self.start = s
#         self.end = e
class Solution(object):
    def findRightInterval(self, intervals):
        """
        :type intervals: List[Interval]
        :rtype: List[int]
        """
        sorted_itv = map(itemgetter(1, 2), sorted((x.start, i, x) for i, x in enumerate(intervals)))
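        # for each interval, binary-search the start-sorted list for the
        # leftmost interval whose start >= itv.end; its original index is the answer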
        size = len(intervals)
        ans = []
        for itv in intervals:
            L, U = -1, size
            while L + 1 < U:
                mid = (L + U) / 2
                if sorted_itv[mid][1].start >= itv.end:
                    U = mid
                else:
                    L = mid
            if U == size:
                ans.append(-1)
            else:
                ans.append(sorted_itv[U][0])
        return ans
| apache-2.0 | Python |
|
07f8fd56ab366a2d1365278c3310ade4b1d30c57 | Add functional test for version negotiation | openstack/heat,noironetworks/heat,noironetworks/heat,openstack/heat | heat_integrationtests/functional/test_versionnegotiation.py | heat_integrationtests/functional/test_versionnegotiation.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from heat_integrationtests.functional import functional_base
expected_version_dict = {
    "versions": [
        {"links": [{"href": None, "rel": "self"}],
         "status": "CURRENT", "id": "v1.0"}
    ]
}


class VersionNegotiationTestCase(functional_base.FunctionalTestsBase):

    def test_authless_version_negotiation(self):
        # NOTE(pas-ha): this will grab the public endpoint by default
        heat_url = self.identity_client.get_endpoint_url(
            'orchestration', region=self.conf.region)
        heat_api_root = heat_url.split('/v1')[0]
        expected_version_dict[
            'versions'][0]['links'][0]['href'] = heat_api_root + '/v1/'
        r = requests.get(heat_api_root)
        self.assertEqual(300, r.status_code, 'got response %s' % r.text)
        self.assertEqual(expected_version_dict, r.json())
| apache-2.0 | Python |
|
4b8f7a4c97668b4dbd8634d6b01e30b71737c3bd | fix send_HTML_email to work when no email_from argument is supplied | dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq | dimagi/utils/django/email.py | dimagi/utils/django/email.py | from django.conf import settings
from django.core.mail import get_connection
from django.core.mail.message import EmailMultiAlternatives
NO_HTML_EMAIL_MESSAGE = """
Your email client is trying to display the plaintext version of an email that
is only supported in HTML. Please set your email client to display this message
in HTML, or use an email client that supports HTML emails.
"""
def send_HTML_email(subject, recipient, html_content, text_content=None,
                    cc=None, email_from=settings.DEFAULT_FROM_EMAIL,
                    file_attachments=None):
    if not text_content:
        text_content = getattr(settings, 'NO_HTML_EMAIL_MESSAGE',
                               NO_HTML_EMAIL_MESSAGE)

    from_header = {'From': email_from}  # From-header
    connection = get_connection()
    msg = EmailMultiAlternatives(subject, text_content, email_from,
                                 [recipient], headers=from_header,
                                 connection=connection, cc=cc)
    for file in (file_attachments or []):
        if file:
            msg.attach(file["title"], file["file_obj"].getvalue(),
                       file["mimetype"])
    msg.attach_alternative(html_content, "text/html")
    msg.send()
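# Example usage (hypothetical addresses):
# send_HTML_email('Report ready', 'user@example.com', '<p>Done!</p>',
#                 cc=['admin@example.com'])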
| from django.conf import settings
from django.core.mail import get_connection
from django.core.mail.message import EmailMultiAlternatives
NO_HTML_EMAIL_MESSAGE = """
Your email client is trying to display the plaintext version of an email that
is only supported in HTML. Please set your email client to display this message
in HTML, or use an email client that supports HTML emails.
"""
def send_HTML_email(subject, recipient, html_content, text_content=None, cc=None, email_from=None, file_attachments=None):
    if not text_content:
        text_content = getattr(settings, 'NO_HTML_EMAIL_MESSAGE',
                               NO_HTML_EMAIL_MESSAGE)

    # If you get the return_path header wrong, this may impede mail delivery. It appears that the SMTP server
    # has to recognize the return_path as being valid for the sending host. If we set it to, say, our SMTP
    # server, this will always be the case (as the server is explicitly serving the host).
    if email_from is None:
        # todo: verify that this is even necessary here since it seems like email_return_path == email_from
        email_return_path = getattr(settings, 'EMAIL_RETURN_PATH', None)
        if email_return_path is None:
            email_return_path = settings.EMAIL_LOGIN
        email_from = getattr(settings, 'EMAIL_FROM', None)
        if email_from is None:
            email_from = email_return_path
    else:
        email_return_path = email_from

    from_header = {'From': email_from}  # From-header
    connection = get_connection()
    msg = EmailMultiAlternatives(subject, text_content, email_return_path, [recipient], headers=from_header, connection=connection, cc=cc)
    for file in (file_attachments or []):
        if file:
            msg.attach(file["title"], file["file_obj"].getvalue(), file["mimetype"])
    msg.attach_alternative(html_content, "text/html")
    msg.send() | bsd-3-clause | Python |
f7c4f8d43b30dfee36d4ff46e9133194a15b3e81 | Add tests for __unicode__ functions in model. (#1026) | taranjeet/EvalAI,taranjeet/EvalAI,taranjeet/EvalAI,taranjeet/EvalAI | tests/unit/accounts/test_models.py | tests/unit/accounts/test_models.py | from django.contrib.auth.models import User
from django.test import TestCase
from accounts.models import Profile, UserStatus
class BaseTestCase(TestCase):
    def setUp(self):
        self.user = User.objects.create(
            username='user',
            email='user@test.com',
            password='password')


class UserStatusTestCase(BaseTestCase):
    def setUp(self):
        super(UserStatusTestCase, self).setUp()
        self.user_status = UserStatus.objects.create(
            name='user',
            status=UserStatus.UNKNOWN,
        )

    def test__str__(self):
        self.assertEqual(self.user_status.name, self.user_status.__str__())


class ProfileTestCase(BaseTestCase):
    def setUp(self):
        super(ProfileTestCase, self).setUp()
        self.profile = Profile.objects.get(user=self.user)

    def test__str__(self):
        self.assertEqual('{}'.format(self.profile.user), self.profile.__str__())
| bsd-3-clause | Python |
|
3e5b98c1a79f625fbf9f54af782e459de7fa5b1f | update migration with new filename and parent migration name | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/migrations/0052_cleanup_twitter_urls.py | accelerator/migrations/0052_cleanup_twitter_urls.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from accelerator.twitter_handle_cleanup import (
    clean_entrepreneur_profile_twitter_handles,
    clean_expert_profile_twitter_handles,
    clean_organization_twitter_handles
)
def clean_up_twitter_handles(apps, schema_editor):
    Organization = apps.get_model('accelerator', 'Organization')
    ExpertProfile = apps.get_model('accelerator', 'ExpertProfile')
    EntrepreneurProfile = apps.get_model(
        'accelerator',
        'EntrepreneurProfile')
    clean_entrepreneur_profile_twitter_handles(EntrepreneurProfile)
    clean_expert_profile_twitter_handles(ExpertProfile)
    clean_organization_twitter_handles(Organization)
class Migration(migrations.Migration):
    dependencies = [
        (
            'accelerator',
            '0051_add_register_for_events_to_event_subnav_items'
        ),
    ]

    operations = [
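        # reverse_code is a no-op so the migration can be unapplied without
        # attempting to restore the old, uncleaned handles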
        migrations.RunPython(
            clean_up_twitter_handles,
            migrations.RunPython.noop),
    ]
| mit | Python |
|
c97f648a012c38802d9637d4c573a4ca9c8e1633 | Create encoder.py | sajithshetty/SLAE32,sajithshetty/SLAE32 | additional/customencoder/encoder.py | additional/customencoder/encoder.py | #!/usr/bin/python
# below is the shellcode for /bin/sh using the execve syscall
shellcode = ("\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x89\xe2\x53\x89\xe1\xb0\x0b\xcd\x80")
t=[]
w=[]
z=[]
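# encoding scheme used below: rotate each shellcode byte right by 2 bits, then
# add 1; a matching decoder stub would subtract 1 and rotate left by 2 at runtime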
ror = lambda val, r_bits, max_bits: \
    ((val & (2**max_bits - 1)) >> r_bits % max_bits) | \
    (val << (max_bits - (r_bits % max_bits)) & (2**max_bits - 1))

for i in range(0, len(shellcode)):
    s = ord(shellcode[i])
    y = ror(s, 2, 8)
    b = y + 1
    w.append(s)
    t.append(y)
    z.append(b)
print "length %d" %len(t)
print "[+] Original shellcode..:", (", ".join(hex(c) for c in w[0::]))
print "[+] ROR shellcode..:", (", ".join(hex(c) for c in t[0::]))
print "[+] ROR shellcode after adding 1 to each byte ..:", (", ".join(hex(c) for c in z[0::]))
| cc0-1.0 | Python |
|
30bb33609d9e22b6999d086196ed622a456d7dc2 | Create incomplete_minimal_logger.py | AdityaSoni19031997/Machine-Learning,AdityaSoni19031997/Machine-Learning | lld_practice/incomplete_minimal_logger.py | lld_practice/incomplete_minimal_logger.py | # just trying to minimize what all it offers
# based on snippets from the Python standard logging module
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
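# higher numeric value == more severe; a logger set to level L only emits
# records whose level is >= L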
_levelToName = {
    CRITICAL: 'CRITICAL',
    ERROR: 'ERROR',
    WARNING: 'WARNING',
    INFO: 'INFO',
    DEBUG: 'DEBUG',
    NOTSET: 'NOTSET',
}

_nameToLevel = {
    'CRITICAL': CRITICAL,
    'FATAL': FATAL,
    'ERROR': ERROR,
    'WARN': WARNING,
    'WARNING': WARNING,
    'INFO': INFO,
    'DEBUG': DEBUG,
    'NOTSET': NOTSET,
}
def getLevelName(level):
    result = _levelToName.get(level)
    if result is not None:
        return result
    result = _nameToLevel.get(level)
    if result is not None:
        return result
    return "Level %s" % level


def _checkLevel(level):
    if isinstance(level, int):
        rv = level
    elif str(level) == level:
        if level not in _nameToLevel:
            raise ValueError("Unknown level: %r" % level)
        rv = _nameToLevel[level]
    else:
        raise TypeError("Level not an integer or a valid string: %r" % level)
    return rv
class my_logger:
    def __init__(self, logger_name, logger_level):
        # write to console and a text file is the req...
        # error > warning > info > debug...
        self.logger_name = logger_name
        self.level = _checkLevel(logger_level)
        self.parent = None  # no hierarchy yet, but getEffectiveLevel() walks it
        self._cache = {}

    def info(self, msg, *args, **kwargs):
        # check whether this level is enabled, then log it
        if self.isEnabledFor(INFO):
            self.log_me(log_level="INFO", log_msg=msg, *args, **kwargs)

    def warn(self, msg, *args, **kwargs):
        if self.isEnabledFor(WARNING):
            self.log_me(log_level="WARN", log_msg=msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        if self.isEnabledFor(ERROR):
            self.log_me(log_level="ERROR", log_msg=msg, *args, **kwargs)

    def debug(self, msg, *args, **kwargs):
        if self.isEnabledFor(DEBUG):
            self.log_me(log_level="DEBUG", log_msg=msg, *args, **kwargs)

    def handler(self, msg_level, msg):
        msg_level = msg_level
        pass

    def isEnabledFor(self, level):
        # is the logger enabled for this level?
        try:
            return self._cache[level]
        except KeyError:
            try:
                is_enabled = self._cache[level] = (level >= self.getEffectiveLevel())
            finally:
                pass
            return is_enabled

    def setLevel(self, level):
        self.level = _checkLevel(level)

    def log_me(self, log_level, log_msg, *args, **kwargs):
        # we need to handle handlers here as well..
        # create a log_record (prepare the message here)
        pass

    def getEffectiveLevel(self):
        # get the effective level for this logger...
        logger = self
        while logger:
            if logger.level:
                return logger.level
            logger = logger.parent
        return NOTSET

    def __repr__(self):
        level = getLevelName(self.getEffectiveLevel())
        return '<%s %s (%s)>' % (self.__class__.__name__, self.logger_name, level)
| mit | Python |
|
fb004f72c27b49ba9661e6a83b8f49be39757d22 | add changemath shell | lichunown/jekyllblog,lichunown/jekyllblog,lichunown/jekyllblog | math_change.py | math_change.py | import sys
filename = './Deterministic Policy Gradient Algorithms笔记.md'
outname = ''
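# rewrites a Markdown post so TeX math delimited by single '$' becomes
# display-style '$$' blocks (presumably for this Jekyll blog's MathJax setup)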
def change(filename, outname):
    f = open(filename, encoding='utf8')
    data = f.readlines()
    f.close()

    out = ''
    doublenum = 0
    for line in data:
        if line == '$$\n':
            doublenum += 1
            if doublenum % 2 == 0:
                out += '$$\n\n'
            else:
                out += '\n$$\n'
        elif '$' in line:
            out += line.replace('$', '\n$$\n').replace('$$$$', '$$')
        else:
            out += line

    with open(outname, 'w', encoding='utf8') as f:
        f.write(out)


if __name__ == '__main__':
    arglen = len(sys.argv) - 1
    if arglen == 2:
        change(*sys.argv[1:])
    if arglen == 1:
        filename = sys.argv[1]
        change(filename, filename)
| apache-2.0 | Python |
|
79ea8a3a9ac43ba5ab9789e4962b4fb0814dccc0 | clean up localsettings example | SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,gmimano/commcaretest,dimagi/commcare-hq,gmimano/commcaretest,gmimano/commcaretest,SEL-Columbia/commcare-hq | localsettings.example.py | localsettings.example.py | import os
import sys
####### Database config. This assumes Postgres #######
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'commcarehq',
        'USER': 'postgres',
        'PASSWORD': '******'
    }
}
####### Couch Config ######
COUCH_SERVER_ROOT = '127.0.0.1:5984'
COUCH_USERNAME = 'admin'
COUCH_PASSWORD = '********'
COUCH_DATABASE_NAME = 'commcarehq'
####### # Email setup ########
# email settings: these ones are the custom hq ones
EMAIL_LOGIN = "notifications@dimagi.com"
EMAIL_PASSWORD="******"
EMAIL_SMTP_HOST="smtp.gmail.com"
EMAIL_SMTP_PORT=587
ADMINS = (('HQ Dev Team', 'commcarehq-dev+www-notifications@dimagi.com'),)
BUG_REPORT_RECIPIENTS = ['commcarehq-support@dimagi.com']
NEW_DOMAIN_RECIPIENTS = ['commcarehq-dev+newdomain@dimagi.com']
####### Log/debug setup ########
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# The django logs will end up here
DJANGO_LOG_FILE = os.path.join('/opt/www.commcarehq.org_project/log',"www.commcarehq.org.django.log")
SEND_BROKEN_LINK_EMAILS = True
CELERY_SEND_TASK_ERROR_EMAILS = True
####### Static files ########
filepath = os.path.abspath(os.path.dirname(__file__))
# media for user uploaded media. in general this won't be used at all.
MEDIA_ROOT = os.path.join(filepath,'mediafiles')
STATIC_ROOT = os.path.join(filepath,'staticfiles')
####### Bitly ########
BITLY_LOGIN = 'dimagi'
BITLY_APIKEY = '*******'
####### Jar signing config ########
_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
JAR_SIGN = dict(
    jad_tool = os.path.join(_ROOT_DIR, "submodules", "core-hq-src", "corehq", "apps", "app_manager", "JadTool.jar"),
    key_store = os.path.join(os.path.dirname(os.path.dirname(_ROOT_DIR)), "DimagiKeyStore"),
    key_alias = "javarosakey",
    store_pass = "*******",
    key_pass = "*******",
)
####### XEP stuff - TODO: remove this section when we retire XEP ########
REFLEXIVE_URL_BASE = "https://localhost:8001"
def get_url_base():
    return REFLEXIVE_URL_BASE
GET_URL_BASE = 'settings.get_url_base'
####### SMS Config ########
# Mach
SMS_GATEWAY_URL = "http://gw1.promessaging.com/sms.php"
SMS_GATEWAY_PARAMS = "id=******&pw=******&dnr=%(phone_number)s&msg=%(message)s&snr=DIMAGI"
# Unicel
UNICEL_CONFIG = {"username": "Dimagi",
"password": "******",
"sender": "Promo" }
####### Custom reports ########
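# maps a domain name to a list of dotted paths to custom report classes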
CUSTOM_REPORT_MAP = {
"domain_name": [
'path.to.CustomReport',
]
}
####### Domain sync / de-id ########
DOMAIN_SYNCS = {
"domain_name": {
"target": "target_db_name",
"transform": "corehq.apps.domainsync.transforms.deidentify_domain"
}
}
DOMAIN_SYNC_APP_NAME_MAP = { "app_name": "new_app_name" }
####### Misc / HQ-specific Config ########
DEFAULT_PROTOCOL = "https" # or http
OVERRIDE_LOCATION="https://www.commcarehq.org"
GOOGLE_ANALYTICS_ID = '*******'
AXES_LOCK_OUT_AT_FAILURE = False
LUCENE_ENABLED = True
INSECURE_URL_BASE = "http://submit.commcarehq.org"
PREVIEWER_RE = r'^.*@dimagi\.com$'
GMAPS_API_KEY = '******'
FORMTRANSLATE_TIMEOUT = 5
LOCAL_APPS = ('django_cpserver','dimagi.utils', 'gunicorn', 'django_extensions')
| import os
# Postgres config
DATABASE_ENGINE = 'postgresql_psycopg2'
DATABASE_NAME = 'commcarehq'
DATABASE_USER = 'postgres'
DATABASE_PASSWORD = '*****'
DATABASE_HOST = ''
DATABASE_PORT = '5432'
DJANGO_LOG_FILE = "/var/log/datahq/datahq.django.log"
LOG_SIZE = 1000000
LOG_LEVEL = "DEBUG"
LOG_FILE = "/var/log/datahq/datahq.log"
LOG_FORMAT = "[%(name)s]: %(message)s"
LOG_BACKUPS = 256 # number of logs to keep
filepath = os.path.abspath(os.path.dirname(__file__))
# Define STATIC_ROOT here if you wish; the staticfiles management command needs it to merge all static media.
STATIC_ROOT = os.path.join(filepath, 'staticmedia')
####### Couch Forms ######
COUCH_SERVER_ROOT = 'localhost:5984'
COUCH_USERNAME = ''
COUCH_PASSWORD = ''
COUCH_DATABASE_NAME = 'commcarehq'
BITLY_LOGIN = 'dimagi'
BITLY_APIKEY = '*****'
EMAIL_LOGIN="user@gmail.com"
EMAIL_PASSWORD="******"
EMAIL_SMTP_HOST="smtp.gmail.com"
EMAIL_SMTP_PORT=587
JAR_SIGN = dict(
key_store = "/PATH/TO/KEY_STORE",
key_alias = "KEY",
store_pass = "*****",
key_pass = "*****",
)
# Link to XForm Editor
# in the future we will possibly allow multiple
EDITOR_URL = 'http://localhost:8011/xep/initiate/'
XFORMPLAYER_URL = 'http://localhost:8888/play_remote/'
# A back door for phones that can't do SSL to access HQ through http
INSECURE_URL_BASE = "http://submit.mysite.com"
BUG_REPORT_RECIPIENTS = ['me@example.com']
PREVIEWER_RE = r'^.*@dimagi\.com$' | bsd-3-clause | Python |
cc6f30cb1c91b321db6bf77496c8b6fe7c56aabb | Add log parser | qinglan233/vass,qinglan233/vass,qinglan233/vass,qinglan233/vass | log_parser/parse_logs.py | log_parser/parse_logs.py | #!/usr/bin/env python
######################################################
# -*- coding: utf-8 -*-
# File Name: parse_logs.py
# Author: James Hong & Qian Li
# Created Date: 2017-10-28
# Description: Parse CloudWatch logs
######################################################
import argparse
import json
import os
import re
import sys
import boto3
import gzip
import numpy as np
import shutil
from collections import OrderedDict
TEMP_INPUT = './download_log.gz'
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--bucket', '-b', type=str, required=True,
                        help='S3 bucket where log files are stored')
    parser.add_argument('--prefix', '-p', type=str, required=True,
                        help='S3 log files prefix')
    parser.add_argument('--outfile', '-o', type=str, required=True,
                        help='File to save parsed output')
    return parser.parse_args()


class StatsObject(object):
    def __init__(self):
        self.numLambdas = 0
        self.data = OrderedDict()

    def incrementNumLambdas(self):
        self.numLambdas += 1

    def record_key_value(self, k, v):
        if k not in self.data:
            self.data[k] = []
        self.data[k].append(v)

    def print_stats(self):
        print 'Parsed %d lambda logs' % self.numLambdas
        for k, v in self.data.iteritems():
            print k
            print ' mean:', np.mean(v)
            print ' stdev:', np.std(v)
            print ' median:', np.median(v)
            print ' min:', min(v)
            print ' max:', max(v)
            print ' 10th:', np.percentile(v, 10)
            print ' 25th:', np.percentile(v, 25)
            print ' 75th:', np.percentile(v, 75)
            print ' 90th:', np.percentile(v, 90)
            print ' 95th:', np.percentile(v, 95)
            print ' 99th:', np.percentile(v, 99)

    def dump_parsed_values(self, outfile):
        print >> sys.stderr, 'Writing parsed results to', outfile
        with open(outfile, 'w') as ofs:
            json.dump(self.data, ofs)
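# Matches AWS Lambda's end-of-invocation REPORT log line, e.g.:
#   REPORT RequestId: ... Duration: 102.50 ms Billed Duration: 200 ms
#   Memory Size: 1024 MB Max Memory Used: 87 MB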
REPORT_RE = re.compile(r'Duration: ([\d.]+) ms[\s]+Billed Duration: (\d+) ms[\s]+Memory Size: (\d+) MB[\s]+Max Memory Used: (\d+) MB')
def parse_line(line, stats):
    if 'Timelist:' in line:
        try:
            _, timelist = line.split('Timelist:', 1)
            timelistObj = json.loads(json.loads(timelist.strip()))
            for k, v in timelistObj.iteritems():
                stats.record_key_value(k, v)
        except Exception as e:
            print >> sys.stderr, e, line

    matchObj = REPORT_RE.search(line)
    if matchObj is not None:
        duration = float(matchObj.group(1))
        billedDuration = int(matchObj.group(2))
        memorySize = int(matchObj.group(3))
        maxMemoryUsed = int(matchObj.group(4))
        stats.record_key_value('duration', duration)
        stats.record_key_value('billed-duration', billedDuration)
        stats.record_key_value('memory-size', memorySize)
        stats.record_key_value('max-memory-used', maxMemoryUsed)
        stats.incrementNumLambdas()


def ensure_clean_state():
    if os.path.exists(TEMP_INPUT):
        os.remove(TEMP_INPUT)


def main(args):
    ensure_clean_state()
    print >> sys.stderr, 'Bucket: ', args.bucket
    print >> sys.stderr, 'Prefix: ', args.prefix

    stats = StatsObject()
    s3 = boto3.resource('s3')
    inputBucket = args.bucket
    inputPrefix = args.prefix

    # fetch each matching log file from S3
    logsBucket = s3.Bucket(inputBucket)
    for obj in logsBucket.objects.filter(Prefix=inputPrefix):
        objKey = obj.key
        if objKey.endswith('.gz'):
            print >> sys.stderr, 'Parsing', objKey
            s3.Object(logsBucket.name, objKey).download_file(TEMP_INPUT)
            try:
                with gzip.open(TEMP_INPUT, 'rb') as logFile:
                    for line in logFile:
                        parse_line(line, stats)
            except Exception as e:
                print >> sys.stderr, e

    print('S3 Bucket: {}'.format(args.bucket))
    print('File Prefix: {}'.format(args.prefix))
    stats.print_stats()
    if args.outfile is not None:
        stats.dump_parsed_values(args.outfile)


if __name__ == '__main__':
    main(get_args())
| apache-2.0 | Python |
|
f380db268eecec80a5ab12fcc553c0278452b18a | add file ProbModelXML.py along with an example of student model | anaviltripathi/pgmpy,ankurankan/pgmpy,palashahuja/pgmpy,pratyakshs/pgmpy,jhonatanoliveira/pgmpy,abinashpanda/pgmpy,sandeepkrjha/pgmpy,khalibartan/pgmpy,yashu-seth/pgmpy,ankurankan/pgmpy,liquidmetal/pgmpy,liquidmetal/pgmpy,pratyakshs/pgmpy,jhonatanoliveira/pgmpy,sandeepkrjha/pgmpy,pgmpy/pgmpy,abinashpanda/pgmpy,vivek425ster/pgmpy,palashahuja/pgmpy,kislayabhi/pgmpy,kris-singh/pgmpy,yashu-seth/pgmpy,vivek425ster/pgmpy,kislayabhi/pgmpy,khalibartan/pgmpy,pgmpy/pgmpy,kris-singh/pgmpy,anaviltripathi/pgmpy | pgmpy/readwrite/ProbModelXML.py | pgmpy/readwrite/ProbModelXML.py | """
ProbModelXML: http://leo.ugr.es/pgm2012/submissions/pgm2012_submission_43.pdf
For the student example the ProbModelXML file should be:
<?xml version="1.0" encoding="UTF-8"?>
<ProbModelXML formatVersion="1.0">
    <ProbNet type="BayesianNetwork">
        <AdditionalConstraints />
        <Comment>
            Student example model from Probabilistic Graphical Models: Principles and Techniques by Daphne Koller
        </Comment>
        <Language>
            English
        </Language>
        <AdditionalProperties />
        <Variable name="intelligence" type="FiniteState" role="Chance">
            <Comment />
            <Coordinates />
            <AdditionalProperties />
            <States>
                <State name="smart"><AdditionalProperties /></State>
                <State name="dumb"><AdditionalProperties /></State>
            </States>
        </Variable>
        <Variable name="difficulty" type="FiniteState" role="Chance">
            <Comment />
            <Coordinates />
            <AdditionalProperties />
            <States>
                <State name="difficult"><AdditionalProperties /></State>
                <State name="easy"><AdditionalProperties /></State>
            </States>
        </Variable>
        <Variable name="grade" type="FiniteState" role="Chance">
            <Comment />
            <Coordinates />
            <AdditionalProperties />
            <States>
                <State name="grade_A"><AdditionalProperties /></State>
                <State name="grade_B"><AdditionalProperties /></State>
                <State name="grade_C"><AdditionalProperties /></State>
            </States>
        </Variable>
        <Variable name="recommendation_letter" type="FiniteState" role="Chance">
            <Comment />
            <Coordinates />
            <AdditionalProperties />
            <States>
                <State name="good"><AdditionalProperties /></State>
                <State name="bad"><AdditionalProperties /></State>
            </States>
        </Variable>
        <Variable name="SAT" type="FiniteState" role="Chance">
            <Comment />
            <Coordinates />
            <AdditionalProperties />
            <States>
                <State name="high"><AdditionalProperties /></State>
                <State name="low"><AdditionalProperties /></State>
            </States>
        </Variable>
        <Links>
            <Link var1="difficulty" var2="grade" directed="1">
                <Comment>Directed Edge from difficulty to grade</Comment>
                <Label>diff_to_grad</Label>
                <AdditionalProperties />
            </Link>
            <Link var1="intelligence" var2="grade" directed="1">
                <Comment>Directed Edge from intelligence to grade</Comment>
                <Label>intel_to_grad</Label>
                <AdditionalProperties />
            </Link>
            <Link var1="intelligence" var2="SAT" directed="1">
                <Comment>Directed Edge from intelligence to SAT</Comment>
                <Label>intel_to_sat</Label>
                <AdditionalProperties />
            </Link>
            <Link var1="grade" var2="recommendation_letter" directed="1">
                <Comment>Directed Edge from grade to recommendation_letter</Comment>
                <Label>grad_to_reco</Label>
                <AdditionalProperties />
            </Link>
        </Links>
        <Potential type="Table" role="ConditionalProbability" label="string">
            <Comment>CPDs in the form of a table</Comment>
            <AdditionalProperties />
            <!-- TODO: per-variable states and table values for each CPD go here -->
        </Potential>
    </ProbNet>
    <Policies />
    <InferenceOptions />
    <Evidence>
        <EvidenceCase>
            <Finding variable="string" state="string" stateIndex="integer" numericValue="number"/>
        </EvidenceCase>
    </Evidence>
</ProbModelXML>
""" | mit | Python |
|
209314b65ee960d73ee81baad6b9ced4102d6c0b | Introduce GenericSparseDB() class | wojtekwalczak/FB_datalab | lib/generic_sparse_db.py | lib/generic_sparse_db.py | #!/usr/bin/env python
# -*- encoding: utf-8
import gzip
import scipy.io as sio
from utils.utils import Utils
class GenericSparseDB(Utils):
    def init(self):
        self.data = sio.mmread(gzip.open(self._matrix_fn)).tolil()
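        # .tolil(): the LIL (list-of-lists) sparse format keeps row slicing
        # and per-row assignment cheap after the Matrix Market read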
        self.factors = self._load_pickle(self._factors_fn)
        self.fac_len = len(self.factors)
        self.col_names = self.factors + self._load_pickle(self._colnames_fn)
        assert self.data.shape[1] == len(self.col_names),\
            'Mismatch between the number of columns: %s - %s.'\
            % (self.data.shape[1], len(self.col_names))

    def reset(self):
        self.init()
| mit | Python |
|
47dff2561be481ff067c22ed98d9ea6a9cf8ae10 | Add test to execute notebooks | adicu/AccessibleML,alanhdu/AccessibleML | test/test_notebook.py | test/test_notebook.py | import os
import glob
import contextlib
import subprocess
import pytest
notebooks = list(glob.glob("*.ipynb", recursive=True))
@contextlib.contextmanager
def cleanup(notebook):
    name, __ = os.path.splitext(notebook)
    yield

    fname = name + ".html"
    if os.path.isfile(fname):
        os.remove(fname)
@pytest.mark.parametrize("notebook", notebooks)
def test_notebook(notebook):
    with cleanup(notebook):
        # hack to execute the notebook from commandline
        assert 0 == subprocess.call(["jupyter", "nbconvert", "--to=html",
                                     "--ExecutePreprocessor.enabled=True",
                                     notebook])
| mit | Python |
|
ec033203d8e82258347eb4f6a6a83ef67bc9171c | Add expr tests | schae234/Camoco,schae234/Camoco | tests/test_Expr.py | tests/test_Expr.py | #!/usr/bin/python3
import pytest
import numpy as np
def test_nans_in_same_place(testCOB):
    norm_expr = testCOB.expr(raw=False)
    raw_expr = testCOB.expr(raw=True).ix[norm_expr.index, norm_expr.columns]
    assert all(np.isnan(norm_expr) == np.isnan(raw_expr))


def test_inplace_nansort(testCOB):
    x = np.random.rand(50000)
    for i in np.random.randint(0, 50000, 500):
        x[i] = np.nan
    sorted_x = testCOB.inplace_nansort(x)
    assert all(np.isnan(x) == np.isnan(sorted_x))
| mit | Python |
|
f3c4b7513c49189750ea15b36e561a4e5ed56214 | add linear classification back | RoboJackets/robocup-software,RoboJackets/robocup-software,RoboJackets/robocup-software,RoboJackets/robocup-software | soccer/gameplay/evaluation/linear_classification.py | soccer/gameplay/evaluation/linear_classification.py |
# Classifies a feature into any number of classes
# Linear classification is defined as
# y = f(x, w, b) where...
# x is a vector of input features of an object
# w is a vector of weights to apply to the features
# b is the bias of the feature-weight system
# f() is x dot w + b
# y is the final output score
# Classifies the object into two distinct classes based on a cutoff value
# Anything less than the cutoff is of class true; anything greater is of class false
#
# @param input The vector of input features
# @param weights The vector of weights to apply to the input features
# @param bias The bias of the features-weight system
# @param cutoff The number which splits the output score of the object into two classes
# @return Returns a tuple of the class (true or false) and the computed score
def binary_classification(input, weights, bias, cutoff):
    score = linear_classification(input, weights, bias)
    return (score < cutoff, score)
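# Worked example (hypothetical feature/weight values):
#   binary_classification([0.2, 0.9], [1.0, -0.5], 0.1, 0.5)
#   score = 0.2*1.0 + 0.9*(-0.5) + 0.1 = -0.15  ->  (True, -0.15)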
# Returns the raw output score of the linear classifier based on the dot product
#
# @param input The vector of input features
# @param weights The vector of weights to apply to the input features
# @param bias The bias of the features-weight system
def linear_classification(input, weights, bias):
    # Element wise multiplication
    out = map(lambda x, w: x * w, input, weights)
    return sum(out) + bias | apache-2.0 | Python |
|
14c20e35bcfc55cc3c12d94596079fc27a907f94 | Add unit tests | brentmitchell25/deoxys,brentmitchell25/deoxys | tests/test_unit.py | tests/test_unit.py | import unittest
from test_utilities import mapTestJsonFiles, mapJsonToYml, testYaml, getImmediateSubdirectories, unidiff_output
class TestUnit(unittest.TestCase):
    def test_input(self):
        testDirectories = getImmediateSubdirectories('test_input')
        for directory in testDirectories:
            json = mapTestJsonFiles(directory)
            ymlInput = mapJsonToYml(json)['services']
            ymlOutput = testYaml(ymlInput, inputDirectoryName=directory)
            try:
                self.assertEqual(ymlInput, ymlOutput, msg='{}\n{}'.format(directory, unidiff_output(ymlOutput, ymlInput)))
            except AssertionError as e:
                print(e)
                raise  # re-raise so the test still fails after printing the diff


if __name__ == '__main__':
    unittest.main()
| mit | Python |
|
c2b082ebe95acc24f86fde9cd6875d7de3a9ca40 | Set up test_user file | ucfopen/canvasapi,ucfopen/canvasapi,ucfopen/canvasapi | tests/test_user.py | tests/test_user.py | import unittest
import settings
import requests_mock
from util import register_uris
from pycanvas.user import User
from pycanvas.exceptions import ResourceDoesNotExist
from pycanvas import Canvas
class TestUser(unittest.TestCase):
"""
Tests core Account functionality
"""
@classmethod
def setUpClass(self):
requires = {
}
adapter = requests_mock.Adapter()
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY, adapter)
register_uris(settings.BASE_URL, requires, adapter) | mit | Python |
|
39589065b158061c280f68fa730f72bf595428be | Add Stata package (#10189) | LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/stata/package.py | var/spack/repos/builtin/packages/stata/package.py | # Copyright 2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
from datetime import datetime
class Stata(Package):
"""STATA is a general-purpose statistical software package developed
by StataCorp."""
# Known limitations of this installer:
# * This really only installs the command line version of the program. To
# install GUI support there are extra packages needed that I can't easily
# test right now (should be installable via yum as a temp workaround):
# libgtk-x11-2.0.so libgdk-x11-2.0.so libatk-1.0.so libgdk_pixbuf-2.0.so
# Those libraries appear to be provided by: pango gdk-pixbuf2 gtk2
#
# * There are two popular environment variables that can be set, but vary from
# place to place, so future enhancement maybe to support STATATMP and TMPDIR.
#
# * I haven't tested any installer version but 15.
homepage = "https://www.stata.com/"
# url = "stata"
version('15', '2486f4c7db1e7b453004c7bd3f8da40ba1e30be150613065c7b82b1915259016')
# V15 depends on libpng v12 and fails with other versions of libpng
depends_on('libpng@1.2.57')
# STATA is downloaded from user/pass protected ftp as Stata15Linux64.tar.gz
def url_for_version(self, version):
return "file://{0}/Stata{1}Linux64.tar.gz".format(os.getcwd(), version)
# STATA is simple and needs really just the PATH set.
def setup_environment(self, spack_env, run_env):
run_env.prepend_path('PATH', prefix)
run_env.prepend_path('LD_LIBRARY_PATH', self.spec['libpng'].prefix.lib)
# Extracting the file provides the following:
# ./unix/
# ./unix/linux64/
# ./unix/linux64/docs.taz
# ./unix/linux64/setrwxp
# ./unix/linux64/ado.taz
# ./unix/linux64/inst2
# ./unix/linux64/base.taz
# ./unix/linux64/bins.taz
# ./license.pdf
# ./stata15.ico
# ./install
#
# The installation scripts aren't really necessary:
# ./install is a shell script that sets up the environment.
# ./unix/linux64/setrwxp is a shell script that ensures permissions.
# ./unix/linux64/inst2 is the actual installation script.
#
# 1. There is a markfile that is the version number. Stata uses this for
# for doing version checks/updates.
# echo $(date) > installed.150
#
# 2. Then it extracts the tar.gz files: ado.taz base.taz bins.taz docs.taz
#
# 3. It copies installer scripts to root directory
# cp ./unix/linux64/setrwxp setrwxp
# cp ./unix/linux64/inst2 inst2
#
# 4. Then it checks for proper permissions:
# chmod 750 setrwxp inst2
# ./setrwxp now
#
# 5. The last step has to be run manually since it is an interactive binary
# for configuring the license key. Load the module and run:
# $ stinit
def install(self, spec, prefix):
bash = which('bash')
tar = which('tar')
# Step 1.
x = datetime.now()
with open("installed.150", "w") as fh:
fh.write(x.strftime("%a %b %d %H:%M:%S %Z %Y"))
# Step 2.
instlist = ['ado.taz', 'base.taz', 'bins.taz', 'docs.taz']
for instfile in instlist:
tar('-x', '-z', '-f', 'unix/linux64/' + instfile)
# Step 3.
install('unix/linux64/setrwxp', 'setrwxp')
install('unix/linux64/inst2', 'inst2')
# Step 4. Since the install script calls out specific permissions and
# could change in the future (or old versions) I thought it best to
# just use it.
bash("./setrwxp", "now")
# Install should now be good to copy into the installation directory.
install_tree('.', prefix)
| lgpl-2.1 | Python |