Dataset preview: columns `input` (strings, 2.65k to 237k characters) and `output` (a single string class). The first previewed `input` is a long numeric table in which each row lists an index n, a value x, and x/2 (truncated).
# File: lib/kb_hmmer/kb_hmmerImpl.py
# -*- coding: utf-8 -*-
#BEGIN_HEADER
import os
import sys
import shutil
import hashlib
import subprocess
import requests
import re
import traceback
import uuid
from datetime import datetime
from pprint import pprint, pformat
import numpy as np
import math
import gzip
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_protein
#from biokbase.workspace.client import Workspace as workspaceService
from Workspace.WorkspaceClient import Workspace as workspaceService
from requests_toolbelt import MultipartEncoder
from biokbase.AbstractHandle.Client import AbstractHandle as HandleService
# SDK Utils
from KBaseDataObjectToFileUtils.KBaseDataObjectToFileUtilsClient import KBaseDataObjectToFileUtils
from DataFileUtil.DataFileUtilClient import DataFileUtil as DFUClient
from KBaseReport.KBaseReportClient import KBaseReport
# silence whining
import requests
requests.packages.urllib3.disable_warnings()
#END_HEADER
class kb_hmmer:
'''
Module Name:
kb_hmmer
Module Description:
** A KBase module: kb_hmmer
**
** This module contains HMMER Hidden Markov Model Sequence Search and Alignment
**
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "1.2.2"
GIT_URL = "https://github.com/kbaseapps/kb_hmmer"
GIT_COMMIT_HASH = "4b495b811a6bcf532801133d832d175ea5069ae1"
#BEGIN_CLASS_HEADER
workspaceURL = None
shockURL = None
handleURL = None
callbackURL = None
scratch = None
HMMER_BIN = os.path.join(os.sep, 'kb', 'module', 'hmmer', 'binaries')
HMMER_BUILD = os.path.join(HMMER_BIN, 'hmmbuild') # construct profile HMM(s) from MSA(s)
HMMER_MAKE_DB = os.path.join(HMMER_BIN, 'makehmmerdb') # build a HMMER binary db from a seq file
HMMER_SEARCH = os.path.join(HMMER_BIN, 'hmmsearch') # search profile(s) against a sequence db
HMMER_PHMMER = os.path.join(HMMER_BIN, 'phmmer') # search protein sequence(s) against a protein sequence db
HMMER_NHMMER = os.path.join(HMMER_BIN, 'nhmmer') # search nuc sequence(s) against a nuc sequence db
HMMER_JACKHAMMER = os.path.join(HMMER_BIN, 'jackhmmer') # iteratively search sequence(s) against a protein db
#HMMER_ALIGN = '/kb/module/hmmer/binaries/hmmalign' # align sequences to a profile HMM
#HMMER_PRESS = '/kb/module/hmmer/binaries/hmmpress' # prepare HMM db for hmmscan
#HMMER_SCAN = '/kb/module/hmmer/binaries/hmmscan' # scan prot sequence(s) against protein profile db
#HMMER_NSCAN = '/kb/module/hmmer/binaries/nhmmscan' # scan nuc sequence(s) against nuc profile db
# dbCAN CAZy search App
dbCAN_version = 'v6'
dbCAN_HMMS_DIR = os.path.join(os.sep, 'kb', 'module', 'data', 'dbCAN', 'dbCAN-' + dbCAN_version)
dbCAN_HMMS_PATH = os.path.join(dbCAN_HMMS_DIR, 'dbCAN-fam-HMMs.txt.' + dbCAN_version)
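# Illustrative sketch (not code from this module): the binaries above are
# typically driven through subprocess with standard HMMER 3.x options such as
# --tblout (tabular hit output) and -E (E-value cutoff), e.g.
#   cmd = [self.HMMER_SEARCH, '--tblout', 'hits.tbl', '-E', '0.001',
#          'profile.hmm', 'targets.faa']
#   subprocess.check_call(cmd)
# where 'profile.hmm' and 'targets.faa' are placeholder file names.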
# target is a list for collecting log messages
def log(self, target, message):
# we should do something better here...
if target is not None:
target.append(message)
print(message)
sys.stdout.flush()
# Helper script borrowed from the transform service, logger removed
#
def upload_file_to_shock(self,
console, # DEBUG
shock_service_url=None,
filePath=None,
ssl_verify=True,
token=None):
"""
Use HTTP multi-part POST to save a file to a SHOCK instance.
"""
self.log(console, "UPLOADING FILE " + filePath + " TO SHOCK")
if token is None:
raise Exception("Authentication token required!")
#build the header
header = dict()
header["Authorization"] = "Oauth {0}".format(token)
if filePath is None:
raise Exception("No file given for upload to SHOCK!")
dataFile = open(os.path.abspath(filePath), 'rb')
m = MultipartEncoder(fields={'upload': (os.path.split(filePath)[-1], dataFile)})
header['Content-Type'] = m.content_type
#logger.info("Sending {0} to {1}".format(filePath,shock_service_url))
try:
response = requests.post(shock_service_url + "/node", headers=header,
data=m, allow_redirects=True, verify=ssl_verify)
dataFile.close()
except:
dataFile.close()
raise
if not response.ok:
response.raise_for_status()
result = response.json()
if result['error']:
raise Exception(result['error'][0])
else:
return result["data"]
def _check_MSA_sequence_type_correct(self, MSA_in, row_order, seq_type):
PROT_MSA_pattern = re.compile("^[\.\-_acdefghiklmnpqrstvwyACDEFGHIKLMNPQRSTVWYxX ]+$")
DNA_MSA_pattern = re.compile("^[\.\-_ACGTUXNRYSWKMBDHVacgtuxnryswkmbdhv \t\n]+$")
this_appropriate_sequence_found_in_MSA_input = True
msa_invalid_msgs = []
# Check for PROTEIN sequence type
#
if seq_type.startswith('P') or seq_type.startswith('p'):
if 'sequence_type' in MSA_in and (MSA_in['sequence_type'] == 'dna' or MSA_in['sequence_type'] == 'DNA'):
this_appropriate_sequence_found_in_MSA_input = False
else:
for row_id in row_order:
#self.log(console, row_id+": '"+MSA_in['alignment'][row_id]+"'") # DEBUG
if DNA_MSA_pattern.match(MSA_in['alignment'][row_id]):
self.log(msa_invalid_msgs,
"Finding nucleotide instead of protein sequences in MSA. " +
"BAD record for MSA row_id: " + row_id + "\n" + MSA_in['alignment'][row_id] + "\n")
this_appropriate_sequence_found_in_MSA_input = False
break
elif not PROT_MSA_pattern.match(MSA_in['alignment'][row_id]):
self.log(msa_invalid_msgs,
"Not finding protein sequence in MSA. " +
"BAD record for MSA row_id: " + row_id + "\n" + MSA_in['alignment'][row_id] + "\n")
this_appropriate_sequence_found_in_MSA_input = False
break
# Check for NUCLEOTIDE sequence type
#
elif seq_type.startswith('N') or seq_type.startswith('n'):
if 'sequence_type' in MSA_in and (MSA_in['sequence_type'] != 'dna' and MSA_in['sequence_type'] != 'DNA'):
this_appropriate_sequence_found_in_MSA_input = False
else:
for row_id in row_order:
#self.log(console, row_id+": '"+MSA_in['alignment'][row_id]+"'") # DEBUG
if not DNA_MSA_pattern.match(MSA_in['alignment'][row_id]):
self.log(msa_invalid_msgs,
"Not Finding nucleotide in MSA. " +
"BAD record for MSA row_id: " + row_id + "\n" + MSA_in['alignment'][row_id] + "\n")
this_appropriate_sequence_found_in_MSA_input = False
break
elif PROT_MSA_pattern.match(MSA_in['alignment'][row_id]):
self.log(msa_invalid_msgs,
"Finding protein sequence instead of nucleotide sequences in MSA. " +
"BAD record for MSA row_id: " + row_id + "\n" + MSA_in['alignment'][row_id] + "\n")
this_appropriate_sequence_found_in_MSA_input = False
break
else:
raise ValueError("Incorrectly formatted call of _check_MSA_sequence_type_correct() method")
# return sequence type check logical
#
return (this_appropriate_sequence_found_in_MSA_input, msa_invalid_msgs)
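# Illustrative example (structure assumed from the fields read above):
#   MSA_in = {'sequence_type': 'protein',
#             'alignment': {'row_1': 'MKV-LLA', 'row_2': 'MKVQLL-'}}
#   ok, msgs = self._check_MSA_sequence_type_correct(MSA_in, ['row_1', 'row_2'], 'protein')
# ok is True when every row matches the protein alphabet pattern, and False
# (with messages collected in msgs) when a row looks like DNA or contains
# invalid characters.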
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
self.workspaceURL = config['workspace-url']
self.shockURL = config['shock-url']
self.handleURL = config['handle-service-url']
self.serviceWizardURL = config['service-wizard-url']
# self.callbackURL = os.environ['SDK_CALLBACK_URL'] if os.environ['SDK_CALLBACK_URL'] != None else 'https://kbase.us/services/njs_wrapper'
self.callbackURL = os.environ.get('SDK_CALLBACK_URL')
if self.callbackURL is None:
raise ValueError("SDK_CALLBACK_URL not set in environment")
self.scratch = os.path.abspath(config['scratch'])
if self.scratch is None:
self.scratch = os.path.join('/kb', 'module', 'local_scratch')
if not os.path.exists(self.scratch):
os.makedirs(self.scratch)
# set i/o dirs
timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds() * 1000)
self.input_dir = os.path.join(self.scratch, 'input.' + str(timestamp))
self.output_dir = os.path.join(self.scratch, 'output.' + str(timestamp))
if not os.path.exists(self.input_dir):
os.makedirs(self.input_dir)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
#END_CONSTRUCTOR
pass
def HMMER_MSA_Search(self, ctx, params):
"""
Method for HMMER search of an MSA against many sequences
**
** overloading as follows:
** input_msa_ref: MSA
** input_many_ref: SequenceSet, FeatureSet, Genome, GenomeSet
** output_name: SequenceSet (if input_many is SequenceSet), (else) FeatureSet
:param params: instance of type "HMMER_Params" (HMMER Input Params)
-> structure: parameter "workspace_name" of type "workspace_name"
(** The workspace object refs are of form: ** ** objects =
ws.get_objects([{'ref':
params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
the entire name combining the workspace id and the object name **
"id" is a numerical identifier of the workspace or object, and
should just be used for workspace ** "name" is a string identifier
of a workspace or object. This is received from Narrative.),
parameter "input_many_ref" of type "data_obj_ref", parameter
"input_msa_ref" of type "data_obj_ref", parameter
"output_filtered_name" of type "data_obj_name", parameter
"e_value" of Double, parameter "bitscore" of Double, parameter
"overlap_perc" of Double, parameter "maxaccepts" of Double
:returns: instance of type "HMMER_Output" (HMMER Output) ->
structure: parameter "report_name" of type "data_obj_name",
parameter "report_ref" of type "data_obj_ref"
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN HMMER_MSA_Search
console = []
invalid_msgs = []
msa_invalid_msgs = []
search_tool_name = 'HMMER_MSA_prot'
self.log(console, 'Running ' + search_tool_name + '_Search with params=')
self.log(console, "\n" + pformat(params))
report = ''
# report = 'Running '+search_tool_name+'_Search with params='
# report += "\n"+pformat(params)
#appropriate_sequence_found_in_one_input = False
appropriate_sequence_found_in_MSA_input = False
appropriate_sequence_found_in_many_input = False
genome_id_feature_id_delim = '.f:'
# set hmmer_dir
hmmer_dir = os.path.join(self.output_dir, 'hmmer_run')
if not os.path.exists(hmmer_dir):
os.makedirs(hmmer_dir)
#### do some basic checks
#
if 'workspace_name' not in params:
raise ValueError('workspace_name parameter is required')
# if 'input_one_ref' not in params:
# raise ValueError('input_one_ref parameter is required')
if 'input_msa_ref' not in params:
raise ValueError('input_msa_ref parameter is required')
if 'input_many_ref' not in params:
raise ValueError('input_many_ref parameter is required')
if 'output_filtered_name' not in params:
raise ValueError('output_filtered_name parameter is required')
# set local names
# input_one_ref = params['input_one_ref']
input_msa_ref = params['input_msa_ref']
input_many_ref = params['input_many_ref']
#### Get the input_msa object
##
# if input_one_feature_id == None:
# self.log(invalid_msgs,"input_one_feature_id was not obtained from Query Object: "+input_one_name)
# master_row_idx = 0
try:
ws = workspaceService(self.workspaceURL, token=ctx['token'])
#objects = ws.get_objects([{'ref': input_msa_ref}])
objects = ws.get_objects2({'objects': [{'ref': input_msa_ref}]})['data']
input_msa_data = objects[0]['data']
info = objects[0]['info']
input_msa_name = str(info[1])
msa_type_name = info[2].split('.')[1].split('-')[0]
except Exception as e:
raise ValueError('Unable to fetch input_msa_name object from workspace: ' + str(e))
#to get the full stack trace: traceback.format_exc()
if msa_type_name != 'MSA':
raise ValueError('Cannot yet handle input_msa type of: ' + msa_type_name)
else:
self.log(console, "\n\nPROCESSING MSA " + input_msa_name + "\n") # DEBUG
MSA_in = input_msa_data
row_order = []
default_row_labels = dict()
if 'row_order' in MSA_in.keys():
row_order = MSA_in['row_order']
else:
row_order = sorted(MSA_in['alignment'].keys())
if 'default_row_labels' in MSA_in.keys():
default_row_labels = MSA_in['default_row_labels']
else:
for row_id in row_order:
default_row_labels[row_id] = row_id
# determine row index of query sequence
# for row_id in row_order:
# master_row_idx += 1
# if row_id == input_one_feature_id:
# break
# if master_row_idx == 0:
# self.log(invalid_msgs,"Failed to find query id "+input_one_feature_id+" from Query Object "+input_one_name+" within MSA: "+input_msa_name)
from __future__ import division
import json
from datetime import datetime
import os
import data_tools as dt
def annotate(input_json, output_csv, data_path, overwrite=False):
"""
Annotate for RetinaNet https://github.com/fizyr/keras-retinanet
Only annotates the object classes (not areas for segmentation)
:param input_json: json input file path
:param output_csv: csv annotation file path
:param data_path: path of data
:param overwrite:
:return:
"""
fd_json = open(input_json, 'r')
y = json.load(fd_json)
fd_json.close()
start_time = datetime.now()
if os.path.isfile(output_csv) and not overwrite:
print('File ' + output_csv + ' already exists. Not written.')
return
fd_out = open(output_csv, 'w')
object_classes = []
for img_id in xrange(len(y)):
name = y[img_id][u'name']
is_empty = True
for label in y[img_id][u'labels']:
cat = label[u'category']
if cat not in object_classes:
if u'box2d' in label.keys():
object_classes.append(cat)
else:
continue
b2d = label[u'box2d']
x1, y1, x2, y2 = b2d['x1'], b2d['y1'], b2d['x2'], b2d['y2']
x_min, y_min = min(x1, x2), min(y1, y2)
x_max, y_max = max(x1, x2), max(y1, y2)
if int(x_max) <= int(x_min) or int(y_max) <= int(y_min):
continue
row = ('%s,%d,%d,%d,%d,%s\n' % (data_path + name, x_min, y_min, x_max, y_max, cat))
fd_out.write(row)
is_empty = False
if is_empty:
row = ('%s,,,,,\n' % (data_path + name))
fd_out.write(row)
fd_out.close()
print('File successfully written', output_csv, 'in (s)', str(datetime.now() - start_time))
return object_classes
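# Example of rows written above (keras-retinanet CSV annotation format; paths
# and values are illustrative):
#   /data/bdd100k/images/0001.jpg,100,120,220,310,car
#   /data/bdd100k/images/0002.jpg,,,,,
# The second form is emitted for images without any annotated boxes.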
def annotate_tiny(input_json, output_path, data_path, overwrite=False):
"""
Annotate a subset (10k) of the bdd100k from the validation data
:param input_json:
:param output_path:
:param data_path:
:param overwrite:
:return:
"""
if os.path.isfile(output_path + 'tiny_train_annot.csv') and not overwrite:
print('File ' + output_path + 'tiny_train_annot.csv' + ' already exists. Not written.')
else:
annotate_tiny_range(input_json, output_path+'tiny_train_annot.csv', data_path, xrange(0, 70), overwrite)
if os.path.isfile(output_path + 'tiny_val_annot.csv') and not overwrite:
print('File ' + output_path + 'tiny_val_annot.csv' + ' already exists. Not written.')
else:
annotate_tiny_range(input_json, output_path+'tiny_val_annot.csv', data_path, xrange(70, 80), overwrite)
return output_path+'tiny_train_annot.csv', output_path+'tiny_val_annot.csv'
def annotate_tiny_range(input_json, output_csv, data_path, range, overwrite=False):
fd_json = open(input_json, 'r')
y = json.load(fd_json)
fd_json.close()
if os.path.isfile(output_csv) and not overwrite:
print('File ' + output_csv + ' already exists. Not written.')
return
fd_out = open(output_csv, 'w')
object_classes = []
for img_id in range:
name = y[img_id][u'name']
is_empty = True
for label in y[img_id][u'labels']:
cat = label[u'category']
if cat not in object_classes:
if u'box2d' in label.keys():
object_classes.append(cat)
else:
continue
b2d = label[u'box2d']
x1, y1, x2, y2 = b2d['x1'], b2d['y1'], b2d['x2'], b2d['y2']
x_min, y_min = min(x1, x2), min(y1, y2)
x_max, y_max = max(x1, x2), max(y1, y2)
if int(x_max) <= int(x_min) or int(y_max) <= int(y_min):
continue
row = ('%s,%d,%d,%d,%d,%s\n' % (data_path + name, x_min, y_min, x_max, y_max, cat))
fd_out.write(row)
is_empty = False
if is_empty:
row = ('%s,,,,,\n' % (data_path + name))
fd_out.write(row)
fd_out.close()
return object_classes
def class_mapping(classes=None, input_json=None, output_csv='class_mapping.csv', overwrite=False):
"""
Writes the class mapping file for objects categories
:param classes: list of classes or categories names
:param input_json: file path for json labels
:param output_csv: file path for class mapping file
:param overwrite:
:return: path of output file
"""
if os.path.isfile(output_csv) and not overwrite:
print('File ' + output_csv + ' already exists. Not written.')
return output_csv
if input_json:
with open(input_json, 'r') as fd_json:
y = json.load(fd_json)
classes = []
for img_id in xrange(len(y)):
for label in y[img_id][u'labels']:
cat = label[u'category']
if cat not in classes:
if u'box2d' in label.keys():
classes.append(cat)
else:
continue
else:
if not classes:
print('No input for class mapping')
return
with open(output_csv, 'w') as cl_map:
for k in xrange(len(classes)):
cl_map.write(classes[k] + ',' + str(k) + '\n')
print('Class mapping successfully written')
return output_csv
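# Example class-mapping file produced above (category names and order are illustrative):
#   car,0
#   person,1
#   traffic sign,2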
def get_label_names(class_map_file):
labels_to_names = {}
with open(class_map_file, 'r') as map_file:
for l in map_file.readlines():
labels_to_names[int(l.split(',')[1])] = l.split(',')[0]
print(labels_to_names)
return labels_to_names
def avg_box_size(annotation_file):
x = 0
y = 0
n = 0
with open(annotation_file, 'r') as f:
for l in f.readlines():
split = l.split(',')
assert len(split) == 6
x_min, y_min, x_max, y_max = int(split[1]), int(split[2]), int(split[3]), int(split[4])
x += (x_max - x_min)
y += (y_max - y_min)
n += 1
x_avg = x / n
y_avg = y / n
return x_avg, y_avg, n
def do_boxes_collide(box1, box2):
x_min1, y_min1, x_max1, y_max1 = box1
x_min2, y_min2, x_max2, y_max2 = box2
if x_max2 <= x_min1:
return False
elif x_min2 >= x_max1:
return False
else:
if y_max2 <= y_min1:
return False
elif y_min2 >= y_max1:
return False
else:
return True
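# Worked example: boxes are (x_min, y_min, x_max, y_max) tuples, so
#   do_boxes_collide((0, 0, 10, 10), (5, 5, 15, 15))   # -> True  (they overlap)
#   do_boxes_collide((0, 0, 10, 10), (10, 0, 20, 10))  # -> False (x_min2 >= x_max1)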
def get_box(line):
split = line.split(',')
return int(split[1]), int(split[2]), int(split[3]), int(split[4])
def annotate_objects(annotation_file, output):
# Not optimizing the number of boxes
start_time = datetime.now()
with open(annotation_file, 'r') as input_annot:
with open(output, 'w') as out:
line = input_annot.readline()
while line:
curr_id = str(line.split(',')[0])
boxes = []
while line and str(line.split(',')[0]) == curr_id:
box1 = get_box(line)
collision = False
for box2 in boxes:
if do_boxes_collide(box1, box2):
collision = True
continue
if not collision:
boxes.append(box1)
out.write(line)
line = input_annot.readline()
print('File successfully written', output, 'in (s)', str(datetime.now() - start_time))
def adjust_ratio(box, ratio):
x_min, y_min, x_max, y_max = box
x = x_max - x_min
y = y_max - y_min
target_ratio = ratio[0] / ratio[1]
img_ratio = x / y
if target_ratio > img_ratio:
target_width = y * target_ratio
x_min -= (target_width - x)//2
x_max += (target_width - x)//2 + (target_width - x) % 2
elif target_ratio < img_ratio:
target_height = x / target_ratio
y_min -= (target_height - y) // 2
y_max += (target_height - y) // 2 + (target_height - y) % 2
return x_min, y_min, x_max, y_max
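# Worked example: a 20x40 box widened to a 1:1 target ratio.
#   adjust_ratio((0, 0, 20, 40), (1, 1))  # -> (-10.0, 0, 30.0, 40), i.e. 40x40
# Negative coordinates produced here are shifted back inside the image later by
# adjust_position().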
def adjust_size(box, format):
# Adds margin if formatted image is too small
x_min, y_min, x_max, y_max = box
if (x_max - x_min) < format[0]:
x = x_max - x_min
y = y_max - y_min
x_min -= (format[0] - x) // 2
x_max += (format[0] - x) // 2 + (format[0] - x) % 2
y_min -= (format[1] - y) // 2
y_max += (format[1] - y) // 2 + (format[1] - y) % 2
assert (x_max - x_min) == format[0] and (y_max - y_min) == format[1]
return x_min, y_min, x_max, y_max
def adjust_position(box, image_size):
x_min, y_min, x_max, y_max = box
x = x_max - x_min
y = y_max - y_min
if x > image_size[0] or y > image_size[1]:
return None
if x_min < 0:
x_max -= x_min
x_min -= x_min
elif x_max > image_size[0]:
x_min -= x_max - image_size[0]
x_max -= x_max - image_size[0]
elif y_min < 0:
y_max -= y_min
y_min -= y_min
elif y_max > image_size[1]:
y_min -= y_max - image_size[1]
y_max -= y_max - image_size[1]
return x_min, y_min, x_max, y_max
def build_dataset(obj_annot_file, output_path, labels_file, make_attributes_file=True):
"""
Builds classification dataset from object detection dataset.
For BDD100k: 1044675 training images from training folder
150738 val images from val folder
Not optimized to maximise number of images.
Not time-optimized.
Minimum size of object: 44 pixels horizontally or vertically in the original images.
:param obj_annot_file: annotation file of format https://github.com/fizyr/keras-retinanet
:param output_path: where images will be saved
:param labels_file: names of files for classification ground truth
:param make_attributes_file:
:return:
"""
min_size = 44 # set image size = 64x64, max margin = 20
format = (64, 64, 3)
img_size = (1280, 720)
cnt = 0
print('Starting extraction...')
start_time = datetime.now()
obj_annot = open(obj_annot_file, 'r')
labels_fd = open(labels_file, 'w')
if make_attributes_file:
attributes_fd = open(labels_file[:-4] + '_attributes.csv', 'w')
orig_boxes = []
line = obj_annot.readline()
curr = line.split(',')[0]
boxes = []
classes = []
while line:
if line.split(',')[0] != curr:
_, file_names = dt.crop_resize(curr, boxes, resize_format=format, output_path=output_path)
for i in xrange(len(classes)):
labels_fd.write(file_names[i] + ',' + classes[i])
if make_attributes_file:
attributes_fd.write(file_names[i] + ',' + str(orig_boxes[i]) + ',' + classes[i])
# next image
boxes = []
orig_boxes = []
classes = []
curr = line.split(',')[0]
box = get_box(line)
if box[2] - box[0] < min_size and box[3] - box[1] < min_size:  # skip objects smaller than min_size both horizontally and vertically
line = obj_annot.readline()
continue
else:
orig_boxes.append(box)
box = adjust_ratio(box, format[:2])
box = adjust_size(box, format[:2])
box = adjust_position(box, img_size)
if not box:
line = obj_annot.readline()
continue
boxes.append(box)
classes.append(line.split(',')[-1])
cnt += 1
line = obj_annot.readline()
_, file_names = dt.crop_resize(curr, boxes, resize_format=format, output_path=output_path)
assert len(classes) == len(file_names)
for i in xrange(len(classes)):
labels_fd.writelines('%s,%s' % (file_names[i], classes[i]))
obj_annot.close()
labels_fd.close()
if make_attributes_file:
print('Written ' + labels_file[:-4] + '_attributes.csv')
attributes_fd.close()
print(str(cnt) + ' images successfully generated in ' + output_path + ' in '
+ str(datetime.now() - start_time) + '(s)')
def get_ids_labels(labels_file, class_map_file):
name_to_label = {}
with open(class_map_file, 'r') as map_file:
for l in map_file.readlines():
name_to_label[str(l.split(',')[0])] = int(l.split(',')[1])
# Author: <NAME> (<EMAIL>)
import argparse
import dynet as dy
import numpy as np
import os
import pickle
import random
import sys
import time
from collections import Counter
from copy import deepcopy
########################### useful generic operations ##########################
def get_boundaries(bio):
"""
Extracts an ordered list of boundaries. BIO label sequences can be either
- Raw BIO: B I I O => {(0, 2, None)}
- Labeled BIO: B-PER I-PER B-LOC O => {(0, 1, "PER"), (2, 2, "LOC")}
"""
boundaries = []
i = 0
while i < len(bio):
if bio[i][0] == 'O': i += 1
else:
s = i
entity = bio[s][2:] if len(bio[s]) > 2 else None
i += 1
while i < len(bio) and bio[i][0] == 'I':
if len(bio[i]) > 2 and bio[i][2:] != entity: break
i += 1
boundaries.append((s, i - 1, entity))
return boundaries
def label_bio(bio, ents):
labeled_bio = []
i = 0
counter = 0
while i < len(bio):
if bio[i][0] == 'O':
labeled_bio.append('O')
i += 1
else:
labeled_bio.append(bio[i][0] + '-' + ents[counter])
i += 1
while i < len(bio) and bio[i][0] == 'I':
labeled_bio.append(bio[i][0] + '-' + ents[counter])
i += 1
counter += 1
return labeled_bio
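# Worked example: re-attaching predicted entity types to a raw BIO sequence.
#   label_bio(['B', 'I', 'O', 'B'], ['PER', 'LOC'])
#   # -> ['B-PER', 'I-PER', 'O', 'B-LOC']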
def score_crf(start_b, T, end_b, score_vecs, inds):
total = start_b[inds[0]] + score_vecs[0][inds[0]]
for i in xrange(1, len(score_vecs)):
total += T[inds[i-1]][inds[i]] + score_vecs[i][inds[i]]
total += end_b[inds[-1]]
return total
def viterbi(start_b, T, end_b, score_vecs, valid):
num_labels = len(valid)
bp = [[None for _ in xrange(num_labels)] for _ in xrange(len(score_vecs))]
pi = [[None for _ in xrange(num_labels)] for _ in xrange(len(score_vecs))]
for y in xrange(num_labels): pi[0][y] = score_vecs[0][y] + start_b[y]
for i in xrange(1, len(score_vecs)):
for y in xrange(num_labels):
score_best = float("-inf")
y_prev_best = None
valid_previous_labels = valid[y]
for y_prev in valid_previous_labels:
score = pi[i-1][y_prev] + T[y_prev][y] + score_vecs[i][y]
if score > score_best:
y_prev_best = y_prev
score_best = score
pi[i][y] = score_best
bp[i][y] = y_prev_best
best_y = np.argmax([pi[-1][y] + end_b[y] for y in xrange(num_labels)])
pred_rev = [best_y]
for i in reversed(xrange(1, len(score_vecs))):
best_y = bp[i][best_y]
pred_rev.append(best_y)
return pred_rev[::-1]
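# Note on the `valid` argument: valid[y] lists the labels allowed to precede
# label y. For a BIO encoding like {'B': 0, 'I': 1, 'O': 2}, a hypothetical
# constraint set forbidding 'I' after 'O' would be
#   valid = {0: [0, 1, 2], 1: [0, 1], 2: [0, 1, 2]}
# The actual structure is supplied by the caller.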
def drop(x, x_count):
"""Drops x with higher probabiliy if x is less frequent."""
return random.random() > x_count[x] / (x_count[x] + 0.25)
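# e.g. a word seen once is dropped with probability 0.25 / 1.25 = 0.2, while a
# word seen 100 times is dropped only with probability 0.25 / 100.25 (about 0.0025).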
def bilstm_single(inputs, lstm1, lstm2):
"""Computes a single embedding of input expressions using 2 LTSMs."""
f = lstm1.initial_state()
b = lstm2.initial_state()
for input_f, input_b in zip(inputs, reversed(inputs)):
f = f.add_input(input_f)
b = b.add_input(input_b)
return dy.concatenate([f.output(), b.output()])
def bilstm(inputs, lstm1, lstm2):
"""Computes embeddings of input expressions using 2 LTSMs."""
f = lstm1.initial_state()
b = lstm2.initial_state()
outs_f = []
outs_b = []
for input_f, input_b in zip(inputs, reversed(inputs)):
f = f.add_input(input_f)
b = b.add_input(input_b)
outs_f.append(f.output())
outs_b.append(b.output())
outs = []
for i, out_b in enumerate(reversed(outs_b)):
outs.append(dy.concatenate([outs_f[i], out_b]))
return outs
def stringfy(words, start, end):
"""
Converts a sentence with an entity marked by start and end into a single
string.
"""
string = ""
delim = "__"
start_marker = "{{"
end_marker = "}}"
for i, word in enumerate(words):
if i == start: word = start_marker + word
if i == end: word = word + end_marker
if i < len(words) - 1: word = word + delim
string += word
return string
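# Worked example (words are illustrative):
#   stringfy(['Barack', 'Obama', 'spoke'], 0, 1)
#   # -> '{{Barack__Obama}}__spoke'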
################################# data #########################################
class Seq(object):
def __init__(self, w_seq, l_seq=None):
self.w_seq = w_seq # word sequence
self.l_seq = l_seq # label sequence
self.bio_pred = []
self.ent_pred = []
def evaluate(self, tp, fp, fn, all_ent="<all>"):
gold_boundaries = get_boundaries(self.l_seq)
pred_boundaries_untyped = get_boundaries(self.bio_pred)
pred_boundaries = []
for i in xrange(len(pred_boundaries_untyped)):
s, t, _ = pred_boundaries_untyped[i]
entity = self.ent_pred[i]
pred_boundaries.append((s, t, entity))
gold_boundaries = set(gold_boundaries)
pred_boundaries = set(pred_boundaries)
for (s, t, entity) in gold_boundaries:
if (s, t, entity) in pred_boundaries:
tp[entity] += 1
tp[all_ent] += 1
else:
fn[entity] += 1
fn[all_ent] += 1
for (s, t, entity) in pred_boundaries:
if not (s, t, entity) in gold_boundaries:
fp[entity] += 1
fp[all_ent] += 1
class SeqData(object):
def __init__(self, data_path):
self.seqs = []
self.w_enc = {} # "dog" -> 35887
self.c_enc = {} # "d" -> 20
self.l_enc = {} # "B-ORG" -> 7
self.e_enc = {} # "PER" -> 2
self.__ALL = "<all>" # Denotes all entity types.
self.w_count = Counter()
self.c_count = Counter()
with open(data_path) as infile:
w_seq = []
l_seq = []
for line in infile:
toks = line.split()
if toks:
w = toks[0]
l = toks[1]
self.w_count[w] += 1
if not w in self.w_enc: self.w_enc[w] = len(self.w_enc)
for c in w:
self.c_count[c] += 1
if not c in self.c_enc: self.c_enc[c] = len(self.c_enc)
if not l in self.l_enc: self.l_enc[l] = len(self.l_enc)
w_seq.append(w)
l_seq.append(l)
else:
if w_seq:
self.seqs.append(Seq(w_seq, l_seq))
w_seq = []
l_seq = []
if w_seq:
self.seqs.append(Seq(w_seq, l_seq))
for l in self.l_enc:
if len(l) > 1 and not l[2:] in self.e_enc:
self.e_enc[l[2:]] = len(self.e_enc)
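# The parser above expects two whitespace-separated columns per line (a token
# and its BIO label), with a blank line separating sentences, e.g. (illustrative):
#   John  B-PER
#   Smith I-PER
#   works O
#   <blank line>
#   in    O
#   Paris B-LOC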
def evaluate(self):
keys = self.e_enc.keys() + [self.__ALL]
tp = {e: 0 for e in keys}
fp = {e: 0 for e in keys}
fn = {e: 0 for e in keys}
for seq in self.seqs:
seq.evaluate(tp, fp, fn, self.__ALL)
self.p = {}
self.r = {}
for e in keys:
pZ = tp[e] + fp[e]
rZ = tp[e] + fn[e]
self.p[e] = 100. * tp[e] / pZ if pZ > 0. else 0.
self.r[e] = 100. * tp[e] / rZ if rZ > 0. else 0.
return self.f1(self.__ALL)
def f1(self, cat):
f1Z = self.p[cat] + self.r[cat]
f1 = 2. * self.p[cat] * self.r[cat] / f1Z if f1Z > 0. else 0.0
return f1
def write(self, path):
with open(path, 'w') as outf:
for seq in self.seqs:
pred = label_bio(seq.bio_pred, seq.ent_pred)
for i in xrange(len(seq.w_seq)):
outf.write(seq.w_seq[i] + " " + seq.l_seq[i] + " " + pred[i]
+ "\n")
outf.write("\n")
################################## model #######################################
class Mention2Vec(object):
def __init__(self):
self.__is_training = False
self.__UNK = "<?>"
self.__BIO_ENC = {'B': 0, 'I': 1, 'O': 2}
self.__BIO_DEC = {self.__BIO_ENC[x]: x for x in self.__BIO_ENC}
self.crep_cache = {}
self.entemb_path = None
def set_entemb(self, entemb_path):
self.entemb_path = entemb_path
open(self.entemb_path, 'w').close() # Clear before appending
def config(self, wdim, cdim, ldim, model_path, wemb_path, epochs,
loss, dropout_rate, learning_rate):
self.wdim = wdim
self.cdim = cdim
self.ldim = ldim
self.model_path = model_path
self.wemb_path = wemb_path
self.epochs = epochs
self.loss = loss
self.dropout_rate = dropout_rate
self.learning_rate = learning_rate
def train(self, data, dev=None):
self.m = dy.ParameterCollection()
self.__init_params(data)
self.__enable_lstm_dropout()
self.__is_training = True
if os.path.isfile(self.model_path): os.remove(self.model_path)
if not os.path.exists(self.model_path): os.makedirs(self.model_path)
trainer = dy.AdamTrainer(self.m, self.learning_rate)
perf_best = 0.
exists = False
for epoch in xrange(self.epochs):
inds = [i for i in xrange(len(data.seqs))]
random.shuffle(inds)
for i in inds:
loss = self.get_loss(data.seqs[i])
loss.backward()
trainer.update()
if dev:
self.__is_training = False
self.__disable_lstm_dropout()
perf, _ = self.get_perf(dev)
print "Epoch {0:d} F1: {1:.2f}".format(epoch + 1, perf),
if perf > perf_best:
perf_best = perf
print 'new best - saving model',
self.save()
exists = True
self.__is_training = True
self.__enable_lstm_dropout()
print
else:
self.save()
if exists:
m = Mention2Vec()
m.load_and_populate(self.model_path)
perf, _ = m.get_perf(dev)
print "Best dev F1: {0:.2f}".format(perf)
def save(self):
self.m.save(os.path.join(self.model_path, "model"))
with open(os.path.join(self.model_path, "info.pickle"), 'w') as outf:
pickle.dump((self.w_enc, self.wdim, self.c_enc, self.cdim,
self.ldim, self.e_dec, self.loss), outf)
def load_and_populate(self, model_path):
self.m = dy.ParameterCollection()
self.model_path = model_path
with open(os.path.join(self.model_path, "info.pickle")) as inf:
self.w_enc, self.wdim, self.c_enc, self.cdim, self.ldim, \
self.e_dec, self.loss = pickle.load(inf)
self.wlook = self.m.add_lookup_parameters((len(self.w_enc), self.wdim))
self.clook = self.m.add_lookup_parameters((len(self.c_enc), self.cdim))
self.__init_others()
self.m.populate(os.path.join(self.model_path, "model"))
self.__disable_lstm_dropout()
def get_perf(self, test):
assert not self.__is_training
self.crep_cache.clear()
for w in test.w_enc: self.crep_cache[w] = self.get_crep(w).vec_value()
start_time = time.time()
num_words = 0
for i in xrange(len(test.seqs)):
self.get_loss(test.seqs[i])
num_words += len(test.seqs[i].w_seq)
return test.evaluate(), int(num_words / (time.time() - start_time))
def get_crep(self, w):
"""Character-based representation of word w"""
if not self.__is_training and w in self.crep_cache:
crep = dy.vecInput(2 * self.cdim)
crep.set(self.crep_cache[w])
return crep
inputs = []
for c in w:
if self.__is_training and drop(c, self.c_count): c = self.__UNK
if not c in self.c_enc: c = self.__UNK
inputs.append(dy.lookup(self.clook, self.c_enc[c]))
return bilstm_single(inputs, self.clstm1, self.clstm2)
def get_wemb(self, w):
"""Word embedding of word w"""
if self.__is_training and drop(w, self.w_count): w = self.__UNK
if not w in self.w_enc: w = self.__UNK
return dy.lookup(self.wlook, self.w_enc[w])
def get_loss_boundary(self, inputs, seq):
"""
Computes boundary loss for this sequence based on input vectors.
"""
W_bio1 = dy.parameter(self.l2bio1)
W_bio1b = dy.parameter(self.l2bio1b)
W_bio2 = dy.parameter(self.l2bio2)
W_bio2b = dy.parameter(self.l2bio2b)
def ff(h): return W_bio2 * dy.tanh(W_bio1 * h + W_bio1b) + W_bio2b
gs = [ff(h) for h in inputs]
# =============================================================================
# Federal University of Rio Grande do Sul (UFRGS)
# Connectionist Artificial Intelligence Laboratory (LIAC)
# <NAME> - <EMAIL>
# =============================================================================
# Copyright (c) 2011 <NAME>, renato.ppontes at gmail dot com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
import chess
from .pieces import *
__all__ = ['Board']
PIECES = {
'p': Pawn,
'r': Rook,
'b': Bishop,
'q': Queen,
'n': Knight,
# 'k': King, # no king for now
}
class Board(object):
'''Board is the structure responsible for storing the game pieces and for
handling the game rules and the game flow.
The board cells are represented here as a matrix (a list of lists,
precisely).
'''
def __init__(self, board='.'*64, win_methods=[], tie_methods=[],
allow_enpassant=False, allow_castle=False,
allow_rook_promotion=False, allow_queen_promotion=False,
allow_knight_promotion=False, allow_bishop_promotion=False):
'''Board constructor.
The `board` parameter is a string defining the initial position of
pieces in this board. It must be a 64-characters string where the
character ``.`` represents an empty cell and the characters ``prbqnk``
represent the chess pieces. In this notation, the lowercase characters
represent the black pieces, and the uppercase characters represent the
white pieces. The first position of the string represents the ``A8``
cell of the chess board, the second position of the string is the
``A7`` cell, and so on.
The ``win_methods`` parameter is a list of functions that, given a
``Board`` object, verify whether one player has won the game, returning
``chess.BLACK`` or ``chess.WHITE``, or returning ``chess.NONE`` if no
winning condition has been reached.
Similarly, ``tie_methods`` is a list of functions that, given a ``Board``
object, verify whether the game ended in a draw. These functions return a
boolean.
Both ``win_methods`` and ``tie_methods`` are actually lists of function
names. When the board is initialized, this class looks the functions up in
the ``chess.models`` namespace in order to use them.
All other parameters are flags that turn specific game rules on or off,
such as the en passant and castle movements or the pawn promotions.
:param board: a string with the initial board configuration. Default to
empty board.
:param win_methods: a list with functions that verify the end-game
conditions. Default to empty list.
:param tie_methods: a list with function that verify the tie
conditions. Default to empty list.
:param allow_enpassant: is enpassant movement allowed in this game?
Default to false.
:param allow_castle: is castle movement allowed in this game? Default
to false.
:param allow_rook_promotion: is promotion to rook allowed in this game?
Default to false.
:param allow_queen_promotion: is promotion to queen allowed in this
game? Default to false.
:param allow_knight_promotion: is promotion to knight allowed in this
game? Default to false.
:param allow_bishop_promotion: is promotion to bishop allowed in this
game? Default to false.
'''
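# Illustrative example (editor's sketch based on the cell ordering described
# in the docstring, not part of the original project): a board holding only a
# black pawn on A7 and a white pawn on A2 would be written as
#   board = '.' + 'p' + '.'*4 + 'P' + '.'*57
# since index 0 is A8, index 1 is A7, ..., and index 6 is A2.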
# End game verifiers
self.win_methods = self._get_win_methods(win_methods)
self.tie_methods = self._get_tie_methods(tie_methods)
# Game rule flags
self.allow_enpassant = allow_enpassant
self.allow_castle = allow_castle
self.allow_rook_promotion = allow_rook_promotion
self.allow_queen_promotion = allow_queen_promotion
self.allow_knight_promotion = allow_knight_promotion
self.allow_bishop_promotion = allow_bishop_promotion
# Board structures
self._cells = [[None for j in xrange(8)] for i in xrange(8)]
self.pieces = []
self.black_pieces = []
self.white_pieces = []
# State variables
# - Enpassant
self.enpassant = None
self.enpassant_piece = None
# - Castle
self.white_has_queen_castle = False
self.white_has_king_castle = False
self.black_has_queen_castle = False
self.black_has_king_castle = False
# - Check
self.black_in_check = False
self.white_in_check = False
# - Other state variables
self.who_moves = chess.WHITE
self.nocapture_moves = 0
self.bad_move = False
self.white_infractions = 0
self.black_infractions = 0
self.move_time = chess.config['max_move_time']
self.game_time = 0
self.white_time = 0
self.black_time = 0
self.winner = chess.NONE
self.draw = False
# Initializes the board
self.set_board(board)
def _get_win_methods(self, methods):
'''Receives a list of function names and returns a list of function
objects. The functions must be in the ``chess.models`` namespace,
otherwise a `ValueError` will be raised.
:param methods: a list of function names.
:return: a list of functions.
'''
r = []
for method_name in methods:
name = 'win_'+method_name
if hasattr(chess.models, name):
r.append(getattr(chess.models, name))
else:
raise ValueError('Win method "%s" not found.'%method_name)
return r
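# Illustrative example (editor's note, the method name is hypothetical):
# passing win_methods=['checkmate'] makes the board look up a function called
# 'win_checkmate' in the chess.models namespace; tie methods are resolved the
# same way with a 'tie_' prefix (see _get_tie_methods below).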
def _get_tie_methods(self, methods):
'''Receive a list of function names and returns a list of function
objects. The functions must be in the ``chess.models`` namespace,
otherwise a `ValueError` will be raised.
:param methods: a list of function names.
:return: a list of functions.
'''
r = []
for method_name in methods:
name = 'tie_'+method_name
if hasattr(chess.models, name):
r.append(getattr(chess.models, name))
else:
raise ValueError('Tie method "%s" not found.'%method_name)
return r
def _verify_win(self):
'''Verify all the win conditions by calling the functions in the
`win_methods` list. The result is recorded in the `winner` variable.
'''
for win in self.win_methods:
r = win(self)
if r != chess.NONE:
self.winner = r
return
def _verify_tie(self):
'''Verify all the draw conditions by calling the functions in the
`tie_methods` list. The result is recorded in the `draw` variable.
'''
for tie in self.tie_methods:
if tie(self):
self.draw = True
return
def __getitem__(self, pos):
'''Access the board cells
:param pos: a 2-tuple with the board row and col.
:return: the piece located at the position, or None.
'''
if not 0 <= pos[0] <= 7 or not 0 <= pos[1] <= 7:
return None
return self._cells[pos[0]][pos[1]]
def __setitem__(self, pos, value):
'''Set the piece at a given position on the board.
:param pos: a 2-tuple with the board row and col.
:param value: a piece or None.
'''
self._cells[pos[0]][pos[1]] = value
def update(self, tick):
'''Update the game.
If the player doesn't perform a move before a maximum amount of time
(``chess.config["max_move_time"]``), the player receives an infraction.
:param tick: the time elapsed since the last update.
:return: a boolean telling if the player received an infraction by
maximum time.
'''
self.move_time -= tick
self.game_time += tick
has_infractions = False
if self.who_moves == chess.WHITE:
self.white_time += tick
else:
self.black_time += tick
if self.move_time <= 0:
self.move_time = chess.config['max_move_time']
if self.who_moves == chess.WHITE:
self.white_infractions += 1
if self.white_infractions >= chess.config['max_infractions']:
self.winner = chess.BLACK
else:
self.black_infractions += 1
if self.black_infractions >= chess.config['max_infractions']:
self.winner = chess.WHITE
has_infractions = True
return has_infractions
def move(self, from_pos, to_pos):
'''Perform a move verifying the validity of the move.
If the player tries to perform an invalid movement, the ``bad_move``
flag is registered on the board state and an infraction is counted.
:param from_pos: the current piece position.
:param to_pos: the next position of the piece.
'''
from_piece = self[from_pos]
to_piece = self[to_pos]
# Change bad move state
self.bad_move = False
# Verify if it is a valid move
if not self.is_valid_move(from_pos, to_pos):
self.bad_move = True
if self.who_moves == chess.BLACK:
self.black_infractions += 1
else:
self.white_infractions += 1
self.move_time = chess.config['max_move_time']
self._verify_win()
return
# Verify if it is an enpassant capture
if self.enpassant and from_piece.type == chess.PAWN:
if to_pos[0] == self.enpassant[0] and to_pos[1] == self.enpassant[1]:
to_piece = self.enpassant_piece
# Verify if it is a capture (to remove the captured piece from the board)
as angles relative to
the field center, while the offsets are returned as lengths relative
to the nominal fiber center.
Parameters
----------
angle_x : astropy.units.Quantity
Angular separation from the field center along x.
angle_y : astropy.units.Quantity
Angular separation from the field center along y.
wavelength : astropy.units.Quantity
Wavelength where the blur should be calculated.
Returns
-------
tuple
Tuple (dx, dy) of astropy quantities giving the spot centroid
offset components at this wavelength and position in the focal
plane. Offsets are given in length units, e.g., microns.
"""
return self._offset_function(angle_x, angle_y, wavelength)
def get_focal_plane_optics(self, focal_x, focal_y, wlen_grid):
"""Calculate the optical parameters at a set of focal-plane positions.
Uses :meth:`get_centroid_offset`, :meth:`get_blur_rms`, and
:meth:`field_radius_to_angle` to calculate the optics at each focal
plane location.
This method does not make any assumptions about how the x and y
axes are defined, as long as (0, 0) is the field center. However
radial symmetry is broken by the (dx, dy) offsets calculated by
:meth:`get_centroid_offset`.
Note that units are required for the input arrays and included with
the returned arrays.
Parameters
----------
focal_x : :class:`astropy.units.Quantity`
1D array of X coordinates in the focal plane relative to the
boresight, with length units.
focal_y : :class:`astropy.units.Quantity`
1D array of Y coordinates in the focal plane relative to the
boresight, with length units.
wlen_grid : :class:`astropy.units.Quantity`
1D array of wavelengths where parameters should be tabulated,
with length units.
Returns
-------
tuple
Tuple of arrays scale, blur, offset with shapes (N,2), (N,M) and
(N,M,2) where N is the size of the 1D input (x,y) arrays, M is
the size of the input wavelength grid, and axes of length 2
correspond to radial and azimuthal axes (not the input x,y!).
All output arrays have units.
"""
# Check for valid units on the input arrays.
try:
focal_x_mm = focal_x.to(u.mm).value
focal_y_mm = focal_y.to(u.mm).value
wlen_grid_ang = wlen_grid.to(u.Angstrom).value
except astropy.units.UnitConversionError:
raise ValueError('Input arrays have invalid units.')
except AttributeError:
raise ValueError('Input arrays are missing required units.')
# Check for expected input array shapes.
if len(focal_x_mm.shape) != 1 or len(wlen_grid_ang.shape) != 1:
raise ValueError('Input arrays must be 1D.')
if focal_x_mm.shape != focal_y_mm.shape:
raise ValueError('Input (x,y) arrays have different shapes.')
# Allocate output arrays.
n_xy = len(focal_x_mm)
n_wlen = len(wlen_grid_ang)
scale = np.empty((n_xy, 2))
blur = np.empty((n_xy, n_wlen))
offset = np.empty((n_xy, n_wlen, 2))
# Convert x, y offsets in length units to field angles.
focal_r = np.sqrt(focal_x**2+focal_y**2)
angle_r = self.field_radius_to_angle(focal_r)
angle_x = np.zeros(focal_x.shape) * angle_r.unit
angle_y = np.zeros(focal_y.shape) * angle_r.unit
positive_radius = focal_r>0
angle_x[positive_radius] = (
angle_r[positive_radius] / focal_r[positive_radius]
) * focal_x[positive_radius]
angle_y[positive_radius] = (
angle_r[positive_radius] / focal_r[positive_radius]
) * focal_y[positive_radius]
# Calculate the radial and azimuthal plate scales at each location.
scale[:, 0] = self.radial_scale(focal_r).to(u.um / u.arcsec).value
scale[:, 1] = self.azimuthal_scale(focal_r).to(u.um / u.arcsec).value
# Calculate the transformations between polar and Cartesian coordinates.
phi = np.arctan2(focal_y_mm, focal_x_mm)
cos_phi = np.cos(phi)
sin_phi = np.sin(phi)
# Lookup the instrument blur and centroid offset at each
# wavelength for this focal-plane position.
for i, wlen in enumerate(wlen_grid):
# Lookup the RMS blurs in focal-plane microns.
blur[:, i] = self.get_blur_rms(wlen, angle_r).to(u.um).value
# Lookup the radial centroid offsets in focal-plane microns.
dx, dy = self.get_centroid_offset(angle_x, angle_y, wlen)
dx_um = dx.to(u.um).value
dy_um = dy.to(u.um).value
# Rotate to polar coordinates.
offset[:, i, 0] = cos_phi * dx_um + sin_phi * dy_um
offset[:, i, 1] = -sin_phi * dx_um + cos_phi * dy_um
return scale * (u.um / u.arcsec), blur * u.um, offset * u.um
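# Usage sketch (editor's example; `instrument` is an assumed, already
# initialized instance of this class, not defined here):
#
#   import numpy as np
#   import astropy.units as u
#   focal_x = np.array([0., 10., 50.]) * u.mm
#   focal_y = np.zeros(3) * u.mm
#   wlen_grid = np.linspace(4000., 9000., 5) * u.Angstrom
#   scale, blur, offset = instrument.get_focal_plane_optics(
#       focal_x, focal_y, wlen_grid)
#   # scale.shape == (3, 2), blur.shape == (3, 5), offset.shape == (3, 5, 2)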
def plot_field_distortion(self):
"""Plot focal plane distortions over the field of view.
Requires that the matplotlib package is installed.
"""
import matplotlib.pyplot as plt
# Tabulate the field radius - angle mapping.
radius = np.linspace(0., self.field_radius.to(u.mm).value, 500) * u.mm
angle = self.field_radius_to_angle(radius).to(u.deg)
# Calculate the r**2 weighted mean inverse radial scale by minimizing
# angle - mean_inv_radial_scale * radius with respect to
# mean_inv_radial_scale.
mean_inv_radial_scale = (
np.sum(radius ** 3 * angle) / np.sum(radius ** 4))
mean_radial_scale = (1. / mean_inv_radial_scale).to(u.um / u.arcsec)
# Calculate the angular distortion relative to the mean radial scale.
distortion = (angle - radius * mean_inv_radial_scale).to(u.arcsec)
# Eliminate round off error so that the zero distortion case is
# correctly recovered.
distortion = np.round(distortion, decimals=5)
# Calculate the fiber area as a function of radius.
radial_size = (
0.5 * self.fiber_diameter / self.radial_scale(radius))
azimuthal_size = (
0.5 * self.fiber_diameter / self.azimuthal_scale(radius))
fiber_area = (np.pi * radial_size * azimuthal_size).to(u.arcsec ** 2)
# Calculate the r**2 weighted mean fiber area.
mean_fiber_area = np.sum(radius ** 2 * fiber_area) / np.sum(radius ** 2)
# Calculate the dimensionless fiber area ratio.
fiber_area_ratio = (fiber_area / mean_fiber_area).si.value
# Calculate the dimensionless ratio of azimuthal / radial plate scales
# which is the ratio of the on-sky radial / azimuthal extents.
shape_ratio = (self.azimuthal_scale(radius) /
self.radial_scale(radius)).si.value
# Make the plots.
fig, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(8, 8))
ax1.plot(angle, distortion, 'b-', lw=2)
ax1.set_ylabel('Field angle distortion [arcsec]', fontsize='large')
ax1.set_xlim(0., self.field_angle.to(u.deg).value)
ax1.grid()
ax1.axhline(0., color='r')
xy = 0.5 * self.field_angle.to(u.deg).value, 0.
label = '{0:.1f}'.format(mean_radial_scale)
ax1.annotate(label, xy, xy, color='r', horizontalalignment='center',
verticalalignment='bottom', fontsize='large')
ax2.plot(angle, fiber_area_ratio, 'b', lw=2, label='Area ratio')
ax2.plot(angle, shape_ratio, 'k', lw=2, ls='--',
label='Radial/azimuthal')
ax2.set_ylabel('Fiber sky area and shape ratios', fontsize='large')
ax2.grid()
ax2.legend(loc='upper right')
ax2.axhline(1., color='r')
xy = 0.5 * self.field_angle.to(u.deg).value, 1.
label = '{0:.3f}'.format(mean_fiber_area)
ax2.annotate(label, xy, xy, color='r', horizontalalignment='center',
verticalalignment='bottom', fontsize='large')
ax2.set_xlabel('Field angle [deg]', fontsize='large')
plt.subplots_adjust(
left=0.10, right=0.98, bottom=0.07, top=0.97, hspace=0.05)
def plot(self, flux=1e-17 * u.erg / (u.cm**2 * u.s * u.Angstrom),
exposure_time=1000 * u.s, cmap='nipy_spectral'):
"""Plot a summary of this instrument's model.
Requires that the matplotlib package is installed.
Parameters
----------
flux : astropy.units.Quantity
Constant source flux to use for displaying the instrument response.
exposure_time : astropy.units.Quantity
Exposure time to use for displaying the instrument response.
cmap : str or matplotlib.colors.Colormap
Matplotlib colormap name or instance to use for displaying the
instrument response. Colors are selected for each camera
according to its central wavelength, so a spectral color map
will give reasonably intuitive results.
"""
import matplotlib.pyplot as plt
import matplotlib.cm as cm
fig, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(8, 8))
ax1_rhs = ax1.twinx()
ax2_rhs = ax2.twinx()
cmap = cm.get_cmap(cmap)
wave = self._wavelength.value
wave_unit = self._wavelength.unit
dwave = np.gradient(wave)
if self.fiber_acceptance_dict:
for source_type in self.fiber_acceptance_dict:
# Plot fiber acceptance fractions without labels.
ax1.plot(wave, self.fiber_acceptance_dict[source_type], 'k--')
for camera in self.cameras:
cwave = camera._wavelength
# Use an approximate spectral color for each band.
mid_wave = 0.5 * (camera.wavelength_min + camera.wavelength_max)
color = cmap(
(mid_wave - self.wavelength_min) /
(self.wavelength_max - self.wavelength_min))
# Calculate number of photons with perfect fiber acceptance.
nphot = (flux * self.photons_per_bin * exposure_time *
camera.throughput / dwave)
dark_noise = np.sqrt(
(camera.dark_current_per_bin * exposure_time).value)
total_noise = np.sqrt(
dark_noise ** 2 + camera.read_noise_per_bin.value ** 2)
ax1.plot(cwave, camera.throughput, ls='-', color=color)
ax1_rhs.plot(cwave, nphot.value, ls=':', color=color)
ax1_rhs.fill_between(
cwave, total_noise / dwave, lw=0, color=color, alpha=0.2)
ax1_rhs.fill_between(
cwave, dark_noise / dwave, lw=0, color=color, alpha=0.2)
ax1_rhs.plot(cwave, total_noise / dwave, ls='-.', color=color)
ax2.plot(
cwave, camera.rms_resolution.to(wave_unit).value,
ls='-', color=color)
ax2.plot(
cwave, camera.row_size.to(wave_unit / u.pixel).value,
ls='--', color=color)
ax2_rhs.plot(
cwave, camera.neff_spatial.to(u.pixel), ls=':', color=color)
ax1.plot([], [], 'k--', label='Fiber Acceptance')
ax1.plot([], [], 'k-', label='Camera Throughput')
ax1.plot([], [], 'k:', label='{0}'.format(flux))
ax1.plot([], [], 'k-.', label='Dark + Readout Noise')
ax1.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
ax2.plot([], [], 'k-', label='RMS Resolution')
ax2.plot([], [], 'k--', label='Row Size')
ax2.plot([], [], 'k:', label='Column Size')
ax2.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
ax1.set_ylim(0., None)
ax1.set_ylabel('Fiber, Camera Throughput')
ax1_rhs.set_ylim(0., None)
ax1_rhs.set_ylabel(
'Photons, Electrons / Exposure / {0}'.format(wave_unit))
ax2.set_ylim(0., None)
ax2.set_ylabel('RMS Resolution, Row Size [{0}]'.format(wave_unit))
ax2_rhs.set_ylim(0., None)
ax2_rhs.set_ylabel('Effective Column Size [pixels]')
ax2.set_xlabel('Wavelength [{0}]'.format(wave_unit))
ax2.set_xlim(wave[0], wave[-1])
def initialize(config, camera_output=True):
"""Initialize the instrument model from configuration parameters.
This method is responsible for creating a new :class:`Instrument` as
well as the :class:`Cameras <specsim.camera.Camera>` it includes.
Parameters
----------
config : :class:`specsim.config.Configuration`
The configuration parameters to use.
camera_output : bool
Initialize support for resolution convolution and downsampling for
each camera when True.
Returns
-------
Instrument
An initialized instrument model including one or more
:class:`cameras <specsim.camera.Camera>`.
"""
name = config.instrument.name
cameras = config.instrument.cameras
camera_names
# Copyright 2022 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representations of the system's Snaps, and abstractions around managing them.
The `snap` module provides convenience methods for listing, installing, refreshing, and removing
Snap packages, in addition to setting and getting configuration options for them.
In the `snap` module, `SnapCache` creates a dict-like mapping of `Snap` objects at when
instantiated. Installed snaps are fully populated, and available snaps are lazily-loaded upon
request. This module relies on an installed and running `snapd` daemon to perform operations over
the `snapd` HTTP API.
`SnapCache` objects can be used to install or modify Snap packages by name in a manner similar to
using the `snap` command from the commandline.
An example of adding Juju to the system with `SnapCache` and setting a config value:
```python
try:
cache = snap.SnapCache()
juju = cache["juju"]
if not juju.present:
juju.ensure(snap.SnapState.Latest, channel="beta")
juju.set({"some.key": "value", "some.key2": "value2"})
except snap.SnapError as e:
logger.error("An exception occurred when installing charmcraft. Reason: %s", e.message)
```
In addition, the `snap` module provides "bare" methods which can act on Snap packages as
simple function calls. :meth:`add`, :meth:`remove`, and :meth:`ensure` are provided, as
well as :meth:`add_local` for installing directly from a local `.snap` file. These return
`Snap` objects.
As an example of installing several Snaps and checking details:
```python
try:
nextcloud, charmcraft = snap.add(["nextcloud", "charmcraft"])
if nextcloud.get("mode") != "production":
nextcloud.set({"mode": "production"})
except snap.SnapError as e:
logger.error("An exception occurred when installing snaps. Reason: %s", e.message)
```
"""
import http.client
import json
import logging
import os
import socket
import subprocess
import sys
import urllib.error
import urllib.parse
import urllib.request
from collections.abc import Mapping
from enum import Enum
from subprocess import CalledProcessError
from typing import Dict, Iterable, List, Optional, Union
logger = logging.getLogger(__name__)
# The unique Charmhub library identifier, never change it
LIBID = "05394e5893f94f2d90feb7cbe6b633cd"
# Increment this major API version when introducing breaking changes
LIBAPI = 1
# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 2
def _cache_init(func):
def inner(*args, **kwargs):
if _Cache.cache is None:
_Cache.cache = SnapCache()
return func(*args, **kwargs)
return inner
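# Illustrative sketch (editor's note): module-level helpers are expected to be
# wrapped with this decorator so the shared SnapCache is created lazily on
# first use; e.g. (hypothetical wrapping shown for illustration only):
#
#   @_cache_init
#   def add(snap_names, state=SnapState.Latest, channel=""):
#       ...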
class MetaCache(type):
"""MetaCache class used for initialising the snap cache."""
@property
def cache(cls) -> "SnapCache":
"""Property for returning the snap cache."""
return cls._cache
@cache.setter
def cache(cls, cache: "SnapCache") -> None:
"""Setter for the snap cache."""
cls._cache = cache
def __getitem__(cls, name) -> "Snap":
"""Snap cache getter."""
return cls._cache[name]
class _Cache(object, metaclass=MetaCache):
_cache = None
class Error(Exception):
"""Base class of most errors raised by this library."""
def __repr__(self):
"""String representation of the Error class."""
return "<{}.{} {}>".format(type(self).__module__, type(self).__name__, self.args)
@property
def name(self):
"""Return a string representation of the model plus class."""
return "<{}.{}>".format(type(self).__module__, type(self).__name__)
@property
def message(self):
"""Return the message passed as an argument."""
return self.args[0]
class SnapAPIError(Error):
"""Raised when an HTTP API error occurs talking to the Snapd server."""
def __init__(self, body: Dict, code: int, status: str, message: str):
"""This shouldn't be instantiated directly."""
super().__init__(message) # Makes str(e) return message
self.body = body
self.code = code
self.status = status
self._message = message
def __repr__(self):
"""String representation of the SnapAPIError class."""
return "APIError({!r}, {!r}, {!r}, {!r})".format(
self.body, self.code, self.status, self._message
)
class SnapState(Enum):
"""The state of a snap on the system or in the cache."""
Present = "present"
Absent = "absent"
Latest = "latest"
Available = "available"
class SnapError(Error):
"""Raised when there's an error installing or removing a snap."""
class SnapNotFoundError(Error):
"""Raised when a requested snap is not known to the system."""
class Snap(object):
"""Represents a snap package and its properties.
`Snap` exposes the following properties about a snap:
- name: the name of the snap
- state: a `SnapState` representation of its install status
- channel: "stable", "candidate", "beta", and "edge" are common
- revision: a string representing the snap's revision
- confinement: "classic" or "strict"
"""
def __init__(
self,
name,
state: SnapState,
channel: str,
revision: str,
confinement: str,
cohort: Optional[str] = "",
) -> None:
self._name = name
self._state = state
self._channel = channel
self._revision = revision
self._confinement = confinement
self._cohort = cohort
def __eq__(self, other) -> bool:
"""Equality for comparison."""
return isinstance(other, self.__class__) and (
self._name,
self._revision,
) == (other._name, other._revision)
def __hash__(self):
"""A basic hash so this class can be used in Mappings and dicts."""
return hash((self._name, self._revision))
def __repr__(self):
"""A representation of the snap."""
return "<{}.{}: {}>".format(self.__module__, self.__class__.__name__, self.__dict__)
def __str__(self):
"""A human-readable representation of the snap."""
return "<{}: {}-{}.{} -- {}>".format(
self.__class__.__name__,
self._name,
self._revision,
self._channel,
str(self._state),
)
def _snap(self, command: str, optargs: Optional[Iterable[str]] = None) -> str:
"""Perform a snap operation.
Args:
command: the snap command to execute
optargs: an (optional) list of additional arguments to pass,
commonly confinement or channel
Raises:
SnapError if there is a problem encountered
"""
optargs = optargs or []
_cmd = ["snap", command, self._name, *optargs]
try:
return subprocess.check_output(_cmd, universal_newlines=True)
except CalledProcessError as e:
raise SnapError("Could not {} snap [{}]: {}".format(_cmd, self._name, e.output))
def get(self, key) -> str:
"""Gets a snap configuration value.
Args:
key: the key to retrieve
"""
return self._snap("get", [key]).strip()
def set(self, config: Dict) -> str:
"""Sets a snap configuration value.
Args:
config: a dictionary containing keys and values specifying the config to set.
"""
args = ['{}="{}"'.format(key, val) for key, val in config.items()]
return self._snap("set", [*args])
def unset(self, key) -> str:
"""Unsets a snap configuration value.
Args:
key: the key to unset
"""
return self._snap("unset", [key])
def _install(self, channel: Optional[str] = "", cohort: Optional[str] = "") -> None:
"""Add a snap to the system.
Args:
channel: the channel to install from
cohort: optional, the key of a cohort that this snap belongs to
"""
cohort = cohort or self._cohort
args = []
if self.confinement == "classic":
args.append("--classic")
if channel:
args.append('--channel="{}"'.format(channel))
if cohort:
args.append('--cohort="{}"'.format(cohort))
self._snap("install", args)
def _refresh(
self,
channel: Optional[str] = "",
cohort: Optional[str] = "",
leave_cohort: Optional[bool] = False,
) -> None:
"""Refresh a snap.
Args:
channel: the channel to install from
cohort: optionally, specify a cohort.
leave_cohort: leave the current cohort.
"""
channel = '--channel="{}"'.format(channel) if channel else ""
args = [channel]
if not cohort:
cohort = self._cohort
if leave_cohort:
self._cohort = ""
args.append("--leave-cohort")
elif cohort:
args.append('--cohort="{}"'.format(cohort))
self._snap("refresh", args)
def _remove(self) -> None:
"""Removes a snap from the system."""
return self._snap("remove")
@property
def name(self) -> str:
"""Returns the name of the snap."""
return self._name
def ensure(
self,
state: SnapState,
classic: Optional[bool] = False,
channel: Optional[str] = "",
cohort: Optional[str] = "",
):
"""Ensures that a snap is in a given state.
Args:
state: a `SnapState` to reconcile to.
classic: an (Optional) boolean indicating whether classic confinement should be used
channel: the channel to install from
cohort: optional. Specify the key of a snap cohort.
Raises:
SnapError if an error is encountered
"""
self._confinement = "classic" if classic or self._confinement == "classic" else ""
if state not in (SnapState.Present, SnapState.Latest):
# We are attempting to remove this snap.
if self._state in (SnapState.Present, SnapState.Latest):
# The snap is installed, so we run _remove.
self._remove()
else:
# The snap is not installed -- no need to do anything.
pass
else:
# We are installing or refreshing a snap.
if self._state not in (SnapState.Present, SnapState.Latest):
# The snap is not installed, so we install it.
self._install(channel, cohort)
else:
# The snap is installed, but we are changing it (e.g., switching channels).
self._refresh(channel, cohort)
self._state = state
@property
def present(self) -> bool:
"""Returns whether or not a snap is present."""
return self._state in (SnapState.Present, SnapState.Latest)
@property
def latest(self) -> bool:
"""Returns whether the snap is the most recent version."""
return self._state is SnapState.Latest
@property
def state(self) -> SnapState:
"""Returns the current snap state."""
return self._state
@state.setter
def state(self, state: SnapState) -> None:
"""Sets the snap state to a given value.
Args:
state: a `SnapState` to reconcile to.
outer join.
df = pd.merge(camd_cli_df, haoguo_cli_df,
how="left", # LEFT OUTER JOIN
on="Tumor_Sample_Barcode",
sort=False,
indicator='indicator_column2')
# This should be True since performing left_outer join
assert set(df['indicator_column2'].unique()) <= set(['both', 'left_only'])
# Fill-in missing values in Column derived from the Hao-Guo Clinical dataset
assert non_key_haoguo_cli == set(RECEPTOR_STATUS_COLS + ['Histology_Type_HAOGUO'])
for colname in non_key_haoguo_cli:
df[colname] = df[colname].fillna(value='Unknown')
df = combine_histology_type_data(df)
df = df.drop(['Histology_Type_CAMD', 'Histology_Type_HAOGUO'], axis=1)
return df
def check_merged_clinical_df(df):
"""Check the contents of the Merged Clinical DF."""
# Ensure that there is no missing data in any of the columns.
assert not df.isnull().values.any()
# Ensure that there is no unexpected receptor_status values.
for colname in RECEPTOR_STATUS_COLS:
assert set(df[colname].unique()) == set(RECEPTOR_STATUSES)
# Ensure that there is no unexpected 'Biopsy_Site_Type' values.
assert set(df['Biopsy_Site_Type'].unique()) <= set(BIOPSY_SITE_TYPES)
# Ensure that there is no unexpected 'Histology_Type' values.
assert set(df[df['Is_Breast_Carcinoma']]['Histology_Type'].unique()) <= set(HISTOLOGY_TYPES)
# Ensure that there is no unexpected 'Gender' values.
assert set(df['Gender'].unique()) <= set(GENDER_TYPES)
# Ensure that there is no unexpected 'Panel_Version' values.
assert set(df['Panel_Version'].unique()) <= set(PANEL_VERSIONS)
# Ensure that there are no duplicated 'Tumor_Sample_Barcode' values
assert not df.duplicated(subset=['Tumor_Sample_Barcode']).any()
return df
def combine_histology_type_data(cli_df):
"""Create new 'Histology_Type' column which merges the histology_type data
from the CAMD's and HAO-GUO's data sources."""
# Ensure that there is no missing data in any of the columns.
assert not cli_df.isnull().values.any()
assert set(cli_df[cli_df['Is_Breast_Carcinoma']]['Histology_Type_CAMD'].unique()) <= set(HISTOLOGY_TYPES)
assert set(cli_df['Histology_Type_HAOGUO'].unique()) <= set(HISTOLOGY_TYPES)
cli_df['Histology_Type'] = cli_df['Histology_Type_CAMD']
# If the Hao-Guo dataset assigns a known 'histology_type' value to the
# sample, then that assignment takes precedence over the CAMD assignment.
known = cli_df['Histology_Type_HAOGUO'] != 'Unknown'
cli_df.loc[known, 'Histology_Type'] = cli_df.loc[known, 'Histology_Type_HAOGUO']
return cli_df
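# Worked example (editor's note): for a sample with
# Histology_Type_CAMD == 'Invasive_Ductal_Carcinoma' and
# Histology_Type_HAOGUO == 'Invasive_Lobular_Carcinoma', the combined
# Histology_Type becomes 'Invasive_Lobular_Carcinoma'; if the HAOGUO value is
# 'Unknown', the CAMD value is kept.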
def check_patient_and_sample_matches(camd_cli_df, haoguo_cli_df):
"""Ensure consistency between the 'Patient_ID' and 'Tumor_Sample_Barcode'
matches.
"""
match_patients = set(camd_cli_df['Patient_ID'].unique()) & set(haoguo_cli_df['Patient_ID'].unique())
match_samples = set(camd_cli_df['Tumor_Sample_Barcode'].unique()) & set(haoguo_cli_df['Tumor_Sample_Barcode'].unique())
sample2patient_camd, patient2samples_camd = map_sample_and_patient(camd_cli_df)
sample2patient_haoguo, patient2samples_haoguo = map_sample_and_patient(haoguo_cli_df)
# For each 'Tumor_Sample_Barcode' match, there is a corresponding 'Patient_ID'
# match.
for sample_id in match_samples:
patient_camd = sample2patient_camd[sample_id]
patient_haoguo = sample2patient_haoguo[sample_id]
assert patient_camd == patient_haoguo
# For each 'Patient_ID' match, there is a corresponding 'Tumor_Sample_Barcode'
# match.
for patient_id in match_patients:
samples_camd = patient2samples_camd[patient_id]
sample_haoguo = patient2samples_haoguo[patient_id]
assert len(samples_camd & sample_haoguo) > 0
# Same number of matches.
assert len(match_patients) == len(match_samples)
def map_sample_and_patient(cli_df):
"""Create a dictionary mapping 'Tumor_Sample_Barcode' and 'Patient_ID' """
sample2patient = dict()
patient2samples = dict()
for index, row in cli_df.iterrows():
sample = row['Tumor_Sample_Barcode']
patient = row['Patient_ID']
if sample not in sample2patient:
sample2patient[sample] = set()
sample2patient[sample].add(patient)
if patient not in patient2samples:
patient2samples[patient] = set()
patient2samples[patient].add(sample)
for sample in sample2patient:
patients = sample2patient[sample]
assert len(patients) == 1
sample2patient[sample] = list(patients)[0]
return sample2patient, patient2samples
def import_camd_clinical(infile):
"""Import the DFCI Sample-level Clinical File [obtained from the Center for
Advanced Molecular Diagnostics (CAMD) lab].
Notes
-----
- Extract the following data:
(1) Patient_ID (e.g. 'CBIO_P10001') [Note: Used only for joining clinical tables + logging]
(2) Tumor_Sample_Barcode (e.g. 'CBIO_P10001_S1')
(3) Biopsy_Site_Type (see BIOPSY_SITE_TYPES list)
(4) Histology_Type_CAMD (see HISTOLOGY_TYPES list)
(5) Panel_Version (see PANEL_VERSIONS list) [Note: Only output in details_mode]
(6) CANCER_TYPE [Note: Only for debugging/logging purposes]
(7) Is_Breast_Sarcoma (True, False) [Note: Only for downstream filtering purposes]
(8) Is_Breast_Carcinoma (True, False) [Note: Only for downstream filtering purposes]
(9) Age_At_Seq_Report [Note: Only for downstream filtering purposes]
"""
df = pd.read_table(infile, sep="\t", dtype=str, comment="#", header=0)
df['Is_Breast_Sarcoma'] = (df['CANCER_TYPE'] == 'Breast Sarcoma') # Not considered a 'Breast Cancer'
df['Is_Breast_Carcinoma'] = (df['CANCER_TYPE'] == 'Breast Carcinoma')
# Retain only the required columns
required_columns = ['PATIENT_ID', 'SAMPLE_ID', 'ONCOTREE_BIOPSY_SITE_TYPE', 'CANCER_TYPE_DETAILED', 'PANEL_VERSION', 'CANCER_TYPE',
'Is_Breast_Carcinoma', 'Is_Breast_Sarcoma', 'AGE']
assert set(required_columns) <= set(df)
df = df[required_columns]
histology_colname = 'Histology_Type_CAMD'
df.rename(columns={'PATIENT_ID': 'Patient_ID'}, inplace=True)
df.rename(columns={'SAMPLE_ID': 'Tumor_Sample_Barcode'}, inplace=True)
df.rename(columns={'ONCOTREE_BIOPSY_SITE_TYPE': 'Biopsy_Site_Type'}, inplace=True)
df.rename(columns={'CANCER_TYPE_DETAILED': histology_colname}, inplace=True)
df.rename(columns={'PANEL_VERSION': 'Panel_Version'}, inplace=True)
df.rename(columns={'AGE': 'Age_At_Seq_Report'}, inplace=True)
# Standardize the values in the 'Biopsy_Site_type' column
df['Biopsy_Site_Type'].replace('Local Recurrence', 'Local_Recurrence', inplace=True)
df['Biopsy_Site_Type'].replace('Metastatic Recurrence', 'Metastatic', inplace=True)
df['Biopsy_Site_Type'].replace('Any/Other', 'Unknown', inplace=True)
df['Biopsy_Site_Type'].replace('Unspecified', 'Unknown', inplace=True)
df['Biopsy_Site_Type'].replace('Not Applicable', 'Unknown', inplace=True)
# Standardize the values in the histology_type column
df[histology_colname].replace('Breast Invasive Ductal Carcinoma', 'Invasive_Ductal_Carcinoma', inplace=True)
df[histology_colname].replace('Breast Invasive Lobular Carcinoma', 'Invasive_Lobular_Carcinoma', inplace=True)
df[histology_colname].replace('Breast Mixed Ductal and Lobular Carcinoma', 'Mixed_Ductal_and_Lobular_Carcinoma', inplace=True)
df[histology_colname].replace('Invasive Breast Carcinoma', 'Other_Invasive_Breast_Carcinoma', inplace=True)
df[histology_colname].replace('Adenoid Cystic Breast Cancer', 'Other_Invasive_Breast_Carcinoma', inplace=True)
df[histology_colname].replace('Breast Invasive Carcinosarcoma, NOS', 'Other_Invasive_Breast_Carcinoma', inplace=True)
df[histology_colname].replace('Breast Invasive Cancer, NOS', 'Other_Invasive_Breast_Carcinoma', inplace=True)
df[histology_colname].replace('Breast Carcinoma with Signet Ring', 'Other_Invasive_Breast_Carcinoma', inplace=True)
df[histology_colname].replace('Breast Invasive Mixed Mucinous Carcinoma', 'Other_Invasive_Breast_Carcinoma', inplace=True)
df[histology_colname].replace('Solid Papillary Carcinoma of the Breast', 'Other_Invasive_Breast_Carcinoma', inplace=True)
df[histology_colname].replace('Adenomyoepithelioma of the Breast', 'Other_Breast_Cancer', inplace=True)
df[histology_colname].replace('Breast Ductal Carcinoma In Situ', 'Other_Breast_Cancer', inplace=True)
df[histology_colname].replace('Carcinoma with Chondroid Metaplasia', 'Other_Breast_Cancer', inplace=True)
df[histology_colname].replace('Metaplastic Breast Cancer', 'Other_Breast_Cancer', inplace=True)
df[histology_colname].replace('Paget Disease of the Nipple', 'Other_Breast_Cancer', inplace=True)
df.loc[~df.Is_Breast_Carcinoma, histology_colname] = 'Not_Breast_Carcinoma'
# Ensure that there is no missing data in any of the columns.
assert not df.isnull().values.any()
# Ensure that there are no duplicated 'Tumor_Sample_Barcode' values
assert not df.duplicated(subset=['Tumor_Sample_Barcode']).any()
# Check that for all rows, the Tumor_Sample_Barcode has the Patient_ID as its prefix.
for index, row in df.iterrows():
assert row['Tumor_Sample_Barcode'].startswith(row['Patient_ID'] + '_S')
return df
def import_camd_gender(infile):
"""Import the DFCI Patients' gender data file [obtained from the Center for
Advanced Molecular Diagnostics (CAMD) lab].
Notes
-----
- Extract the following data:
(1) PATIENT_ID (e.g. 'CBIO_P10001') [Note: Used only for joining clinical tables]
(2) Gender (see GENDER_TYPES list)
"""
df = pd.read_table(infile, sep="\t", dtype=str, comment="#", header=0)
# Retain only the required columns
required_columns = ['PATIENT_ID', 'GENDER']
df.rename(columns={'PATIENT_ID': 'Patient_ID'}, inplace=True)
df.rename(columns={'GENDER': 'Gender'}, inplace=True)
# Ensure that there are no duplicated 'Patient_ID' values
assert not df.duplicated(subset=['Patient_ID']).any()
return df
def import_haoguo_clinical(infile):
"""Import the DFCI clinical data file obtained from <NAME> (BCB).
For meaning of values in each column, also see the accompanying dictionary
document (located in the same directory as the Clinical Data file):
Clinicaldata_forIntelCCCproject_4-10-2017_Dictionary.xls
Notes
-----
- Extract the following data:
(1) Patient_ID (e.g. 'CBIO_P10001') [Note: Used only for consistency_check]
(2) Tumor_Sample_Barcode (e.g. 'CBIO_P10001_S1') [Note: Used only for joining clinical tables + logging]
(2) ER_Status (see RECEPTOR_STATUSES list)
(3) PR_Status (see RECEPTOR_STATUSES list)
(4) HER2_Status (see RECEPTOR_STATUSES list)
(5) Histology_Type_HAOGUO (see HISTOLOGY_TYPES list) [Note: When available, take precedent over CAMD's data]
"""
df = pd.read_table(infile, sep="\t", dtype=str, comment="#", header=0)
# Check that the Biopsy_site_type of all samples from Hao's Dataset is 'Primary'
assert (df["biopsy_site_type"] == 'Primary').all() == True
required_columns = ['cBio_PatientID', 'cBio_SampleID', 'er', 'pr', 'her2___ihc',
'HER2_fish', 'histology']
assert set(required_columns) <= set(df)
df = df[required_columns]
histology_colname = 'Histology_Type_HAOGUO'
df.rename(columns={'cBio_PatientID': 'Patient_ID'}, inplace=True)
df.rename(columns={'cBio_SampleID': 'Tumor_Sample_Barcode'}, inplace=True)
df.rename(columns={'er': 'ER_Status'}, inplace=True)
df.rename(columns={'pr': 'PR_Status'}, inplace=True)
df.rename(columns={'her2___ihc': 'HER2_IHC'}, inplace=True)
df.rename(columns={'HER2_fish': 'HER2_FISH'}, inplace=True)
df.rename(columns={'histology': histology_colname}, inplace=True)
df = infer_HER2_status(df)
df = df.drop(['HER2_IHC', 'HER2_FISH'], axis=1)
# Fill-in missing values
for colname in RECEPTOR_STATUS_COLS + [histology_colname]:
df[colname] = df[colname].fillna(value='Unknown')
# Standardize the values in the [ER,PR]_Status columns.
for colname in ['ER_Status', 'PR_Status']:
df[colname].replace('0.0', 'Negative', inplace=True)
df[colname].replace('1.0', 'Positive', inplace=True)
df[colname].replace('-1.0', 'Unknown', inplace=True)
df[colname].replace('93.0', 'Unknown', inplace=True)
# Map '2.0=Positive low (1-10)' to 'Negative' to be consistent with
# Receptor Status data from public datasets (e.g. TCGA-BRCA).
df[colname].replace('2.0', 'Negative', inplace=True)
# Standardize the values in the Histology_Type column
# (1) Used the following source to guide the mapping:
# (A) https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3047091/ (see Figure 1)
# (B) http://breast-cancer.ca/ (navigate to Section 5)
df[histology_colname].replace('0.0', 'Other_Breast_Cancer', inplace=True) # 'DCIS'
df[histology_colname].replace('1.0', 'Invasive_Ductal_Carcinoma', inplace=True) # 'Invasive ductal'
df[histology_colname].replace('2.0', 'Invasive_Lobular_Carcinoma', inplace=True) # 'Invasive lobular'
df[histology_colname].replace('3.0', 'Other_Invasive_Breast_Carcinoma', inplace=True) # 'Tubular'
df[histology_colname].replace('4.0', 'Other_Invasive_Breast_Carcinoma', inplace=True) # 'Mucinous'
df[histology_colname].replace('5.0', 'Other_Invasive_Breast_Carcinoma', inplace=True) # 'Micropapillary'
df[histology_colname].replace('6.0', 'Mixed_Ductal_and_Lobular_Carcinoma', inplace=True) # 'Mixed (IDC&ILS)'
df[histology_colname].replace('99.0', 'Unknown', inplace=True) # 'Other'
df[histology_colname].replace('-1.0', 'Unknown', inplace=True) # 'Unknown'
# Ensure that there is no unexpected 'Histology_Type' values.
assert set(df[histology_colname].unique()) <= set(HISTOLOGY_TYPES)
# Ensure that there are no duplicated 'Patient_ID' or 'Tumor_Sample_Barcode' values
assert not df.duplicated(subset=['Tumor_Sample_Barcode']).any()
assert not df.duplicated(subset=['Patient_ID']).any()
# Check that for all rows, the Tumor_Sample_Barcode has the Patient_ID as its prefix.
for index, row in df.iterrows():
assert row['Tumor_Sample_Barcode'].startswith(row['Patient_ID'] + '_S')
return df
def infer_HER2_status(df):
"""Infer the sample's HER2_Status from 'HER2_IHC' and 'HER2_FISH' data.
"""
# Figure out the HER2_Status of the sample based on HER2 IHC and FISH results
df['HER2_IHC'] = df['HER2_IHC'].fillna(value='Unknown')
df['HER2_IHC'].replace('0.0', 'Negative', inplace=True)
df['HER2_IHC'].replace('2.0', 'Equivocal', inplace=True)
df['HER2_IHC'].replace('3.0', 'Positive', inplace=True)
df['HER2_IHC'].replace('-1.0', 'Unknown', inplace=True)
df['HER2_IHC'].replace('93.0', 'Unknown', inplace=True)
df['HER2_FISH'] = df['HER2_FISH'].fillna(value='Unknown')
df['HER2_FISH'].replace('0.0', 'Negative', inplace=True)
df['HER2_FISH'].replace('2.0', 'Equivocal', inplace=True)
df['HER2_FISH'].replace('1.0', 'Positive', inplace=True)
df['HER2_FISH'].replace('-1.0', 'Unknown', inplace=True)
df['HER2_FISH'].replace('93.0', 'Unknown', inplace=True)
for colname in ['HER2_IHC', 'HER2_FISH']:
assert set(df[colname].unique()) == set(['Positive', 'Negative', 'Equivocal', 'Unknown'])
number of contributors read from the
# file.
log.debug('Contributors: {0} total representing {1} employers.'.format(contributorCount, len(contributorSet)))
# Set the global value.
self._defaultContributorSet = contributorSet
return contributorSet
def generateLicense(self, path):
licenseText = ""
# Check the input path before proceeding.
if not os.path.isfile(path):
log.warn('The specified file "{0}" is not a file or cannot be read.'.format(path))
log.warn('Generating an empty license.')
return licenseText
log.debug('Generating license for file:{0}'.format(path))
# The line separator. This is useful in various places in this method.
sep = '\n' # os.linesep
# Grab all of the comments from the file.
self._findComments(path)
# Gather all possible metadata from the file and its git history.
self._findDocMetadata(path)
self._findGitMetadata(path)
# Determine the substitutions that need to go in the license text.
dates = self._getDates()
copyrightOwners = self._getCopyrightOwners()
initialAuthor = self._getInitialAuthor()
contributorList = self._getContributors()
# Convert the contributors into a (perhaps multi-line) string.
contributorsString = ""
for contributor in contributorList:
contributorsString += contributor
contributorsString += sep
# Replace the format keys in the license format with the determined
# values for the file.
licenseFormat = self._licenseFormat
pattern = re.compile('{DATE}')
licenseFormat = pattern.sub(dates, licenseFormat);
pattern = re.compile('{COPYRIGHT_OWNERS}')
licenseFormat = pattern.sub(copyrightOwners, licenseFormat);
pattern = re.compile('{AUTHOR}')
licenseFormat = pattern.sub(initialAuthor, licenseFormat);
pattern = re.compile('{CONTRIBUTORS}')
licenseFormat = pattern.sub(contributorsString, licenseFormat);
# Build the license text using the appropriate multiline comment
# characters for the source file.
starter = self._getMultilineCommentLineStarter(path)
licenseText = self._getMultilineCommentFirstLine(path) + sep
for line in licenseFormat.splitlines():
# Get the next line of the output license text.
outputLine = starter + line
# If the line is longer than the character limit, split it over
# multiple lines. There's also logic here to not break words.
while len(outputLine) > self._charLimit:
i = outputLine.rfind(' ', 0, self._charLimit) + 1
licenseText += outputLine[:i] + sep
outputLine = starter + outputLine[i:]
# Add the last (or only) output line.
licenseText += outputLine + sep
licenseText += self._getMultilineCommentLastLine()
return licenseText
def _findComments(self, path):
'''
Finds all comment blocks and places them (excluding the comment opener,
line openers (if present), and the comment ender) as separate strings,
one for each block, in self._commentBlocks
@param path: The path to the source file in question.
@return: Nothing. _commentBlocks is modified.
'''
# Opens the file and searches for all content in multiline comments.
# Note: This is potentially dangerous as the files contents are read
# into memory. Although this is (at least currently) highly unusual for
# a source file to be beyond a few thousand lines.
with open(path, 'r') as f:
self._commentBlocks = re.findall('/\*+(.*?)\*+/', f.read(), re.DOTALL)
# Replace all leading asterisks. We shouldn't destroy empty lines, hence
# the \n is omitted from the second whitespace character class.
regex = re.compile('^\s*\*+[ \t\r\f\v]*', re.MULTILINE)
for i in range(len(self._commentBlocks)):
self._commentBlocks[i] = regex.sub('', self._commentBlocks[i])
return
def _findDocMetadata(self, path):
'''
Constructs all metadata that can be obtained from the specified file's
existing documentation. This includes clearing and updating
_existingDateList and _existingAuthorSet.
@param path: The path to the file whose documentation will be scanned.
@return: Nothing. _existingDateList and _existingAuthorSet are modified.
'''
# Clear out the previous metadata.
self._existingDateList = []
self._existingAuthorSet = set()
# If the header comment contains the copyright date info, try to get the
# first (and last year, if available) from it.
if len(self._commentBlocks) > 0:
headerComment = self._commentBlocks[0]
result = re.match('^.*Copyright.*?\s+(\d{4})(,\s*(\d{4}))?.*$', headerComment, re.MULTILINE)
if result:
self._existingDateList.append(int(result.group(1)))
if len(result.groups()) == 3:
self._existingDateList.append(int(result.group(3)))
# Print the found dates to the debug log.
if len(self._existingDateList) == 0:
log.debug('Found no existing copyright date.')
else:
log.debug('Found existing copyright dates: {0}'.format(self._existingDateList))
# Find the authors for the @author tags.
regex = re.compile('^author')
for commentBlock in self._commentBlocks:
# Split the comment block into sections by @ tags.
tagSplit = commentBlock.split('@')
# Process each section after an @ sign where the first string is
# 'author' (this is in the pre-compiled regex).
for i in range(1, len(tagSplit)):
tagBlock = tagSplit[i]
result = regex.match(tagBlock)
# An author tag was found!
if result:
# Ignore the 'author' part of the tag "block".
authors = tagBlock.split()
authors.pop(0)
# Replaces all whitespace with a single space.
authors = ' '.join(authors).split(',')
# Loops over each found author and either adds their
# preferred name or the trimmed name to the set of existing
# authors.
for author in authors:
author = author.strip()
if author in self._authorDictionary:
self._existingAuthorSet.add(self._authorDictionary[author])
else:
self._existingAuthorSet.add(author)
# Print the found authors to the debug log.
if len(self._existingAuthorSet) == 0:
log.debug('Found no existing authors from author tags.')
else:
log.debug('Found existing authors from author tags: {0}'.format(self._existingAuthorSet))
return
def _findGitMetadata(self, path):
'''
Constructs all metadata that can be obtained from the specified file's
git history. This includes clearing and updating _dateList, _authorSet,
and _isOld.
@param path: The path to the file whose git history will be queried.
@return: Nothing. _dateList, _authorSet, and _isOld are modified.
'''
# Clear out the previous metadata.
self._dateList = []
self._authorSet = set()
self._isOld = False
# Call git log on the file. We need to pass --pretty=format:"%ci,%an" to
# get the log output in a simple format: yyyy-mm-dd -gmtdiff,<author>
directory = path[:path.rfind(os.sep)]
result = subprocess.check_output(['git', '-C', directory, 'log', '--pretty=format:"%ci,%an"', path], stderr=subprocess.STDOUT)
commits = result.replace('"', '').splitlines()
# Determine the years for the first and last commits.
commit = commits[0]
lastYear = int(commit[:commit.find('-')])
commit = commits[len(commits) - 1]
firstYear = int(commit[:commit.find('-')])
# Update self._dateList to hold the first (and last year if different).
self._dateList.append(firstYear)
if firstYear != lastYear:
self._dateList.append(lastYear)
# Print the found first/last date(s) to the log.
log.debug('Found the dates from the git history: {0}'.format(self._dateList))
# Determine whether the file is old.
firstDateString = commit.split()[0].split('-')
firstMonth = int(firstDateString[1])
firstDay = int(firstDateString[2])
if datetime(firstYear, firstMonth, firstDay) < datetime(2014, 11, 4):
self._isOld = True
# Print out whether or not the file is old to the log.
if self._isOld:
log.debug('The file predates the repo relocation.')
else:
log.debug('The file is more recent than the repo relocation.')
# Add all authors from the commit log to the set of authors. Use the
# preferred name if available.
authorSet = set()
for commit in commits:
authorSet.add(commit.split(',')[1])
for author in authorSet:
if author in self._authorDictionary:
self._authorSet.add(self._authorDictionary[author])
else:
self._authorSet.add(author)
# Print out the added authors to the log.
log.debug('Found authors from the git history: {0}'.format(self._authorSet))
return
def _getDates(self):
'''
Uses the current metadata for the file to determine the proper date
string to use for the file's license. If the dates span multiple years,
the returned string will be of the format "first_year, last_year".
Otherwise, the returned string will be of the format "first_year".
This method should not return a null value.
'''
dates = ""
# TODO
dates = "2001 a space odyssey"
return dates
def _getCopyrightOwners(self):
'''
Determines the copyright owner string to use for the file's license.
This method should not return a null value.
'''
copyrightOwners = self._defaultCopyrightOwners
return copyrightOwners
def _getInitialAuthor(self):
'''
Uses the current metadata for the file to determine the proper initial
author string to use for the file's license. If the file pre-dates the
move to the git repo, then the author provided by the class author tag
will be used. If the file has no such author specified, the default
author will be used.
This method should not return a null value.
'''
author = ""
# TODO
author = "PRIMECUTMIGGITYMOEMACKDADDYJIZZYBANGDOGGYDOGDAWG"
return author
def _getContributors(self):
'''
Uses the current metadata for the file to determine the proper list of
contributors to use for the file's license. If the file pre-dates the
move to the git repo AND has | |
'''
This module provides the class Searcher
which interacts with a Handle Search
Servlet.
Author: <NAME> (DKRZ), 2015-2016
'''
import logging
import re
import requests
import json
import b2handle
from past.builtins import xrange
from b2handle.handleexceptions import ReverseLookupException
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(b2handle.util.NullHandler())
REQUESTLOGGER = logging.getLogger('log_all_requests_of_testcases_to_file')
REQUESTLOGGER.propagate = False
REQUESTLOGGER.addHandler(b2handle.util.NullHandler())
class Searcher(object):
'''
This class interacts with a Handle Search
Servlet.
As such a Search Servlet is not provided by
the Handle System, this class caters to a
custom Search Servlet.
'''
def __init__(self, **args):
b2handle.util.log_instantiation(LOGGER, 'Searcher', args, ['password','reverselookup_password'])
optional_args = [
'reverselookup_baseuri',
'reverselookup_url_extension',
'handle_server_url',
'reverselookup_username',
'username',
'reverselookup_password',
'password',
'allowed_search_keys',
'HTTPS_verify'
]
b2handle.util.add_missing_optional_args_with_value_none(args, optional_args)
# Args that the constructor understands:
self.__reverselookup_baseuri = None
self.__reverselookup_url_extension = None
self.__allowed_search_keys = None
self.__HTTPS_verify = None
self.__user = None
self.__password = None
# Other attributes:
self.__has_search_access = False
self.__handle_system_username_used = False
self.__handle_system_password_used = False
self.__revlookup_auth_string = None
self.__header = None
self.__session = None
self.__search_url = None
# Defaults:
defaults = {
'allowed_search_keys': ['URL', 'CHECKSUM'],
'HTTPS_verify': True,
'reverselookup_url_extension': '/hrls/handles/',
}
# Set them:
self.__store_args_or_set_to_defaults(args, defaults)
self.__setup_search_access()
LOGGER.debug('End of instantiation of the search module.')
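# Hypothetical usage sketch (not from the original module; the endpoint and
# credentials below are placeholders, not a real service):
#
#   searcher = Searcher(
#       reverselookup_baseuri='https://hdl.example.org',
#       reverselookup_username='300:foo/admin',
#       reverselookup_password='examplepassword',
#       allowed_search_keys=['URL', 'CHECKSUM'],
#   )
#   handles = searcher.search_handle(URL='*example.org*')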
def __setup_search_access(self):
self.__check_and_set_search_access()
self.__session = requests.Session()
if self.__session is None:
LOGGER.info('Session could not be created.')
else:
LOGGER.debug('Session was created.')
def __check_and_set_search_access(self):
user_and_pw_exist = self.__check_and_set_search_authentication()
url_exists = self.__check_and_set_search_url()
if user_and_pw_exist and url_exists:
self.__has_search_access = True
def __check_and_set_search_authentication(self):
user_and_pw_exist = False
if self.__user is not None and self.__password is not None:
self.__set_revlookup_auth_string(self.__user, self.__password)
self.__header = {'Authorization': 'Basic ' + self.__revlookup_auth_string}
LOGGER.info('Reverse lookup authentication is set.')
return True
else:
msg = 'Reverse lookup not possible.'
if self.__user is None and self.__password is None:
LOGGER.info(msg+' Neither username nor password were provided.')
elif self.__user is None:
LOGGER.info(msg+' Username not provided. Password is '+str(self.__password))
else:
LOGGER.info(msg+' Password not provided. Username is '+str(self.__user))
return False
def __check_and_set_search_url(self):
if (self.__reverselookup_baseuri is not None and
self.__reverselookup_url_extension is not None):
self.__search_url = (
self.__reverselookup_baseuri.rstrip('/')+'/'+
self.__reverselookup_url_extension.strip('/')
)
LOGGER.info('Reverse lookup endpoint set to '+str(self.__search_url))
return True
else:
msg = 'Reverse lookup not possible.'
if (self.__reverselookup_baseuri is None and
self.__reverselookup_url_extension is None):
LOGGER.info(msg+' No URL for reverse lookup provided.')
elif self.__reverselookup_baseuri is None:
LOGGER.info(msg+' No URL for reverse lookup provided.')
else:
LOGGER.info(msg+' No URL path for reverse lookup provided.')
return False
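# Example of the URL composition above (hypothetical values): with
# reverselookup_baseuri 'https://hdl.example.org/' and the default
# reverselookup_url_extension '/hrls/handles/', the composed search URL is
# 'https://hdl.example.org/hrls/handles'.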
def get_search_endpoint(self):
if self.__has_search_access:
return self.__search_url
else:
LOGGER.error(
'Searching not possible. Reason: No access '+
'to search system (endpoint: '+
str(self.__search_url)+').'
)
return None
def __store_args_or_set_to_defaults(self, args, defaults):
LOGGER.debug('Setting the attributes:')
if args['HTTPS_verify'] is not None: # Without this check, a passed "False" is not found!
self.__HTTPS_verify = b2handle.util.get_valid_https_verify(
args['HTTPS_verify']
)
LOGGER.info(' - https_verify set to: '+str(self.__HTTPS_verify))
else:
self.__HTTPS_verify = defaults['HTTPS_verify']
LOGGER.info(' - https_verify set to default: '+str(self.__HTTPS_verify))
if args['allowed_search_keys'] is not None: # Without this check, empty lists are not found!
self.__allowed_search_keys = args['allowed_search_keys']
LOGGER.info(' - allowed_search_keys set to: '+str(self.__allowed_search_keys))
else:
self.__allowed_search_keys = defaults['allowed_search_keys']
LOGGER.info(' - allowed_search_keys set to default: '+str(self.__allowed_search_keys))
if args['reverselookup_baseuri']:
self.__reverselookup_baseuri = args['reverselookup_baseuri']
LOGGER.info(' - solrbaseurl set to: '+self.__reverselookup_baseuri)
elif 'handle_server_url' in args.keys() and args['handle_server_url'] is not None:
self.__reverselookup_baseuri = args['handle_server_url']
LOGGER.info(' - solrbaseurl set to same as handle server: '+str(self.__reverselookup_baseuri))
else:
LOGGER.info(' - solrbaseurl: No default.')
if args['reverselookup_url_extension']:
self.__reverselookup_url_extension = args['reverselookup_url_extension']
LOGGER.info(' - reverselookup_url_extension set to: '+self.__reverselookup_url_extension)
else:
self.__reverselookup_url_extension = defaults['reverselookup_url_extension']
LOGGER.info(' - reverselookup_url_extension set to default: '+self.__reverselookup_url_extension)
# Authentication reverse lookup:
# If specified, use it.
# Else: Try using handle system authentication
# Else: search_handle does not work and will raise an exception.
if args['reverselookup_username']:
self.__user = args['reverselookup_username']
LOGGER.info(' - reverselookup_username set to: '+self.__user)
elif args['username']:
self.__user = args['username']
self.__handle_system_username_used = True
LOGGER.info(' - reverselookup_username set to handle server username: '+self.__user)
else:
LOGGER.info(' - reverselookup_username: Not specified. No default.')
if args['reverselookup_password']:
self.__password = args['reverselookup_password']
LOGGER.info(' - reverselookup_password set.')
elif args['password']:
self.__password = args['password']
self.__handle_system_password_used = True
LOGGER.info(' - reverselookup_password set to handle server password.')
else:
LOGGER.info(' - reverselookup_password: Not specified. No default.')
def search_handle(self, **args):
'''
Search for handles containing the specified key with the specified
value. The search terms are passed on to the reverse lookup servlet
as-is. The servlet is supposed to be case-insensitive, but if it
isn't, the wrong case will cause a :exc:`~b2handle.handleexceptions.ReverseLookupException`.
*Note:* If allowed search keys are configured, only these are used. If
no allowed search keys are specified, all key-value pairs are
passed on to the reverse lookup servlet, possibly causing a
:exc:`~b2handle.handleexceptions.ReverseLookupException`.
Example calls:
* list_of_handles = search_handle(URL='http://www.foo.com')
* list_of_handles = search_handle(URL='http://www.foo.com', CHECKSUM=99999)
:param URL: Optional. The URL to search for (reverse lookup). [This is
NOT the URL of the search servlet!]
:param prefix: Optional. The Handle prefix to which the search should
be limited to. If unspecified, the method will search across all
prefixes present at the server given to the constructor.
:param key_value_pairs: Optional. Several search fields and values can
be specified as key-value-pairs,
e.g. CHECKSUM=123456, URL=www.foo.com
:raise: :exc:`~b2handle.handleexceptions.ReverseLookupException`: If a search field is specified that
cannot be used, or if something else goes wrong.
:return: A list of all Handles (strings) that bear the given key with
given value of given prefix or server. The list may be empty and
may also contain more than one element.
'''
LOGGER.debug('search_handle...')
if self.__has_search_access:
return self.__search_handle(**args)
else:
LOGGER.error(
'Searching not possible. Reason: No access '+
'to search system (endpoint: '+
str(self.__search_url)+').'
)
return None
def __search_handle(self, **args):
# Prefix specified? Remove them from the key value pairs to be searched.
prefix = None
if 'prefix' in args.keys():
prefix = args.pop('prefix')
# Any fulltext search terms specified? Remove them from the key value pairs to be searched.
fulltext_searchterms = []
if 'searchterms' in args.keys():
fulltext_searchterms = args.pop('searchterms')
# Check if there is any key-value pairs to be searched.
if len(args) == 0:
LOGGER.debug('search_handle: No key value pair was specified.')
msg = 'No search terms have been specified. Please specify'+\
' at least one key-value-pair.'
raise ReverseLookupException(msg=msg)
else:
isnone = b2handle.util.return_keys_of_value_none(args)
if len(isnone) > 0:
LOGGER.debug('search_handle: These keys had value None: '+str(isnone))
args = b2handle.util.remove_value_none_from_dict(args)
if len(args) == 0:
LOGGER.debug('search_handle: No key value pair with valid value was specified.')
msg = ('No search terms have been specified. Please specify'
' at least one key-value-pair.')
raise ReverseLookupException(msg=msg)
# Perform the search:
list_of_handles = []
LOGGER.debug('search_handle: key-value-pairs: '+str(args))
query = self.create_revlookup_query(*fulltext_searchterms, **args)
if query is None:
msg = 'No search query was specified'
raise ReverseLookupException(msg=msg)
resp = self.__send_revlookup_get_request(query)
# Check for undefined fields
regex = 'RemoteSolrException: Error from server at .+: undefined field .+'
match = re.compile(regex).search(str(resp.content))
if match is not None:
undefined_field = resp.content.split('undefined field ')[1]
msg = 'Tried to search in undefined field "'+undefined_field+'".'
raise ReverseLookupException(msg=msg, query=query, response=resp)
if resp.status_code == 200:
try:
list_of_handles = json.loads(resp.content)
except ValueError:
msg = 'The response is not JSON.'
raise ReverseLookupException(msg=msg, query=query, response=resp)
elif resp.status_code == 401:
msg = 'Authentication failed.'
if self.__handle_system_username_used or self.__handle_system_password_used:
msg += (' If the Reverse Lookup Servlet you are'
' using does not accept the same username and/or password'
' as the Handle Server, please provide its username and/or'
' password separately when instantiating the client')
else:
msg += ' You need to specify a username and password to search'
raise ReverseLookupException(msg=msg, query=query, response=resp)
elif resp.status_code == 404:
msg = 'Wrong search servlet URL ('+resp.request.url+')'
regex = 'The handle you requested.+cannot be found'
match = re.compile(regex, re.DOTALL).search(str(resp.content))
if match is not None:
msg += '. It seems you reached a Handle Server'
raise ReverseLookupException(msg=msg, query=query, response=resp)
else:
raise ReverseLookupException(query=query, response=resp)
# Filter prefixes:
if prefix is not None:
LOGGER.debug('search_handle: Restricting search to prefix '+prefix)
filteredlist_of_handles = []
for i in xrange(len(list_of_handles)):
if list_of_handles[i].split('/')[0] == prefix:
filteredlist_of_handles.append(list_of_handles[i])
list_of_handles = filteredlist_of_handles
return list_of_handles
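# Sketch of the overall flow above (hypothetical values): a call such as
#   searcher.search_handle(URL='*dkrz*', prefix='21.T12345')
# builds the query from the URL term, sends it to the reverse lookup servlet,
# parses the JSON list of handles from the response, and finally keeps only
# handles whose prefix part (before the '/') equals '21.T12345'.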
def create_revlookup_query(self, *fulltext_searchterms, **keyvalue_searchterms):
'''
Create the part of the solr request that comes after the question mark,
e.g. ?URL=*dkrz*&CHECKSUM=*abc*. If allowed search keys are
configured, only these are used. If no allowed search keys are
specified, all key-value pairs are passed on to the reverse lookup
servlet.
:param fulltext_searchterms: Optional. Any term specified will be used
as search term. Not implemented yet, so will be ignored.
:param keyvalue_searchterms: Optional. Key-value pairs. Any | |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Abstract QuantumState class.
"""
import copy
from abc import abstractmethod
import numpy as np
from qiskit.quantum_info.operators.operator import Operator
from qiskit.result.counts import Counts
from qiskit.utils.deprecation import deprecate_function
class QuantumState:
"""Abstract quantum state base class"""
def __init__(self, op_shape=None):
"""Initialize a QuantumState object.
Args:
op_shape (OpShape): Optional, an OpShape object for state dimensions.
.. note::
If ``op_shape`` is specified it will take precedence over other
kwargs.
"""
self._op_shape = op_shape
# RNG for measure functions
self._rng_generator = None
# Set higher priority than Numpy array and matrix classes
__array_priority__ = 20
def __eq__(self, other):
return isinstance(other, self.__class__) and self.dims() == other.dims()
@property
def dim(self):
"""Return total state dimension."""
return self._op_shape.shape[0]
@property
def num_qubits(self):
"""Return the number of qubits if a N-qubit state or None otherwise."""
return self._op_shape.num_qubits
@property
def _rng(self):
if self._rng_generator is None:
return np.random.default_rng()
return self._rng_generator
def dims(self, qargs=None):
"""Return tuple of input dimension for specified subsystems."""
return self._op_shape.dims_l(qargs)
def copy(self):
"""Make a copy of current operator."""
return copy.deepcopy(self)
def seed(self, value=None):
"""Set the seed for the quantum state RNG."""
if value is None:
self._rng_generator = None
elif isinstance(value, np.random.Generator):
self._rng_generator = value
else:
self._rng_generator = np.random.default_rng(value)
@abstractmethod
def is_valid(self, atol=None, rtol=None):
"""Return True if a valid quantum state."""
pass
@abstractmethod
def to_operator(self):
"""Convert state to matrix operator class"""
pass
@abstractmethod
def conjugate(self):
"""Return the conjugate of the operator."""
pass
@abstractmethod
def trace(self):
"""Return the trace of the quantum state as a density matrix."""
pass
@abstractmethod
def purity(self):
"""Return the purity of the quantum state."""
pass
@abstractmethod
def tensor(self, other):
"""Return the tensor product state self ⊗ other.
Args:
other (QuantumState): a quantum state object.
Returns:
QuantumState: the tensor product state self ⊗ other.
Raises:
QiskitError: if other is not a quantum state.
"""
pass
@abstractmethod
def expand(self, other):
"""Return the tensor product state other ⊗ self.
Args:
other (QuantumState): a quantum state object.
Returns:
QuantumState: the tensor product state other ⊗ self.
Raises:
QiskitError: if other is not a quantum state.
"""
pass
def _add(self, other):
"""Return the linear combination self + other.
Args:
other (QuantumState): a state object.
Returns:
QuantumState: the linear combination self + other.
Raises:
NotImplementedError: if subclass does not support addition.
"""
raise NotImplementedError("{} does not support addition".format(type(self)))
def _multiply(self, other):
"""Return the scalar multipled state other * self.
Args:
other (complex): a complex number.
Returns:
QuantumState: the scalar multipled state other * self.
Raises:
NotImplementedError: if subclass does not support scala
multiplication.
"""
raise NotImplementedError("{} does not support scalar multiplication".format(type(self)))
@abstractmethod
def evolve(self, other, qargs=None):
"""Evolve a quantum state by the operator.
Args:
other (Operator or QuantumChannel): The operator to evolve by.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions.
"""
pass
@abstractmethod
def expectation_value(self, oper, qargs=None):
"""Compute the expectation value of an operator.
Args:
oper (BaseOperator): an operator to evaluate expval.
qargs (None or list): subsystems to apply the operator on.
Returns:
complex: the expectation value.
"""
pass
@abstractmethod
def probabilities(self, qargs=None, decimals=None):
"""Return the subsystem measurement probability vector.
Measurement probabilities are with respect to measurement in the
computation (diagonal) basis.
Args:
qargs (None or list): subsystems to return probabilities for,
if None return for all subsystems (Default: None).
decimals (None or int): the number of decimal places to round
values. If None no rounding is done (Default: None).
Returns:
np.array: The Numpy vector array of probabilities.
"""
pass
def probabilities_dict(self, qargs=None, decimals=None):
"""Return the subsystem measurement probability dictionary.
Measurement probabilities are with respect to measurement in the
computation (diagonal) basis.
This dictionary representation uses a Ket-like notation where the
dictionary keys are qudit strings for the subsystem basis vectors.
If any subsystem has a dimension greater than 10 comma delimiters are
inserted between integers so that subsystems can be distinguished.
Args:
qargs (None or list): subsystems to return probabilities for,
if None return for all subsystems (Default: None).
decimals (None or int): the number of decimal places to round
values. If None no rounding is done (Default: None).
Returns:
dict: The measurement probabilities in dict (ket) form.
"""
return self._vector_to_dict(
self.probabilities(qargs=qargs, decimals=decimals), self.dims(qargs), string_labels=True
)
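# Hypothetical usage sketch (Statevector is one concrete subclass of
# QuantumState; the state below is chosen only for illustration):
#
#   from qiskit.quantum_info import Statevector
#   psi = Statevector.from_label('+0')   # qubit 1 in |+>, qubit 0 in |0>
#   psi.probabilities_dict()             # {'00': 0.5, '10': 0.5}
#   psi.probabilities_dict(qargs=[1])    # {'0': 0.5, '1': 0.5}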
def sample_memory(self, shots, qargs=None):
"""Sample a list of qubit measurement outcomes in the computational basis.
Args:
shots (int): number of samples to generate.
qargs (None or list): subsystems to sample measurements for,
if None sample measurement of all
subsystems (Default: None).
Returns:
np.array: list of sampled measurement outcomes in the order sampled.
Additional Information:
This function *samples* measurement outcomes using the measure
:meth:`probabilities` for the current state and `qargs`. It does
not actually implement the measurement so the current state is
not modified.
The seed for random number generator used for sampling can be
set to a fixed value by using the state's :meth:`seed` method.
"""
# Get measurement probabilities for measured qubits
probs = self.probabilities(qargs)
# Generate list of possible outcome string labels
labels = self._index_to_ket_array(
np.arange(len(probs)), self.dims(qargs), string_labels=True
)
return self._rng.choice(labels, p=probs, size=shots)
def sample_counts(self, shots, qargs=None):
"""Sample a dict of qubit measurement outcomes in the computational basis.
Args:
shots (int): number of samples to generate.
qargs (None or list): subsystems to sample measurements for,
if None sample measurement of all
subsystems (Default: None).
Returns:
Counts: sampled counts dictionary.
Additional Information:
This function *samples* measurement outcomes using the measure
:meth:`probabilities` for the current state and `qargs`. It does
not actually implement the measurement so the current state is
not modified.
The seed for random number generator used for sampling can be
set to a fixed value by using the state's :meth:`seed` method.
"""
# Sample list of outcomes
samples = self.sample_memory(shots, qargs=qargs)
# Combine all samples into a counts dictionary
inds, counts = np.unique(samples, return_counts=True)
return Counts(zip(inds, counts))
def measure(self, qargs=None):
"""Measure subsystems and return outcome and post-measure state.
Note that this function uses the QuantumStates internal random
number generator for sampling the measurement outcome. The RNG
seed can be set using the :meth:`seed` method.
Args:
qargs (list or None): subsystems to sample measurements for,
if None sample measurement of all
subsystems (Default: None).
Returns:
tuple: the pair ``(outcome, state)`` where ``outcome`` is the
measurement outcome string label, and ``state`` is the
collapsed post-measurement state for the corresponding
outcome.
"""
# Sample a single measurement outcome from probabilities
dims = self.dims(qargs)
probs = self.probabilities(qargs)
sample = self._rng.choice(len(probs), p=probs, size=1)
# Format outcome
outcome = self._index_to_ket_array(sample, self.dims(qargs), string_labels=True)[0]
# Convert to projector for state update
proj = np.zeros(len(probs), dtype=complex)
proj[sample] = 1 / np.sqrt(probs[sample])
# Update state object
# TODO: implement a more efficient state update method for
# diagonal matrix multiplication
ret = self.evolve(Operator(np.diag(proj), input_dims=dims, output_dims=dims), qargs=qargs)
return outcome, ret
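# Hypothetical usage sketch (Statevector is one concrete subclass; the outcome
# shown is one possible random draw, not a deterministic result):
#
#   from qiskit.quantum_info import Statevector
#   psi = Statevector.from_label('+')
#   outcome, post_state = psi.measure()   # e.g. ('0', Statevector([1, 0]))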
@staticmethod
def _index_to_ket_array(inds, dims, string_labels=False):
"""Convert an index array into a ket array.
Args:
inds (np.array): an integer index array.
dims (tuple): a list of subsystem dimensions.
string_labels (bool): return ket as string if True, otherwise
return as index array (Default: False).
Returns:
np.array: an array of ket strings if string_labels=True, otherwise
an array of ket lists.
"""
shifts = [1]
for dim in dims[:-1]:
shifts.append(shifts[-1] * dim)
kets = np.array([(inds // shift) % dim for dim, shift in zip(dims, shifts)])
if string_labels:
max_dim = max(dims)
char_kets = np.asarray(kets, dtype=np.unicode_)
str_kets = char_kets[0]
for row in char_kets[1:]:
if max_dim > 10:
str_kets = np.char.add(",", str_kets)
str_kets = np.char.add(row, str_kets)
return str_kets.T
return kets.T
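# Worked example for the conversion above (illustrative, not part of the
# original file): for dims=(2, 2) and inds=[0, 1, 2, 3], shifts become [1, 2]
# and the string kets are '00', '01', '10', '11', with subsystem 0 as the
# rightmost character. If any dimension exceeds 10, commas separate the
# digits, e.g. index 58 with dims=(5, 12) maps to '11,3'.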
@staticmethod
def _vector_to_dict(vec, | |
# Copyright 2017--2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import random
from tempfile import TemporaryDirectory
from typing import Optional, List, Tuple
import mxnet as mx
import numpy as np
import pytest
from sockeye import constants as C
from sockeye import data_io
from sockeye import vocab
from sockeye.utils import SockeyeError, get_tokens, seed_rngs
from sockeye.test_utils import tmp_digits_dataset
seed_rngs(12)
define_bucket_tests = [(50, 10, [10, 20, 30, 40, 50]),
(50, 20, [20, 40, 50]),
(50, 50, [50]),
(5, 10, [5]),
(11, 5, [5, 10, 11]),
(19, 10, [10, 19])]
@pytest.mark.parametrize("max_seq_len, step, expected_buckets", define_bucket_tests)
def test_define_buckets(max_seq_len, step, expected_buckets):
buckets = data_io.define_buckets(max_seq_len, step=step)
assert buckets == expected_buckets
define_parallel_bucket_tests = [(50, 50, 10, True, 1.0, [(10, 10), (20, 20), (30, 30), (40, 40), (50, 50)]),
(50, 50, 10, True, 0.5,
[(10, 5), (20, 10), (30, 15), (40, 20), (50, 25), (50, 30), (50, 35), (50, 40),
(50, 45), (50, 50)]),
(10, 10, 10, True, 0.1,
[(10, 2), (10, 3), (10, 4), (10, 5), (10, 6), (10, 7), (10, 8), (10, 9), (10, 10)]),
(10, 5, 10, True, 0.01, [(10, 2), (10, 3), (10, 4), (10, 5)]),
(50, 50, 10, True, 2.0,
[(5, 10), (10, 20), (15, 30), (20, 40), (25, 50), (30, 50), (35, 50), (40, 50),
(45, 50), (50, 50)]),
(5, 10, 10, True, 10.0, [(2, 10), (3, 10), (4, 10), (5, 10)]),
(5, 10, 10, True, 11.0, [(2, 10), (3, 10), (4, 10), (5, 10)]),
(50, 50, 50, True, 0.5, [(50, 25), (50, 50)]),
(50, 50, 50, True, 1.5, [(33, 50), (50, 50)]),
(75, 75, 50, True, 1.5, [(33, 50), (66, 75), (75, 75)]),
(50, 50, 8, False, 1.5, [(8, 8), (16, 16), (24, 24), (32, 32), (40, 40), (48, 48),
(50, 50)]),
(50, 75, 8, False, 1.5, [(8, 8), (16, 16), (24, 24), (32, 32), (40, 40), (48, 48),
(50, 56), (50, 64), (50, 72), (50, 75)])]
@pytest.mark.parametrize("max_seq_len_source, max_seq_len_target, bucket_width, bucket_scaling, length_ratio,"
"expected_buckets", define_parallel_bucket_tests)
def test_define_parallel_buckets(max_seq_len_source, max_seq_len_target, bucket_width, bucket_scaling, length_ratio,
expected_buckets):
buckets = data_io.define_parallel_buckets(max_seq_len_source, max_seq_len_target, bucket_width=bucket_width,
bucket_scaling=bucket_scaling, length_ratio=length_ratio)
assert buckets == expected_buckets
get_bucket_tests = [([10, 20, 30, 40, 50], 50, 50),
([10, 20, 30, 40, 50], 11, 20),
([10, 20, 30, 40, 50], 9, 10),
([10, 20, 30, 40, 50], 51, None),
([10, 20, 30, 40, 50], 1, 10),
([10, 20, 30, 40, 50], 0, 10),
([], 50, None)]
@pytest.mark.parametrize("buckets, length, expected_bucket",
get_bucket_tests)
def test_get_bucket(buckets, length, expected_bucket):
bucket = data_io.get_bucket(length, buckets)
assert bucket == expected_bucket
tokens2ids_tests = [(["a", "b", "c"], {"a": 1, "b": 0, "c": 300, C.UNK_SYMBOL: 12}, [1, 0, 300]),
(["a", "x", "c"], {"a": 1, "b": 0, "c": 300, C.UNK_SYMBOL: 12}, [1, 12, 300])]
@pytest.mark.parametrize("tokens, vocab, expected_ids", tokens2ids_tests)
def test_tokens2ids(tokens, vocab, expected_ids):
ids = data_io.tokens2ids(tokens, vocab)
assert ids == expected_ids
@pytest.mark.parametrize("tokens, expected_ids", [(["1", "2", "3", "0"], [1, 2, 3, 0]), ([], [])])
def test_strids2ids(tokens, expected_ids):
ids = data_io.strids2ids(tokens)
assert ids == expected_ids
@pytest.mark.parametrize("ids, expected_string", [([1, 2, 3, 0], "1 2 3 0"), ([], "")])
def test_ids2strids(ids, expected_string):
string = data_io.ids2strids(ids)
assert string == expected_string
sequence_reader_tests = [(["1 2 3", "2", "", "2 2 2"], False, False, False),
(["a b c", "c"], True, False, False),
(["a b c", ""], True, False, False),
(["a b c", "c"], True, True, True)]
@pytest.mark.parametrize("sequences, use_vocab, add_bos, add_eos", sequence_reader_tests)
def test_sequence_reader(sequences, use_vocab, add_bos, add_eos):
with TemporaryDirectory() as work_dir:
path = os.path.join(work_dir, 'input')
with open(path, 'w') as f:
for sequence in sequences:
print(sequence, file=f)
vocabulary = vocab.build_pruned_vocab(vocab.count_tokens(sequences)) if use_vocab else None
reader = data_io.SequenceReader(path, vocabulary=vocabulary, add_bos=add_bos, add_eos=add_eos)
read_sequences = [s for s in reader]
assert len(read_sequences) == len(sequences)
if vocabulary is None:
with pytest.raises(SockeyeError) as e:
data_io.SequenceReader(path, vocabulary=vocabulary, add_bos=True)
assert str(e.value) == "Adding a BOS or EOS symbol requires a vocabulary"
expected_sequences = [data_io.strids2ids(get_tokens(s)) if s else None for s in sequences]
assert read_sequences == expected_sequences
else:
expected_sequences = [data_io.tokens2ids(get_tokens(s), vocabulary) if s else None for s in sequences]
if add_bos:
expected_sequences = [[vocabulary[C.BOS_SYMBOL]] + s if s else None for s in expected_sequences]
if add_eos:
expected_sequences = [s + [vocabulary[C.EOS_SYMBOL]] if s else None for s in expected_sequences]
assert read_sequences == expected_sequences
@pytest.mark.parametrize("source_iterables, target_iterables",
[
(
[[[0], [1, 1], [2], [3, 3, 3]], [[0], [1, 1], [2], [3, 3, 3]]],
[[[0], [1]]]
),
(
[[[0], [1, 1]], [[0], [1, 1]]],
[[[0], [1, 1], [2], [3, 3, 3]]]
),
(
[[[0], [1, 1]]],
[[[0], [1, 1], [2], [3, 3, 3]]]
),
])
def test_nonparallel_iter(source_iterables, target_iterables):
with pytest.raises(SockeyeError) as e:
list(data_io.parallel_iter(source_iterables, target_iterables))
assert str(e.value) == "Different number of lines in source(s) and target(s) iterables."
@pytest.mark.parametrize("source_iterables, target_iterables",
[
(
[[[0], [1, 1]], [[0], [1]]],
[[[0], [1]]]
)
])
def test_not_source_token_parallel_iter(source_iterables, target_iterables):
with pytest.raises(SockeyeError) as e:
list(data_io.parallel_iter(source_iterables, target_iterables))
assert str(e.value).startswith("Source sequences are not token-parallel")
@pytest.mark.parametrize("source_iterables, target_iterables",
[
(
[[[0], [1]]],
[[[0], [1, 1]], [[0], [1]]],
)
])
def test_not_target_token_parallel_iter(source_iterables, target_iterables):
with pytest.raises(SockeyeError) as e:
list(data_io.parallel_iter(source_iterables, target_iterables))
assert str(e.value).startswith("Target sequences are not token-parallel")
@pytest.mark.parametrize("source_iterables, target_iterables, expected",
[
(
[[[0], [1, 1]], [[0], [1, 1]]],
[[[0], [1]]],
[([[0], [0]], [[0]]), ([[1, 1], [1, 1]], [[1]])]
),
(
[[[0], None], [[0], None]],
[[[0], [1]]],
[([[0], [0]], [[0]])]
),
(
[[[0], [1, 1]], [[0], [1, 1]]],
[[[0], None]],
[([[0], [0]], [[0]])]
),
(
[[None, [1, 1]], [None, [1, 1]]],
[[None, [1]]],
[([[1, 1], [1, 1]], [[1]])]
),
(
[[None, [1]]],
[[None, [1, 1]], [None, [1, 1]]],
[([[1]], [[1, 1], [1, 1]])]
),
(
[[None, [1, 1]], [None, [1, 1]]],
[[None, None]],
[]
)
])
def test_parallel_iter(source_iterables, target_iterables, expected):
assert list(data_io.parallel_iter(source_iterables, target_iterables)) == expected
def test_sample_based_define_bucket_batch_sizes():
batch_type = C.BATCH_TYPE_SENTENCE
batch_size = 32
max_seq_len = 100
buckets = data_io.define_parallel_buckets(max_seq_len, max_seq_len, 10, 1, 1.5)
bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets=buckets,
batch_size=batch_size,
batch_type=batch_type,
batch_num_devices=1,
data_target_average_len=[None] * len(buckets))
for bbs in bucket_batch_sizes:
assert bbs.batch_size == batch_size
assert bbs.average_target_words_per_batch == bbs.bucket[1] * batch_size
@pytest.mark.parametrize("batch_num_devices,length_ratio,batch_sentences_multiple_of,expected_batch_sizes", [
# Reference batch sizes manually inspected for sanity. Note that for
# very unbalanced lengths, the last batch can be very large. This is
# due to the requirement for any size batch (total elements) to fit into
# the same allocated space for MXNet's memory sharing.
(1, 0.5, 1, [200.0, 100.0, 67.0, 50.0, 40.0, 33.0, 29.0, 25.0, 22.0, 41.0]),
(2, 0.5, 1, [200.0, 100.0, 66.0, 50.0, 40.0, 34.0, 28.0, 24.0, 22.0, 40.0]),
(8, 0.5, 1, [200.0, 96.0, 64.0, 48.0, 40.0, 32.0, 32.0, 24.0, 24.0, 40.0]),
(1, 1.5, 1, [100.0, 50.0, 33.0, 25.0, 20.0, 20.0, 20.0, 20.0]),
(1, 1.5, 8, [96.0, 48.0, 32.0, 24.0, 16.0, 16.0, 16.0, 24.0])])
def test_word_based_define_bucket_batch_sizes(batch_num_devices, length_ratio, batch_sentences_multiple_of, expected_batch_sizes):
batch_type = C.BATCH_TYPE_WORD
batch_size = 1000
max_seq_len = 50
buckets = data_io.define_parallel_buckets(max_seq_len, max_seq_len, 10, True, length_ratio)
bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets=buckets,
batch_size=batch_size,
batch_type=batch_type,
batch_num_devices=batch_num_devices,
data_target_average_len=[None] * len(buckets),
batch_sentences_multiple_of=batch_sentences_multiple_of)
max_num_words = 0
# last bucket batch size is different
for bbs, expected_batch_size in zip(bucket_batch_sizes, expected_batch_sizes):
assert bbs.batch_size == expected_batch_size
expected_average_target_words_per_batch = expected_batch_size * bbs.bucket[1]
assert bbs.average_target_words_per_batch == expected_average_target_words_per_batch
max_num_words = max(max_num_words, bbs.batch_size * max(*bbs.bucket))
last_bbs = bucket_batch_sizes[-1]
min_expected_batch_size = round((batch_size / last_bbs.bucket[1]) / batch_num_devices)
assert last_bbs.batch_size >= min_expected_batch_size
last_bbs_num_words = last_bbs.batch_size * max(*last_bbs.bucket)
assert last_bbs_num_words >= max_num_words
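# Rough arithmetic behind the reference values above (illustrative): for
# C.BATCH_TYPE_WORD with batch_size=1000 target words, a bucket with target
# length 5 holds about 1000 / 5 = 200 sentences, target length 10 about 100,
# and so on, before rounding to a multiple of batch_sentences_multiple_of and
# of the device count; the last bucket is then enlarged so its total element
# count covers the largest bucket (MXNet memory sharing).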
@pytest.mark.parametrize("batch_num_devices,length_ratio,batch_sentences_multiple_of,expected_batch_sizes", [
# Reference batch sizes manually inspected for sanity.
(1, 0.5, 1, [200, 100, 66, 50, 40, 33, 28, 25, 22, 20]),
(2, 0.5, 1, [200, 100, 66, 50, 40, 32, 28, 24, 22, 20]),
(8, 0.5, 1, [200, 96, 64, 48, 40, 32, 24, 24, 16, 16]),
(1, 1.5, 1, [100, 50, 33, 25, 20, 20, 20, 20]),
(1, 1.5, 8, [96, 48, 32, 24, 16, 16, 16, 16])])
def test_max_word_based_define_bucket_batch_sizes(batch_num_devices, length_ratio, batch_sentences_multiple_of, expected_batch_sizes):
batch_type = C.BATCH_TYPE_MAX_WORD
batch_size = 1000
max_seq_len = 50
buckets = data_io.define_parallel_buckets(max_seq_len, max_seq_len, 10, True, length_ratio)
bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets=buckets,
batch_size=batch_size,
batch_type=batch_type,
batch_num_devices=batch_num_devices,
data_target_average_len=[None] * len(buckets),
batch_sentences_multiple_of=batch_sentences_multiple_of)
for bbs, expected_batch_size in zip(bucket_batch_sizes, expected_batch_sizes):
assert bbs.batch_size == expected_batch_size
expected_average_target_words_per_batch = expected_batch_size * bbs.bucket[1]
assert bbs.average_target_words_per_batch == expected_average_target_words_per_batch
def _get_random_bucketed_data(buckets: List[Tuple[int, int]],
min_count: int,
max_count: int,
bucket_counts: Optional[List[Optional[int]]] = None):
"""
| |
import warnings
from typing import Dict, Iterable, Optional
import pandas as pd
import duckdb
from .portals.base_portal import BaseGrouperPortalConstant
from .portals.pricing_portal import PricingPortal
from .tears.base_tear import BaseTear
from .tears.ic_tear import ICHorizonTear, ICTear
from .tears.inspection_tear import InspectionTear
from .tears.tilts_backtest_tear import TiltsBacktestTear
from .tears.turnover_tear import TurnoverTear
class Ntile:
def __init__(self, pricing_portal: PricingPortal, group_portal: Optional[BaseGrouperPortalConstant] = None):
"""
:param pricing_portal: the pricing portal which holds pricing data for all assets with factor values
:param group_portal: group portal which holds grouping information for all assets with factor values
if this is None then no group statistics will be calculated
"""
self._pricing_portal: PricingPortal = pricing_portal
self._group_portal = group_portal
self._factor_data = None
self._ntile_matrix = None
self._formatted_returns = None
def _input_checks(self, factor_series) -> None:
"""
checks the factor series to ensure it meets the requirements to run a tearsheet
Requirements:
1) series must have MultiIndex with 2 levels
2) First level must be of type pd.Period
3) PricingPortal must have data for all Period dates in the series
4) There can only be one observations for a single asset on a single day
:param factor_series: the series we are checking
:return: None
:raise ValueError: if one of the requirements are not met
"""
# checking for series with multi index, possibly also check types for multi index
if not isinstance(factor_series.index, pd.MultiIndex) or factor_series.index.nlevels != 2:
raise ValueError('Factor input must have MultiIndex of period, id')
# ensure the index level zero is date
if not isinstance(factor_series.index.get_level_values(0), pd.PeriodIndex):
raise ValueError('Factor input must have MultiIndex with the first level being a period '
f'current factor dtype is {type(factor_series.index.get_level_values(0))}')
# we will check id when looking for overlapping portal names
no_pricing_for = set(factor_series.index.get_level_values(1)).difference(
self._pricing_portal.assets)
if len(no_pricing_for) != 0:
# raise ValueError(f'PricingPortal does not have data for: {no_pricing_for}')
warnings.warn(f'PricingPortal does not have data for: {no_pricing_for}')
# make sure pricing portal dates match up with factor
overlapping_periods = set(factor_series.index.get_level_values(0).drop_duplicates()).intersection(
self._pricing_portal.periods)
if len(overlapping_periods) == 0:
raise ValueError('No overlap between PricingPortal dates and factor dates')
if len(overlapping_periods) < 100:
warnings.warn(f'Only {len(overlapping_periods)} common dates between PricingPortal dates and factor')
# check for multiple observations on a single day for a single asset
if factor_series.index.duplicated().any():
raise ValueError('Multiple factor observations on single day for a single asset')
def _set_ntiles_and_returns(self, factor_data: pd.Series, ntiles: int):
"""
Sets self._formatted_returns and self._ntile_matrix
:param factor_data: the factor data
:param ntiles: amount of ntiles
:return: None
"""
self._ntile_factor_sql(factor_data, ntiles)
self._align_ntiles_pricing()
# can see what % of the dataframe is null here
self._make_null_summary(factor_data)
def _align_ntiles_pricing(self) -> None:
"""
ensures ntiled matrix and daily returns matrix have the same column and row order
sets self._formatted_returns and self._ntile_matrix
:return: None
"""
ntile_factor = self._factor_data['ntile'].unstack()
daily_returns = self._pricing_portal.delta_data
factor_date = ntile_factor.index.get_level_values('date')
self._formatted_returns = daily_returns[(daily_returns.index >= factor_date.min()) &
(daily_returns.index <= factor_date.max())]
# reindexing the ntiles data so that you have pricing and ntiles matching up
self._ntile_matrix = ntile_factor.reindex_like(self._formatted_returns)
def _make_null_summary(self, raw_factor_data) -> None:
"""
making a summary of how much factor data we matched to pricing data
:param raw_factor_data: the raw unstacked factor data
"""
length_og_factor_data = len(raw_factor_data)
# seeing what % of factor data is missing
num_na_data_points = raw_factor_data.isnull().sum()
pct_na_data_points = num_na_data_points / length_og_factor_data
# amount of data dropped because of non-aligned factor and return dates:
# above: non-null length of ntiles before reindexing,
# below: non-null length of ntiles after reindexing
number_of_finite_ntiles = length_og_factor_data - num_na_data_points
binary_if_ntile_data = self._ntile_matrix.notnull()
number_of_finite_ntiles_no_overlap_returns = number_of_finite_ntiles - binary_if_ntile_data.sum().sum()
pct_missing_ntile_no_overlap = number_of_finite_ntiles_no_overlap_returns / number_of_finite_ntiles
# amount of data we don't have returns for, given we have overlapping pricing and factor
# should ffill ntile by holding period since we need return data holding_period days out
binary_if_return_data = self._formatted_returns.notnull()
# should forward fill by holding period to make sure we have pricing for when we will be holding the stock
missing_from_no_returns_given_overlap = (number_of_finite_ntiles
- (binary_if_ntile_data * binary_if_return_data).sum().sum())
pct_missing_data_no_returns_given_overlap = missing_from_no_returns_given_overlap / number_of_finite_ntiles
# total number of unusable factor data points due to null or no maped returns
num_bad = (num_na_data_points
+ number_of_finite_ntiles_no_overlap_returns
+ missing_from_no_returns_given_overlap
)
pct_bad = num_bad / length_og_factor_data
print(f"Unusable Factor Data: {(round(pct_bad, 4)) * 100}%")
print(f"NA Factor Values: {(round(pct_na_data_points, 4)) * 100}%")
print(f"No Overlapping Returns: {(round(pct_missing_ntile_no_overlap, 4)) * 100}%")
print(f"Missing Returns Given Overlap: {(round(pct_missing_data_no_returns_given_overlap, 4)) * 100}%")
def _ntile_factor(self, factor: pd.Series, ntiles: int) -> None:
"""
Universe relative quantiles of a factor by day.
This method is slow; it has been replaced by _ntile_factor_sql.
pd.DataFrame of ntiled factor
index: (pd.Period, _asset_id)
Columns: (factor, ntile)
Values: (factor value, Ntile corresponding to factor value)
:param factor: same var as ntile_return_tearsheet
:param ntiles: same var as ntile_return_tearsheet
"""
# add a filter: if a day has less than 20% factor data then just put bin as -1 for all assets
# unstack the frame, percentile rank each row, divide the whole matrix by 1/ntiles, take the floor of every number
factor = factor[~factor.isnull()].to_frame('factor')
try:
factor['ntile'] = factor.groupby('date').transform(
lambda date_data: ntiles - pd.qcut(date_data, ntiles, labels=False)
).sort_index()
except Exception as e:
print('Hit an error while binning data. Showing the histogram.')
print('Your data is mighty sus; we can\'t Ntile it. This is normally due to bad data.')
# forcing a histogram out
import matplotlib.pyplot as plt
factor.groupby('date').count().plot()
plt.show()
raise e
self._factor_data = factor
def _ntile_factor_sql(self, factor: pd.Series, ntiles: int) -> None:
"""
Universe relative Quantiles of a factor by day
Around 100X faster than pandas groupby qcut
pd.DataFrame of ntiled factor
index: (pd.Period, _asset_id)
Columns: (factor, ntile)
Values: (factor value, Ntile corresponding to factor value)
:param factor: same var as ntile_return_tearsheet
:param ntiles: same var as ntile_return_tearsheet
"""
factor = factor.to_frame('factor').reset_index()
factor['date'] = factor['date'].dt.to_timestamp()
sql_quantile = f"""SELECT *, NTILE({ntiles}) OVER(PARTITION BY date ORDER BY factor.factor DESC) as ntile
FROM factor
WHERE factor.factor IS NOT NULL"""
con = duckdb.connect(':memory:')
factor = con.execute(sql_quantile).df()
factor['date'] = factor['date'].dt.to_period(freq='D')
factor = factor.set_index(['date', 'id'])
self._factor_data = factor
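# Illustrative sketch of what the NTILE query above produces (values are
# hypothetical): for one date with factor values {A: 3.0, B: 2.0, C: 1.0,
# D: 0.5} and ntiles=2, ordering by factor DESC assigns ntile 1 to A and B
# and ntile 2 to C and D, so ntile 1 always holds the highest factor values
# for that date.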
#
# Start up methods
#
def _prep_for_run(self, factor: pd.Series, ntiles: int) -> None:
"""
prepares the ntiles class to run a tear sheet
:param factor: factor for tear sheet
:param ntiles: num ntiles for sheet
:return: None
"""
# checking to see if we have series or data frame
if isinstance(factor, pd.DataFrame):
if factor.shape[1] > 1: # there is a df passed with multiple columns
raise ValueError('There are multiple columns in the passed DataFrame')
factor_series = factor.iloc[:, 0]
else:
factor_series = factor.copy()
self._input_checks(factor_series)
factor_series.index.names = ['date', 'id']
self.kick_tears(factor_series, ntiles)
self._print_start_end_dates()
def _print_start_end_dates(self):
"""
prints the start and end date of the backtest
"""
date = self._factor_data.index.get_level_values(0)
print(f'\nStart Date: {date.min()}')
print(f'End Date: {date.max()}\n')
def kick_tears(self, factor_series: pd.Series, ntiles: int) -> None:
"""
Clears the object of all factor and tear data.
Reruns Ntiling of factor
:param factor_series: the user passed factor
:param ntiles: the number of ntiles
:return: None
"""
self._clear()
self._set_ntiles_and_returns(factor_series, ntiles)
def _clear(self) -> None:
"""
clears all data points in the object except the pricing portal
:return: None
"""
self._factor_data = None
self._ntile_matrix = None
self._formatted_returns = None
@staticmethod
def _run(tears: Dict[str, BaseTear]) -> None:
"""
Runs all tear sheets that are set in the class
:return: None
"""
for tear in tears.values():
tear.compute_plot()
#
# Tear Sheets Below
#
def full_tear(self, factor: pd.Series, ntiles: int, holding_period: int, long_short: bool = True,
market_neutral=True, show_uni=False, show_ntile_tilts=False) -> Dict[str, BaseTear]:
"""
Creates basic visualizations of the factor data distribution by ntile and how complete the data is
Creates a fan chart of cumulative returns for the given factor values.
Creates an IC time series for the factor value and the forward returns.
Creates a turnover sheet showing how often the factor data will turn over.
In the cumulative return plot, each value represents the cumulative return up to that day's close.
Returns are not shifted; each value represents the portfolio's value at the close of that day.
A set of weights is generated for each day based off the factor quantile.
The portfolio is rebalanced daily; each day, 1/holding_period of the portfolio is rebalanced.
All positions are equally weighted.
:param factor: The factor values being tested.
index: (pd.Period, | |
"""
Tests for EntityData generic list view
This test suite focuses on listing of record fields used by
record views and lists. This serves two purposes:
- it tests some additional options of the entity list logic
that are not tested by the default list view, and
- it tests the logic that accesses site-wide data in addition to
local data.
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2014, <NAME>"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import os
import unittest
from django.conf import settings
from django.db import models
from django.http import QueryDict
from django.test import TestCase # cf. https://docs.djangoproject.com/en/dev/topics/testing/tools/#assertions
from django.test.client import Client
from utils.py3porting import urlparse, urljoin
from utils.SuppressLoggingContext import SuppressLogging
from annalist import layout
from annalist import message
from annalist.identifiers import RDF, RDFS, ANNAL
from annalist.util import extract_entity_id
from annalist.models.site import Site
from annalist.models.collection import Collection
from annalist.models.recordtype import RecordType
from annalist.models.recordtypedata import RecordTypeData
from annalist.models.entitydata import EntityData
from annalist.models.entityfinder import EntityFinder
from annalist.models.entitytypeinfo import EntityTypeInfo
from annalist.views.uri_builder import (
uri_quote_param,
uri_params, uri_with_params,
continuation_params_url
)
from annalist.views.entitylist import EntityGenericListView
from annalist.views.form_utils.fieldchoice import FieldChoice
from .AnnalistTestCase import AnnalistTestCase
from .tests import (
test_layout,
TestHost, TestHostUri, TestBasePath, TestBaseUri, TestBaseDir
)
from .init_tests import (
init_annalist_test_site, init_annalist_test_coll, resetSitedata
)
from .entity_testutils import (
make_message, make_quoted_message,
site_dir, collection_dir,
entitydata_list_url_query,
site_view_url,
collection_view_url, collection_edit_url,
continuation_url_param,
confirm_delete_params,
collection_create_values,
site_title,
create_test_user, create_user_permissions,
context_view_field,
# context_bind_fields
context_list_entities,
context_list_head_fields, context_list_item_fields,
context_list_item_field, context_list_item_field_value,
check_field_list_context_fields,
)
from .entity_testtypedata import (
recordtype_dir,
recordtype_url,
recordtype_create_values,
)
from .entity_testentitydata import (
recorddata_dir, entitydata_dir,
entity_url, entitydata_edit_url, entitydata_delete_confirm_url,
entitydata_list_type_url, entitydata_list_all_url,
entitydata_value_keys, entitydata_create_values, entitydata_values,
entitydata_delete_confirm_form_data,
entitylist_form_data
)
from .entity_testsitedata import (
make_field_choices, no_selection,
get_site_types, get_site_types_sorted, get_site_types_linked,
get_site_lists, get_site_lists_sorted, get_site_lists_linked,
get_site_views, get_site_views_sorted, get_site_views_linked,
get_site_list_types, get_site_list_types_sorted,
get_site_field_groups, get_site_field_groups_sorted,
get_site_fields, get_site_fields_sorted,
get_site_field_types, get_site_field_types_sorted,
get_site_entities, get_site_entities_sorted,
)
from .entity_testlistdata import (
recordlist_url,
num_testcoll_enumerate_all_entities, num_testcoll_all_entities_scope_all
)
# -----------------------------------------------------------------------------
#
# EntityDefaultListView tests
#
# -----------------------------------------------------------------------------
class EntityGenericListViewTest(AnnalistTestCase):
"""
Tests for the generic entity list view
"""
def setUp(self):
init_annalist_test_site()
self.testsite = Site(TestBaseUri, TestBaseDir)
self.testcoll = Collection.create(self.testsite, "testcoll", collection_create_values("testcoll"))
self.testtype = RecordType.create(self.testcoll, "testtype", recordtype_create_values("testcoll", "testtype"))
self.testtype2 = RecordType.create(self.testcoll, "testtype2", recordtype_create_values("testcoll", "testtype2"))
self.testdata = RecordTypeData.create(self.testcoll, "testtype", {})
self.testdata2 = RecordTypeData.create(self.testcoll, "testtype2", {})
# self.user = User.objects.create_user('testuser', '<EMAIL>', '<PASSWORD>')
# self.user.save()
create_test_user(self.testcoll, "testuser", "testpassword")
self.client = Client(HTTP_HOST=TestHost)
loggedin = self.client.login(username="testuser", password="<PASSWORD>")
self.assertTrue(loggedin)
e1 = self._create_entity_data("entity1")
e2 = self._create_entity_data("entity2")
e3 = self._create_entity_data("entity3")
e4 = EntityData.create(self.testdata2, "entity4",
entitydata_create_values("entity4", type_id="testtype2")
)
self.list_ids = get_site_lists_linked("testcoll")
return
def tearDown(self):
# resetSitedata()
return
@classmethod
def tearDownClass(cls):
resetSitedata(scope="collections")
return
# -----------------------------------------------------------------------------
# Helpers
# -----------------------------------------------------------------------------
def _create_entity_data(self, entity_id, update="Entity"):
"Helper function creates entity data with supplied entity_id"
e = EntityData.create(self.testdata, entity_id,
entitydata_create_values(entity_id, update=update)
)
return e
# -----------------------------------------------------------------------------
# Form rendering tests
# -----------------------------------------------------------------------------
def test_EntityDefaultListView(self):
self.assertEqual(EntityGenericListView.__name__, "EntityGenericListView", "Check EntityGenericListView class name")
return
def test_enumerate_all_entities(self):
# Test enumeration of all collection and site entities
# Introduced to facilitate debugging of site data storage rework
entity_list = (
EntityFinder(self.testcoll, selector="ALL")
.get_entities_sorted(type_id=None, altscope="all",
user_permissions=None,
context={},
search=""
)
)
actual_entity_ids = [ "%s/%s"%(e.get_type_id(), e.get_id()) for e in entity_list ]
# log.debug("@@ actual_entity_ids: \n"+"\n".join([repr(eti) for eti in actual_entity_ids]))
self.assertEqual(len(actual_entity_ids), num_testcoll_enumerate_all_entities) # Will change with site data
expect_entities = get_site_entities_sorted()
expect_entity_ids = [ fc.id for fc in expect_entities ]
# log.debug("@@ actual_entity_ids: \n"+"\n".join([ repr(eti) for eti in actual_entity_ids[145:] ]))
# log.debug("@@ expect_entity_ids: \n"+"\n".join([ repr(eti) for eti in expect_entity_ids[145:] ]))
self.assertEqual(actual_entity_ids, expect_entity_ids)
return
def test_enumerate_value_modes(self):
# Test enumeration of value modes (tests enumeration type listing)
# Introduced to facilitate debugging of site data storage rework
entity_list = (
EntityFinder(self.testcoll, selector="ALL")
.get_entities_sorted(type_id="_enum_value_mode", altscope="all",
user_permissions=None,
context={},
search=""
)
)
# Enumerate enumeration types
entity_types_ids = [ (e.get_type_id(), e.get_id()) for e in entity_list ]
# log.info("@@ entity_types_ids: \n"+"\n".join([repr(eti) for eti in entity_types_ids]))
self.assertEqual(len(entity_types_ids), 5)
return
def test_get_default_all_list(self):
# List all entities in current collection
u = entitydata_list_all_url("testcoll", list_id="Default_list_all") + "?continuation_url=/xyzzy/"
r = self.client.get(u)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
list_label = "List entities with type information"
list_title = "List entities with type information - Collection testcoll"
self.assertContains(r, "<title>%s</title>"%list_title, html=True)
self.assertContains(r, '<h2 class="page-heading">%s</h2>'%list_label, html=True)
self.assertMatch(r.content, r'<input.type="hidden".name="continuation_url".+value="/xyzzy/"/>')
# log.info(r.content) #@@
cont = uri_params({"continuation_url": u})
tooltip1 = "" # 'title="%s"'%r.context['fields'][0]['field_help']
tooltip2 = "" # 'title="%s"'%r.context['fields'][1]['field_help']
tooltip3 = "" # 'title="%s"'%r.context['fields'][2]['field_help']
rowdata = """
<div class="tbody row select-row">
<div class="small-1 columns">
<input type="checkbox" class="select-box right" name="entity_select"
value="testtype/entity1" />
</div>
<div class="small-11 columns">
<div class="row view-listrow">
<div class="view-value small-3 columns" %(tooltip1)s>
<a href="%(base)s/c/testcoll/d/testtype/entity1/%(cont)s">entity1</a>
</div>
<div class="view-value small-2 columns" %(tooltip2)s>
<a href="/testsite/c/testcoll/d/_type/testtype/%(cont)s">RecordType testcoll/_type/testtype</a>
</div>
<div class="view-value small-7 columns" %(tooltip3)s>
<span>Entity testcoll/testtype/entity1</span>
</div>
</div>
</div>
</div>
"""%(
{ 'base': TestBasePath
, 'cont': cont
, 'tooltip1': tooltip1
, 'tooltip2': tooltip2
, 'tooltip3': tooltip3
}
)
# log.info(r.content)
# log.info(r.context["fields"])
# log.info(r.context["List_rows"])
self.assertContains(r, rowdata, html=True)
# Test context
self.assertEqual(r.context['title'], list_title)
self.assertEqual(r.context['heading'], list_label)
self.assertEqual(r.context['coll_id'], "testcoll")
self.assertEqual(r.context['type_id'], None)
self.assertEqual(r.context['continuation_url'], "/xyzzy/")
list_choices = r.context['list_choices']
self.assertEqual(set(list_choices.options), set(self.list_ids))
self.assertEqual(list_choices['field_value'], "Default_list_all")
# Unbound field descriptions
head_fields = context_list_head_fields(r.context)
self.assertEqual(len(head_fields), 1) # One row of 3 cols..
self.assertEqual(len(head_fields[0].description['row_field_descs']), 3)
f0 = context_view_field(r.context, 0, 0)
f1 = context_view_field(r.context, 0, 1)
f2 = context_view_field(r.context, 0, 2)
self.assertEqual(f0.field_id, 'Entity_id')
self.assertEqual(f1.field_id, 'Entity_type')
self.assertEqual(f2.field_id, 'Entity_label')
# Entities and bound fields
# log.info(entities) #@@
entities = context_list_entities(r.context)
self.assertEqual(len(entities), 6)
entity_fields = (
[ {'entity_type_id': "_type", 'annal:id': "testtype", 'rdfs:label': "RecordType testcoll/_type/testtype"}
, {'entity_type_id': "_type", 'annal:id': "testtype2", 'rdfs:label': "RecordType testcoll/_type/testtype2"}
, {'entity_type_id': "testtype", 'annal:id': "entity1", 'rdfs:label': "Entity testcoll/testtype/entity1"}
, {'entity_type_id': "testtype", 'annal:id': "entity2", 'rdfs:label': "Entity testcoll/testtype/entity2"}
, {'entity_type_id': "testtype", 'annal:id': "entity3", 'rdfs:label': "Entity testcoll/testtype/entity3"}
, {'entity_type_id': "testtype2", 'annal:id': "entity4", 'rdfs:label': "Entity testcoll/testtype2/entity4"}
])
field_keys = ('annal:id', 'entity_type_id', 'rdfs:label')
for eid in range(6):
item_fields = context_list_item_fields(r.context, entities[eid])
for fid in range(3):
item_field = item_fields[fid]
head_field = head_fields[0].description['row_field_descs'][fid]
# Check that row field descriptions match corresponding heading field descriptions
for fkey in (
'field_id', 'field_name', 'field_label',
'field_property_uri', 'field_render_type',
'field_placement', 'field_value_type'):
self.assertEqual(item_field.description[fkey], head_field[fkey])
# Check row field values
fkey = field_keys[fid]
self.assertEqual(item_field['field_value'], entity_fields[eid][fkey])
self.assertEqual(item_field['entity_type_id'], entity_fields[eid]['entity_type_id'])
return
def test_get_default_all_scope_all_list(self):
# List all entities in current collection and site-wide
# This repeats parts of the previous test but with scope='all'
u = entitydata_list_all_url(
"testcoll", list_id="Default_list_all",
scope="all", continuation_url="/xyzzy/"
)
r = self.client.get(u)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
list_label = "List entities with type information"
list_title = "List entities with type information - Collection testcoll"
self.assertContains(r, "<title>%s</title>"%list_title, html=True)
self.assertContains(r, '<h2 class="page-heading">%s</h2>'%list_label, html=True)
# Test context
self.assertEqual(r.context['title'], list_title)
self.assertEqual(r.context['heading'], list_label)
self.assertEqual(r.context['coll_id'], "testcoll")
self.assertEqual(r.context['type_id'], None)
list_choices = r.context['list_choices']
self.assertEqual(set(list_choices.options), set(self.list_ids))
self.assertEqual(list_choices['field_value'], "Default_list_all")
# Unbound field descriptions
head_fields = context_list_head_fields(r.context)
self.assertEqual(len(head_fields), 1) # One row of 3 cols..
self.assertEqual(len(head_fields[0].description['row_field_descs']), 3)
f0 = context_view_field(r.context, 0, 0)
f1 = context_view_field(r.context, 0, 1)
f2 = context_view_field(r.context, 0, 2)
self.assertEqual(f0.field_id, 'Entity_id')
self.assertEqual(f1.field_id, 'Entity_type')
self.assertEqual(f2.field_id, 'Entity_label')
# Entities and bound fields
entities = context_list_entities(r.context)
# listed_entities = { e['entity_id']: e for e in entities }
# for eid in listed_entities:
# print "@@ eid %s"%(eid)
self.assertEqual(len(entities), num_testcoll_all_entities_scope_all) # Will change with site data
return
def test_get_types_list(self):
# List types in current collection
u = entitydata_list_type_url(
"testcoll", "_type", list_id="Type_list", scope=None
)
r = self.client.get(u)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
# log.info(r.content) #@@
list_label = "Entity types"
list_title = "Entity types - Collection testcoll"
self.assertContains(r, "<title>%s</title>"%list_title, html=True)
self.assertContains(r, '<h2 class="page-heading">%s</h2>'%list_label, html=True)
# Test context
self.assertEqual(r.context['title'], list_title)
self.assertEqual(r.context['heading'], list_label)
self.assertEqual(r.context['coll_id'], "testcoll")
self.assertEqual(r.context['type_id'], "_type")
# Fields
head_fields = context_list_head_fields(r.context)
self.assertEqual(len(head_fields), 1) # One row of 3 cols..
self.assertEqual(len(head_fields[0].description['row_field_descs']), 3)
f0 = context_view_field(r.context, 0, 0)
f1 = context_view_field(r.context, 0, 1)
f2 = context_view_field(r.context, 0, 2)
# 1st field
self.assertEqual(f0.field_id, 'Entity_id')
self.assertEqual(f0.field_name, 'entity_id')
# 2nd field
self.assertEqual(f1.field_id, 'Type_uri')
self.assertEqual(f1.field_name, 'Type_uri')
# 3rd field
self.assertEqual(f2.field_id, 'Entity_label')
self.assertEqual(f2.field_name, 'Entity_label')
# Entities
entities = context_list_entities(r.context)
listed_entities = { e['entity_id']: e for e in entities }
# self.assertIn('_initial_values', listed_entities)
type_entities = {"testtype", "testtype2"}
self.assertEqual(set(listed_entities.keys()), type_entities)
return
def test_get_types_scope_all_list(self):
# List types in current collection and site-wide
u = entitydata_list_type_url(
"testcoll", "_type", list_id="Type_list", scope="all"
)
r = self.client.get(u)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
# log.info(r.content) #@@
list_label = "Entity types"
list_title = "Entity types - Collection testcoll"
self.assertContains(r, "<title>%s</title>"%list_title, html=True)
self.assertContains(r, '<h2 class="page-heading">%s</h2>'%list_label, html=True)
# Test context
self.assertEqual(r.context['title'], list_title)
self.assertEqual(r.context['heading'], list_label)
self.assertEqual(r.context['coll_id'], "testcoll")
self.assertEqual(r.context['type_id'], "_type")
# Fields
head_fields = context_list_head_fields(r.context)
self.assertEqual(len(head_fields), 1)       # One row of 3 cols..
self.assertEqual(len(head_fields[0].description['row_field_descs']), 3)
f0 = context_view_field(r.context, 0, 0)
f1 = context_view_field(r.context, 0, 1)
f2 = context_view_field(r.context, 0, 2)
# 1st field
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import pytest
import torch
import pydrobert.torch.command_line as command_line
@pytest.mark.cpu
@pytest.mark.parametrize("include_frame_shift", [True, False])
def test_get_torch_spect_data_dir_info(
temp_dir, populate_torch_dir, include_frame_shift
):
_, alis, _, feat_sizes, _, _ = populate_torch_dir(
temp_dir, 19, num_filts=5, max_class=10, include_frame_shift=include_frame_shift
)
# add one with class idx 10 to ensure all classes are accounted for
torch.save(torch.rand(1, 5), os.path.join(temp_dir, "feat", "utt19.pt"))
torch.save(torch.tensor([10]), os.path.join(temp_dir, "ali", "utt19.pt"))
if include_frame_shift:
torch.save(
torch.tensor([[100, 0, 1]]), os.path.join(temp_dir, "ref", "utt19.pt")
)
else:
torch.save(torch.tensor([100]), os.path.join(temp_dir, "ref", "utt19.pt"))
feat_sizes += (1,)
alis = torch.cat(alis + [torch.tensor([10])])
alis = [class_idx.item() for class_idx in alis]
table_path = os.path.join(temp_dir, "info")
assert not command_line.get_torch_spect_data_dir_info(
[temp_dir, table_path, "--strict"]
)
def check():
table = dict()
with open(table_path) as table_file:
for line in table_file:
line = line.split()
table[line[0]] = int(line[1])
assert table["num_utterances"] == 20
assert table["total_frames"] == sum(feat_sizes)
assert table["num_filts"] == 5
assert table["max_ali_class"] == 10
assert table["max_ref_class"] == 100
for class_idx in range(11):
key = "count_{:02d}".format(class_idx)
assert table[key] == alis.count(class_idx)
check()
if include_frame_shift:
# ensure we're only looking at the ids in the recorded refs
torch.save(
torch.tensor([[100, 0, 101]]), os.path.join(temp_dir, "ref", "utt19.pt")
)
assert not command_line.get_torch_spect_data_dir_info([temp_dir, table_path])
table = dict()
with open(table_path) as table_file:
for line in table_file:
line = line.split()
table[line[0]] = int(line[1])
assert table["max_ref_class"] == 100
# invalidate the data set and try again
torch.save(
torch.tensor([[100, 0, 1]]).int(), os.path.join(temp_dir, "ref", "utt19.pt")
)
with pytest.raises(ValueError, match="long tensor"):
command_line.get_torch_spect_data_dir_info(
[temp_dir, table_path, "--strict"]
)
# ...but the problem is fixable. So if we set the flag...
with pytest.warns(UserWarning, match="long tensor"):
command_line.get_torch_spect_data_dir_info([temp_dir, table_path, "--fix"])
check()
# ...it shouldn't happen again
command_line.get_torch_spect_data_dir_info([temp_dir, table_path, "--strict"])
check()
def _write_token2id(path, swap, collapse_vowels=False):
vowels = {ord(x) for x in "aeiou"}
with open(path, "w") as f:
for v in range(ord("a"), ord("z") + 1):
if swap:
if collapse_vowels and v in vowels:
f.write("{} a\n".format(v - ord("a")))
else:
f.write("{} {}\n".format(v - ord("a"), chr(v)))
else:
assert not collapse_vowels
f.write("{} {}\n".format(chr(v), v - ord("a")))
@pytest.mark.cpu
@pytest.mark.parametrize("tokens", ["token2id", "id2token"])
@pytest.mark.parametrize(
"skip_frame_times,feat_sizing", [(True, False), (False, True), (False, False)]
)
def test_trn_to_torch_token_data_dir(temp_dir, tokens, skip_frame_times, feat_sizing):
trn_path = os.path.join(temp_dir, "ref.trn")
tokens_path = os.path.join(temp_dir, "token2id")
ref_dir = os.path.join(temp_dir, "ref")
_write_token2id(tokens_path, tokens == "id2token")
with open(trn_path, "w") as trn:
trn.write(
"""\
a b b c (utt1)
(utt2)
d { e / f } g (utt3)
{{{h / i} / j} / k} (utt4)
A a (utt5)
"""
)
with warnings.catch_warnings(record=True):
assert not command_line.trn_to_torch_token_data_dir(
[
trn_path,
tokens_path,
ref_dir,
"--alt-handler=first",
"--unk-symbol=c",
"--chunk-size=1",
]
+ (["--swap"] if tokens == "id2token" else [])
+ (["--skip-frame-times"] if skip_frame_times else [])
+ (["--feat-sizing"] if feat_sizing else [])
)
exp_utt1 = torch.tensor([0, 1, 1, 2])
exp_utt3 = torch.tensor([3, 4, 6])
exp_utt4 = torch.tensor([7])
exp_utt5 = torch.tensor([2, 0])
if feat_sizing:
exp_utt1 = exp_utt1.unsqueeze(-1)
exp_utt3 = exp_utt3.unsqueeze(-1)
exp_utt4 = exp_utt4.unsqueeze(-1)
exp_utt5 = exp_utt5.unsqueeze(-1)
elif not skip_frame_times:
neg1_tensor = torch.tensor([[-1, -1]] * 10)
exp_utt1 = torch.cat([exp_utt1.unsqueeze(-1), neg1_tensor[:4]], -1)
exp_utt3 = torch.cat([exp_utt3.unsqueeze(-1), neg1_tensor[:3]], -1)
exp_utt4 = torch.cat([exp_utt4.unsqueeze(-1), neg1_tensor[:1]], -1)
exp_utt5 = torch.cat([exp_utt5.unsqueeze(-1), neg1_tensor[:2]], -1)
act_utt1 = torch.load(os.path.join(ref_dir, "utt1.pt"))
assert exp_utt1.shape == act_utt1.shape
assert torch.all(act_utt1 == exp_utt1)
act_utt2 = torch.load(os.path.join(ref_dir, "utt2.pt"))
assert not act_utt2.numel()
act_utt3 = torch.load(os.path.join(ref_dir, "utt3.pt"))
assert exp_utt3.shape == act_utt3.shape
assert torch.all(act_utt3 == exp_utt3)
act_utt4 = torch.load(os.path.join(ref_dir, "utt4.pt"))
assert exp_utt4.shape == act_utt4.shape
assert torch.all(act_utt4 == exp_utt4)
act_utt5 = torch.load(os.path.join(ref_dir, "utt5.pt"))
assert exp_utt5.shape == act_utt5.shape
assert torch.all(act_utt5 == exp_utt5)
@pytest.mark.cpu
@pytest.mark.parametrize("tokens", ["token2id", "id2token"])
@pytest.mark.parametrize("include_frame_shift", [True, False])
def test_torch_token_data_dir_to_trn(temp_dir, tokens, include_frame_shift):
torch.manual_seed(1000)
num_utts = 100
max_tokens = 10
num_digits = torch.log10(torch.tensor(float(num_utts))).long().item() + 1
utt_fmt = "utt{{:0{}d}}".format(num_digits)
trn_path = os.path.join(temp_dir, "ref.trn")
tokens_path = os.path.join(temp_dir, "id2token")
ref_dir = os.path.join(temp_dir, "ref")
_write_token2id(tokens_path, tokens == "id2token")
if not os.path.isdir(ref_dir):
os.makedirs(ref_dir)
exps = []
for utt_idx in range(num_utts):
utt_id = utt_fmt.format(utt_idx)
num_tokens = torch.randint(max_tokens + 1, (1,)).long().item()
ids = torch.randint(26, (num_tokens,)).long()
if include_frame_shift:
tok = torch.stack([ids] + ([torch.full_like(ids, -1)] * 2), -1)
else:
tok = ids
torch.save(tok, os.path.join(ref_dir, utt_id + ".pt"))
transcript = " ".join([chr(x + ord("a")) for x in ids.tolist()])
transcript += " ({})".format(utt_id)
exps.append(transcript)
assert not command_line.torch_token_data_dir_to_trn(
[ref_dir, tokens_path, trn_path] + (["--swap"] if tokens == "token2id" else [])
)
with open(trn_path, "r") as trn:
acts = trn.readlines()
assert len(exps) == len(acts)
for exp, act in zip(exps, acts):
assert exp.strip() == act.strip()
def _write_wc2utt(path, swap, chan, num_utts):
num_digits = torch.log10(torch.tensor(float(num_utts))).long().item() + 1
idx_fmt = "{{0:0{}d}}".format(num_digits)
if swap:
fmt = "u_{0} w_{0} {{1}}\n".format(idx_fmt)
else:
fmt = "w_{0} {{1}} u_{0}\n".format(idx_fmt)
with open(path, "w") as f:
for utt_idx in range(num_utts):
f.write(fmt.format(utt_idx, chan))
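# Note (added for clarity, derived from the helper above): with swap=False each
# line reads "w_<idx> <chan> u_<idx>" (waveform id, channel, utterance id), and
# with swap=True the utterance id comes first, "u_<idx> w_<idx> <chan>"; the
# index is zero-padded to the number of digits in num_utts.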
@pytest.mark.cpu
@pytest.mark.parametrize("tokens", ["token2id", "id2token"])
@pytest.mark.parametrize("channels", ["wc2utt", "utt2wc", None])
def test_ctm_to_torch_token_data_dir(temp_dir, tokens, channels):
ctm_path = os.path.join(temp_dir, "ref.ctm")
tokens_path = os.path.join(temp_dir, tokens)
channels_path = os.path.join(temp_dir, channels) if channels else None
ref_dir = os.path.join(temp_dir, "ref")
_write_token2id(tokens_path, tokens == "id2token")
if channels:
_write_wc2utt(channels_path, channels == "utt2wc", "A", 5)
with open(ctm_path, "w") as ctm:
ctm.write(
"""\
;; some text
w_1 A 0.1 1.0 a
w_1 A 0.2 1.0 b
w_1 A 0.3 1.0 c ;; ignore this comment
w_2 A 0.0 0.0 b
w_3 A 0.0 1000.0 d
w_3 A 1.0 0.1 d
w_4 A 0.0 2.0 Z
w_4 A 0.1 1.1 a
"""
)
args = [ctm_path, tokens_path, ref_dir, "--unk-symbol=a"]
if tokens == "id2token":
args.append("--swap")
if channels == "utt2wc":
args.append("--utt2wc={}".format(channels_path))
elif channels == "wc2utt":
args.append("--wc2utt={}".format(channels_path))
assert not command_line.ctm_to_torch_token_data_dir(args)
act_utt1 = torch.load(os.path.join(ref_dir, "u_1.pt" if channels else "w_1.pt"))
assert torch.all(
act_utt1 == torch.tensor([[0, 10, 110], [1, 20, 120], [2, 30, 130]])
)
act_utt2 = torch.load(os.path.join(ref_dir, "u_2.pt" if channels else "w_2.pt"))
assert torch.all(act_utt2 == torch.tensor([[1, 0, 0]]))
act_utt3 = torch.load(os.path.join(ref_dir, "u_3.pt" if channels else "w_3.pt"))
assert torch.all(act_utt3 == torch.tensor([[3, 0, 100000], [3, 100, 110]]))
act_utt4 = torch.load(os.path.join(ref_dir, "u_4.pt" if channels else "w_4.pt"))
assert torch.all(act_utt4 == torch.tensor([[0, 0, 200], [0, 10, 120]]))
@pytest.mark.cpu
@pytest.mark.parametrize("tokens", ["token2id", "id2token"])
@pytest.mark.parametrize("channels", ["wc2utt", "utt2wc", None])
@pytest.mark.parametrize("frame_shift_ms", [20.0, 0.1])
def test_torch_token_data_dir_to_ctm(temp_dir, tokens, channels, frame_shift_ms):
torch.manual_seed(420)
ctm_path = os.path.join(temp_dir, "ref.ctm")
tokens_path = os.path.join(temp_dir, tokens)
channels_path = os.path.join(temp_dir, channels) if channels else None
ref_dir = os.path.join(temp_dir, "ref")
num_utts, max_tokens, max_start, max_dur = 100, 10, 1000, 100
num_digits = torch.log10(torch.tensor(float(num_utts))).long().item() + 1
utt_fmt = "u_{{:0{}d}}".format(num_digits)
wfn_fmt = "{}_{{:0{}d}}".format("w" if channels else "u", num_digits)
_write_token2id(tokens_path, tokens == "id2token")
if channels:
_write_wc2utt(channels_path, channels == "utt2wc", "A", num_utts)
if not os.path.isdir(ref_dir):
os.makedirs(ref_dir)
exps = []
for utt_idx in range(num_utts):
utt_id = utt_fmt.format(utt_idx)
wfn_id = wfn_fmt.format(utt_idx)
num_tokens = torch.randint(max_tokens + 1, (1,)).long().item()
ids = torch.randint(26, (num_tokens,)).long()
starts = torch.randint(max_start, (num_tokens,)).long()
durs = torch.randint(max_dur, (num_tokens,)).long()
ends = starts + durs
tok = torch.stack([ids, starts, ends], -1)
torch.save(tok, os.path.join(ref_dir, utt_id + ".pt"))
for token, start, end in sorted(tok.tolist(), key=lambda x: x[1:]):
start = start * frame_shift_ms / 1000
end = end * frame_shift_ms / 1000
exps.append(
"{} A {} {} {}".format(
wfn_id, start, end - start, chr(token + ord("a"))
)
)
args = [
ref_dir,
tokens_path,
ctm_path,
"--frame-shift-ms={}".format(frame_shift_ms),
]
if tokens == "token2id":
args.append("--swap")
if channels == "utt2wc":
args.append("--utt2wc={}".format(channels_path))
elif channels == "wc2utt":
args.append("--wc2utt={}".format(channels_path))
assert not command_line.torch_token_data_dir_to_ctm(args)
with open(ctm_path, "r") as ctm:
acts = ctm.readlines()
assert len(exps) == len(acts)
for exp, act in zip(exps, acts):
assert exp.strip() == act.strip()
@pytest.mark.cpu
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("per_utt", [True, False])
@pytest.mark.parametrize(
"tokens,collapse_vowels",
[("token2id", False), ("id2token", True), ("id2token", False), (None, False)],
)
@pytest.mark.parametrize("norm", [True, False])
@pytest.mark.parametrize("with_timing", [True, False])
def test_compute_torch_token_data_dir_error_rates(
temp_dir, per_utt, tokens, collapse_vowels, norm, with_timing
):
torch.manual_seed(3820)
tokens_path = os.path.join(temp_dir, "map")
ignore_path = os.path.join(temp_dir, "ignore")
replace_path = os.path.join(temp_dir, "replace")
out_path = os.path.join(temp_dir, "out")
ref_dir = os.path.join(temp_dir, "ref")
hyp_dir = os.path.join(temp_dir, "hyp")
if not os.path.isdir(ref_dir):
os.makedirs(ref_dir)
if not os.path.isdir(hyp_dir):
os.makedirs(hyp_dir)
num_elem = 40
missing_prob = 0.1
max_fillers = 5
ignore_chars = "_#"
replace_chars = "*/"
with open(ignore_path, "w") as f:
if tokens is None:
f.write(" ".join([str(ord(c) - ord("a")) for c in ignore_chars]))
else:
f.write(" ".join(ignore_chars))
f.flush()
with open(replace_path, "w") as f:
for c in replace_chars:
if tokens is None:
f.write(
"{} {}\n".format(ord(c) - ord("a"), ord(ignore_chars[0]) - ord("a"))
)
else:
f.write("{} {}\n".format(c, ignore_chars[0]))
ignore_chars += replace_chars
tuples = (
("cat", "bat", 1, 1),
("transubstantiation", "transwhatnow", 10, 10),
("cool", "coal", 1, 0),
("zap", "zippy", 3, 2),
)
self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->Wav2Vec2Conformer
class Wav2Vec2ConformerGroupNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.activation = ACT2FN[config.feat_extract_activation]
self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->Wav2Vec2Conformer
class Wav2Vec2ConformerPositionalConvEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = nn.Conv1d(
config.hidden_size,
config.hidden_size,
kernel_size=config.num_conv_pos_embeddings,
padding=config.num_conv_pos_embeddings // 2,
groups=config.num_conv_pos_embedding_groups,
)
if is_deepspeed_zero3_enabled():
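# (comment added for clarity) weight_norm replaces conv.weight with the pair
# weight_g / weight_v; under DeepSpeed ZeRO-3 the sharded weight has to be
# gathered on one rank before that split, and the resulting parameters are
# registered as external so the partitioner keeps tracking them.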
import deepspeed
with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
else:
self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
self.padding = Wav2Vec2ConformerSamePadLayer(config.num_conv_pos_embeddings)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
class Wav2Vec2ConformerRotaryPositionalEmbedding(nn.Module):
"""Rotary positional embedding
Reference : https://blog.eleuther.ai/rotary-embeddings/ Paper: https://arxiv.org/pdf/2104.09864.pdf
"""
def __init__(self, config):
super().__init__()
dim = config.hidden_size // config.num_attention_heads
base = config.rotary_embedding_base
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
self.cached_sequence_length = None
self.cached_rotary_positional_embedding = None
def forward(self, hidden_states):
sequence_length = hidden_states.shape[1]
if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None:
return self.cached_rotary_positional_embedding
self.cached_sequence_length = sequence_length
time_stamps = torch.arange(sequence_length).type_as(self.inv_freq)
freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq)
embeddings = torch.cat((freqs, freqs), dim=-1)
cos_embeddings = embeddings.cos()[:, None, None, :]
sin_embeddings = embeddings.sin()[:, None, None, :]
self.cached_rotary_positional_embedding = torch.stack([cos_embeddings, sin_embeddings])
return self.cached_rotary_positional_embedding
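# Illustrative usage sketch (added for clarity, not part of the original model
# code): it shows the shape of the cached [cos, sin] stack returned by the
# rotary embedding above. `_DemoConfig` is a hypothetical stand-in carrying
# only the attributes this module reads.
def _rotary_embedding_shape_demo():
    class _DemoConfig:
        hidden_size = 16
        num_attention_heads = 4
        rotary_embedding_base = 10000
    embedding = Wav2Vec2ConformerRotaryPositionalEmbedding(_DemoConfig())
    dummy_hidden_states = torch.zeros(2, 8, _DemoConfig.hidden_size)  # (batch, seq_len, hidden)
    rotary = embedding(dummy_hidden_states)
    # rotary[0] holds the cos terms and rotary[1] the sin terms, each shaped
    # (seq_len, 1, 1, head_size); here that is (8, 1, 1, 4).
    return rotary.shape  # torch.Size([2, 8, 1, 1, 4])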
class Wav2Vec2ConformerRelPositionalEmbedding(nn.Module):
"""Relative positional encoding module."""
def __init__(self, config):
super().__init__()
self.max_len = config.max_source_positions
self.d_model = config.hidden_size
self.pe = None
self.extend_pe(torch.tensor(0.0).expand(1, self.max_len))
def extend_pe(self, x):
# Reset the positional encodings
if self.pe is not None:
# self.pe contains both positive and negative parts
# the length of self.pe is 2 * input_len - 1
if self.pe.size(1) >= x.size(1) * 2 - 1:
if self.pe.dtype != x.dtype or self.pe.device != x.device:
self.pe = self.pe.to(dtype=x.dtype, device=x.device)
return
# Suppose `i` is the position of query vector and `j` is the
# position of key vector. We use positive relative positions when keys
# are to the left (i>j) and negative relative positions otherwise (i<j).
pe_positive = torch.zeros(x.size(1), self.d_model)
pe_negative = torch.zeros(x.size(1), self.d_model)
position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, self.d_model, 2, dtype=torch.float32) * -(math.log(10000.0) / self.d_model)
)
pe_positive[:, 0::2] = torch.sin(position * div_term)
pe_positive[:, 1::2] = torch.cos(position * div_term)
pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
# Reverse the order of positive indices and concat both positive and
# negative indices. This is used to support the shifting trick
# as in https://arxiv.org/abs/1901.02860
pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
pe_negative = pe_negative[1:].unsqueeze(0)
pe = torch.cat([pe_positive, pe_negative], dim=1)
self.pe = pe.to(device=x.device, dtype=x.dtype)
def forward(self, hidden_states: torch.Tensor):
self.extend_pe(hidden_states)
start_idx = self.pe.size(1) // 2 - hidden_states.size(1) + 1
end_idx = self.pe.size(1) // 2 + hidden_states.size(1)
relative_position_embeddings = self.pe[:, start_idx:end_idx]
return relative_position_embeddings
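# Note (added for clarity, derived from extend_pe above): for an input of
# length L the buffer self.pe has shape (1, 2*L - 1, d_model); index L - 1
# corresponds to relative position 0, with positive relative positions to its
# left and negative ones to its right. forward() slices the central window of
# length 2*seq_len - 1 around that centre.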
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->Wav2Vec2Conformer
class Wav2Vec2ConformerSamePadLayer(nn.Module):
def __init__(self, num_conv_pos_embeddings):
super().__init__()
self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
def forward(self, hidden_states):
if self.num_pad_remove > 0:
hidden_states = hidden_states[:, :, : -self.num_pad_remove]
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->Wav2Vec2Conformer
class Wav2Vec2ConformerFeatureEncoder(nn.Module):
"""Construct the features from raw audio waveform"""
def __init__(self, config):
super().__init__()
if config.feat_extract_norm == "group":
conv_layers = [Wav2Vec2ConformerGroupNormConvLayer(config, layer_id=0)] + [
Wav2Vec2ConformerNoLayerNormConvLayer(config, layer_id=i + 1)
for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [
Wav2Vec2ConformerLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = nn.ModuleList(conv_layers)
self.gradient_checkpointing = False
self._requires_grad = True
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def forward(self, input_values):
hidden_states = input_values[:, None]
# make sure hidden_states require grad for gradient_checkpointing
if self._requires_grad and self.training:
hidden_states.requires_grad = True
for conv_layer in self.conv_layers:
if self._requires_grad and self.gradient_checkpointing and self.training:
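# (comment added for clarity) torch.utils.checkpoint.checkpoint re-runs the
# wrapped callable during the backward pass; wrapping each conv layer in a
# small closure is the usual way to pass an nn.Module through it.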
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(conv_layer),
hidden_states,
)
else:
hidden_states = conv_layer(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->Wav2Vec2Conformer
class Wav2Vec2ConformerFeatureProjection(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
self.dropout = nn.Dropout(config.feat_proj_dropout)
def forward(self, hidden_states):
# non-projected hidden states are needed for quantization
norm_hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(norm_hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states, norm_hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->Wav2Vec2Conformer
class Wav2Vec2ConformerFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate_dropout = nn.Dropout(config.activation_dropout)
self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.output_dropout = nn.Dropout(config.hidden_dropout)
def forward(self, hidden_states):
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states)
return hidden_states
class Wav2Vec2ConformerConvolutionModule(nn.Module):
"""Convolution block used in the conformer block"""
def __init__(self, config):
super().__init__()
if (config.conv_depthwise_kernel_size - 1) % 2 == 1:
raise ValueError("`config.conv_depthwise_kernel_size` should be an odd number for 'SAME' padding")
self.layer_norm = nn.LayerNorm(config.hidden_size)
self.pointwise_conv1 = torch.nn.Conv1d(
config.hidden_size,
2 * config.hidden_size,
kernel_size=1,
stride=1,
padding=0,
bias=False,
)
self.glu = torch.nn.GLU(dim=1)
self.depthwise_conv = torch.nn.Conv1d(
config.hidden_size,
config.hidden_size,
config.conv_depthwise_kernel_size,
stride=1,
padding=(config.conv_depthwise_kernel_size - 1) // 2,
groups=config.hidden_size,
bias=False,
)
self.batch_norm = torch.nn.BatchNorm1d(config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
self.pointwise_conv2 = torch.nn.Conv1d(
config.hidden_size,
config.hidden_size,
kernel_size=1,
stride=1,
padding=0,
bias=False,
)
self.dropout = torch.nn.Dropout(config.conformer_conv_dropout)
def forward(self, hidden_states):
hidden_states = self.layer_norm(hidden_states)
# exchange the temporal dimension and the feature dimension
hidden_states = hidden_states.transpose(1, 2)
# GLU mechanism
# => (batch, 2*channel, dim)
hidden_states = self.pointwise_conv1(hidden_states)
# => (batch, channel, dim)
hidden_states = self.glu(hidden_states)
# 1D Depthwise Conv
hidden_states = self.depthwise_conv(hidden_states)
hidden_states = self.batch_norm(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.pointwise_conv2(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
class Wav2Vec2ConformerSelfAttention(nn.Module):
"""Construct an Wav2Vec2ConformerSelfAttention object.
Can be enhanced with rotary or relative position embeddings.
"""
def __init__(self, config):
super().__init__()
self.head_size = config.hidden_size // config.num_attention_heads
self.num_heads = config.num_attention_heads
self.position_embeddings_type = config.position_embeddings_type
self.linear_q = nn.Linear(config.hidden_size, config.hidden_size)
self.linear_k = nn.Linear(config.hidden_size, config.hidden_size)
self.linear_v = nn.Linear(config.hidden_size, config.hidden_size)
self.linear_out = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(p=config.attention_dropout)
if self.position_embeddings_type == "relative":
# linear transformation for positional encoding
self.linear_pos = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
# these two learnable biases are used in matrix c and matrix d
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
self.pos_bias_u = nn.Parameter(torch.Tensor(self.num_heads, self.head_size))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.num_heads, self.head_size))
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
relative_position_embeddings: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
# self-attention mechanism
batch_size, sequence_length, hidden_size = hidden_states.size()
# make sure query/key states can be != value states
query_key_states = hidden_states
value_states = hidden_states
if self.position_embeddings_type == "rotary":
if relative_position_embeddings is None:
raise ValueError(
"`relative_position_embeddings` has to be defined when `self.position_embeddings_type == 'rotary'"
)
query_key_states = self._apply_rotary_embedding(query_key_states, relative_position_embeddings)
# project query_key_states and value_states
query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size)
# => (batch, head, time1, d_k)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
if self.position_embeddings_type == "relative":
if relative_position_embeddings is None:
raise ValueError(
"`relative_position_embeddings` has to be defined when `self.position_embeddings_type =="
" 'relative'"
)
# apply relative_position_embeddings to qk scores
# as proposed in Transformer_XL: https://arxiv.org/abs/1901.02860
scores = self._apply_relative_embeddings(
query=query, key=key, relative_position_embeddings=relative_position_embeddings
)
else:
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size)
# apply attention_mask if necessary
if attention_mask is not None:
scores = scores + attention_mask
# => (batch, head, time1, time2)
probs = torch.softmax(scores, dim=-1)
probs = self.dropout(probs)
# => (batch, head, time1, d_k)
hidden_states = torch.matmul(probs, value)
# => (batch, time1, hidden_size)
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size)
hidden_states = self.linear_out(hidden_states)
return hidden_states, probs
def _apply_rotary_embedding(self, hidden_states, relative_position_embeddings):
batch_size, sequence_length, hidden_size = hidden_states.size()
hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads, self.head_size)
cos = relative_position_embeddings[0, :sequence_length, ...]
sin = relative_position_embeddings[1, :sequence_length, ...]
# rotate hidden_states with rotary embeddings
hidden_states = hidden_states.transpose(0, 1)
rotated_states_begin = hidden_states[..., : self.head_size // 2]
rotated_states_end = hidden_states[..., self.head_size // 2 :]
rotated_states = torch.cat((-rotated_states_end, rotated_states_begin), dim=rotated_states_begin.ndim - 1)
hidden_states = (hidden_states * cos) + (rotated_states * sin)
hidden_states = hidden_states.transpose(0, 1)
# reshape back to (batch, seq_len, hidden_size)
hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads * self.head_size)
return hidden_states
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from ..metrics import calcFindableMinObs
from ..metrics import calcFindableNightlyLinkages
from .create_test_data import createTestDataSet
MIN_OBS = range(5, 10)
def test_calcFindableMinObs():
### Test calcFindableMinObs against the test data set
column_mapping = {
"truth" : "truth",
"obs_id" : "obs_id",
}
for min_obs in MIN_OBS:
# Generate test data set
observations_test, all_truths_test, linkage_members_test, all_linkages_test, summary_test = createTestDataSet(
min_obs,
5,
20)
findable_observations = calcFindableMinObs(observations_test, min_obs=min_obs, column_mapping=column_mapping)
for truth in findable_observations[column_mapping["truth"]].unique():
# Make sure all observations are correctly identified as findable
obs_ids = findable_observations[findable_observations[column_mapping["truth"]].isin([truth])]["obs_ids"].values[0]
np.testing.assert_array_equal(obs_ids, observations_test[observations_test["truth"] == truth]["obs_id"].values)
# Make sure all objects with not findable are not included in the findable_observations dataframe
not_findable_truths_test = all_truths_test[all_truths_test["findable"] == 0]["truth"].values
assert len(findable_observations[findable_observations[column_mapping["truth"]].isin(not_findable_truths_test)]) == 0
return
def test_calcFindableNightlyLinkages():
### Test calcFindableNightlyLinkages against the test data set
column_mapping = {
"truth" : "truth",
"obs_id" : "obs_id",
"time" : "time",
"night" : "night",
}
# Generate test data set
observations_test, all_truths_test, linkage_members_test, all_linkages_test, summary_test = createTestDataSet(
5,
5,
20)
# For every single truth in blue, their observations are separated by a half day
for truth in observations_test[observations_test["class"] == "blue"]["truth"].unique():
mask = (observations_test["truth"] == truth)
observations_test.loc[mask, "time"] = np.arange(0, len(observations_test[mask])/2, 0.5)
# For every single truth in red, their observations are separated by a quarter day
for truth in observations_test[observations_test["class"] == "red"]["truth"].unique():
mask = (observations_test["truth"] == truth)
observations_test.loc[mask, "time"] = np.arange(0, len(observations_test[mask])/4, 0.25)
# Observation times for greens are selected at random from the available ones in blues and greens
observations_test.loc[observations_test["class"] == "green", "time"] = np.random.choice(
observations_test[~observations_test["time"].isna()]["time"].values,
len(observations_test[observations_test["class"] == "green"]),
replace=True)
# Lets add a night column which is simply the floor of the observation time
observations_test["night"] = np.floor(observations_test["time"]).astype(int)
# With a maximum separation of 0.25 only reds should be findable
findable_observations = calcFindableNightlyLinkages(
observations_test,
linkage_min_obs=2,
max_obs_separation=0.25,
min_linkage_nights=1,
column_mapping=column_mapping
)
for truth in findable_observations[column_mapping["truth"]].unique():
# Make sure all observations are correctly identified as findable
obs_ids = findable_observations[findable_observations[column_mapping["truth"]].isin([truth])]["obs_ids"].values[0]
np.testing.assert_array_equal(obs_ids, observations_test[observations_test["truth"] == truth]["obs_id"].values)
# Make sure that only reds were found
classes_found = observations_test[observations_test["truth"].isin(findable_observations[column_mapping["truth"]].values)]["class"].unique()
np.testing.assert_array_equal(classes_found, np.array(["red"]))
# With a maximum separation of 0.5 reds and blues should be findable
findable_observations = calcFindableNightlyLinkages(
observations_test,
linkage_min_obs=2,
max_obs_separation=0.5,
min_linkage_nights=1,
column_mapping=column_mapping
)
for truth in findable_observations[column_mapping["truth"]].unique():
# Make sure all observations are correctly identified as findable
obs_ids = findable_observations[findable_observations[column_mapping["truth"]].isin([truth])]["obs_ids"].values[0]
np.testing.assert_array_equal(obs_ids, observations_test[observations_test["truth"] == truth]["obs_id"].values)
# Make sure that only reds and blues were found
classes_found = observations_test[observations_test["truth"].isin(findable_observations[column_mapping["truth"]].values)]["class"].unique()
np.testing.assert_array_equal(classes_found, np.array(["red", "blue"]))
# With a minimum linkage length of 1, everything should be findable
findable_observations = calcFindableNightlyLinkages(
observations_test,
linkage_min_obs=1,
max_obs_separation=0.5,
min_linkage_nights=1,
column_mapping=column_mapping
)
for truth in findable_observations[column_mapping["truth"]].unique():
# Make sure all observations are correctly identified as findable
obs_ids = findable_observations[findable_observations[column_mapping["truth"]].isin([truth])]["obs_ids"].values[0]
np.testing.assert_array_equal(obs_ids, observations_test[observations_test["truth"] == truth]["obs_id"].values)
# Make sure that all reds, blues, and greens were found
classes_found = observations_test[observations_test["truth"].isin(findable_observations[column_mapping["truth"]].values)]["class"].unique()
np.testing.assert_array_equal(classes_found, np.array(["red", "blue", "green"]))
# With a minimum linkage length of 100, nothing should be findable
findable_observations = calcFindableNightlyLinkages(
observations_test,
linkage_min_obs=100,
max_obs_separation=0.5,
min_linkage_nights=1,
column_mapping=column_mapping
)
assert len(findable_observations) == 0
### These next few tests focus on red05 which has the following observations:
# obs_id truth class time night
# obs00000 red05 red 0.00 0
# obs00008 red05 red 0.25 0
# obs00013 red05 red 0.50 0
# obs00024 red05 red 0.75 0
# obs00049 red05 red 1.00 1
# obs00051 red05 red 1.25 1
# obs00057 red05 red 1.50 1
# obs00070 red05 red 1.75 1
# obs00085 red05 red 2.00 2
# obs00096 red05 red 2.25 2
# Let's set min_linkage_nights to 3 with a maximum separation of 0.25; only red05 should be findable
findable_observations = calcFindableNightlyLinkages(
observations_test,
linkage_min_obs=2,
max_obs_separation=0.25,
min_linkage_nights=3,
column_mapping=column_mapping
)
for truth in findable_observations[column_mapping["truth"]].unique():
# Make sure all observations are correctly identified as findable
obs_ids = findable_observations[findable_observations[column_mapping["truth"]].isin([truth])]["obs_ids"].values[0]
np.testing.assert_array_equal(obs_ids, observations_test[observations_test["truth"] == truth]["obs_id"].values)
# Make sure that only red05 should be findable
classes_found = observations_test[observations_test["truth"].isin(findable_observations[column_mapping["truth"]].values)]["class"].unique()
np.testing.assert_array_equal(classes_found, np.array(["red"]))
np.testing.assert_array_equal(findable_observations["truth"].values, np.array(["red05"]))
# Keep min_linkage_nights at 3 with a maximum separation of 0.25 and move the last of red05's observations outside the time separation,
# leaving only two viable tracklet nights; red05 should no longer be findable
observations_test.loc[observations_test["obs_id"] == "obs00096", "time"] = 2.26
# obs_id truth class time night findable
# obs00000 red05 red 0.00 0 Y
# obs00008 red05 red 0.25 0 Y
# obs00013 red05 red 0.50 0 Y
# obs00024 red05 red 0.75 0 Y
# obs00049 red05 red 1.00 1 Y
# obs00051 red05 red 1.25 1 Y
# obs00057 red05 red 1.50 1 Y
# obs00070 red05 red 1.75 1 Y
# obs00085 red05 red 2.00 2 N
# obs00096 red05 red 2.26 2 N
# red05 findable : N
findable_observations = calcFindableNightlyLinkages(
observations_test,
linkage_min_obs=2,
max_obs_separation=0.25,
min_linkage_nights=3,
column_mapping=column_mapping
)
# Red05 should no longer be findable
classes_found = observations_test[observations_test["truth"].isin(findable_observations[column_mapping["truth"]].values)]["class"].unique()
np.testing.assert_array_equal(classes_found, np.array([]))
# Set the observation back to its original time
observations_test.loc[observations_test["obs_id"] == "obs00096", "time"] = 2.25
# Keep min_linkage_nights at 3 with a maximum separation of 0.25 and shift two of the observations on night 1 so they
# cannot form a linkage; red05 should still be findable from the remaining observations, but those shifted observations
# should not be returned among its findable observations
observations_test.loc[observations_test["obs_id"] == "obs00057", "time"] = 1.51
observations_test.loc[observations_test["obs_id"] == "obs00070", "time"] = 1.77
# This observation needs to be shifted so that it is more than 0.25 from the previous exposure time
# so we dont count a linkage across nights
observations_test.loc[observations_test["obs_id"] == "obs00085", "time"] = 2.10
# obs_id truth class time night findable
# obs00000 red05 red 0.00 0 Y
# obs00008 red05 red 0.25 0 Y
# obs00013 red05 red 0.50 0 Y
# obs00024 red05 red 0.75 0 Y
# obs00049 red05 red 1.00 1 Y
# obs00051 red05 red 1.25 1 Y
# obs00057 red05 red 1.51 1 N
# obs00070 red05 red 1.77 1 N
# obs00085 red05 red 2.10 2 Y
# obs00096 red05 red 2.25 2 Y
# red05 findable : Y
findable_observations = calcFindableNightlyLinkages(
observations_test,
linkage_min_obs=2,
max_obs_separation=0.25,
min_linkage_nights=3,
column_mapping=column_mapping
)
for truth in findable_observations[column_mapping["truth"]].unique():
# Make sure all observations are correctly identified as findable
obs_ids = findable_observations[findable_observations[column_mapping["truth"]].isin([truth])]["obs_ids"].values[0]
np.testing.assert_array_equal(obs_ids, observations_test[(observations_test["truth"] == truth) & (~observations_test["obs_id"].isin(["obs00057", "obs00070"]))]["obs_id"].values)
# Make sure that only red05 should be findable
classes_found = observations_test[observations_test["truth"].isin(findable_observations[column_mapping["truth"]].values)]["class"].unique()
np.testing.assert_array_equal(classes_found, np.array(["red"]))
np.testing.assert_array_equal(findable_observations["truth"].values, np.array(["red05"]))
# Set the observations back to their previous values
observations_test.loc[observations_test["obs_id"] == "obs00057", "time"] = 1.50
observations_test.loc[observations_test["obs_id"] == "obs00070", "time"] = 1.75
observations_test.loc[observations_test["obs_id"] == "obs00085", "time"] = 2.00
# Keep min_linkage_nights at 3 with a maximum separation of 0.25 and remove some of red05's observations
# so that there are only two observations on each night -- it should still be the only object findable
observations_test = observations_test[~observations_test["obs_id"].isin(["obs00000", "obs00008", "obs00057", "obs00070"])]
# obs_id truth class time night findable
# obs00013 red05 red 0.50 0 Y
# obs00024 red05 red 0.75 0 Y
# obs00049 red05 red 1.00 1 Y
# obs00051 red05 red 1.25 1 Y
# obs00085 red05 red 2.00 2 Y
# obs00096 red05 red 2.25 2 Y
# red05 findable : Y
findable_observations = calcFindableNightlyLinkages(
observations_test,
linkage_min_obs=2,
max_obs_separation=0.25,
min_linkage_nights=3,
column_mapping=column_mapping
)
for truth in findable_observations[column_mapping["truth"]].unique():
# Make sure all observations are correctly identified as findable
obs_ids = findable_observations[findable_observations[column_mapping["truth"]].isin([truth])]["obs_ids"].values[0]
np.testing.assert_array_equal(obs_ids, observations_test[observations_test["truth"] == truth]["obs_id"].values)
# Make sure that only red05 should be findable
classes_found = observations_test[observations_test["truth"].isin(findable_observations[column_mapping["truth"]].values)]["class"].unique()
np.testing.assert_array_equal(classes_found, np.array(["red"]))
np.testing.assert_array_equal(findable_observations["truth"].values, np.array(["red05"]))
# Keep min_linkage_nights at 3 with a maximum separation of 0.25 and move one of red05's observations outside the time
# separation for a linkage -- it now should not be findable
observations_test.loc[observations_test["obs_id"] == "obs00096", "time"] = 2.26
# obs_id truth class time night findable
# obs00013 red05 red 0.50 0 Y
# obs00024 red05 red 0.75 0 Y
# obs00049 red05 red 1.00 1 Y
# obs00051 red05 red 1.25 1 Y
# obs00085 red05 red 2.00 2 N
# obs00096 red05 red 2.26 2 N
# red05 findable : N
findable_observations = calcFindableNightlyLinkages(
observations_test,
linkage_min_obs=2,
max_obs_separation=0.25,
min_linkage_nights=3,
column_mapping=column_mapping
)
# Red05 | |
#!/usr/bin/python3
"""
Library for Casambi Cloud api.
Request api_key at: https://developer.casambi.com/
"""
import uuid
import json
import logging
import datetime
import socket
from pprint import pformat
from typing import Tuple
from colorsys import rgb_to_hsv
import requests
import websocket
_LOGGER = logging.getLogger(__name__)
class CasambiApiException(Exception):
"""Custom exception"""
class ConfigException(Exception):
"""Custom exception"""
class Casambi:
"""
Casambi api object
"""
def __init__(self, *, api_key, email, user_password, network_password, wire_id=1):
self.sock = None
self.web_sock = None
self.connected = False
self.network_id = None
self._session_id = None
self.wire_id = wire_id
self.api_key = api_key
self.email = email
self.user_password = user_password
self.network_password = network_password
def create_user_session(self):
"""
Function for creating a user session in Casambis cloud api
"""
url = "https://door.casambi.com/v1/users/session/"
headers = {"Content-type": "application/json", "X-Casambi-Key": self.api_key}
payload = {"email": self.email, "password": self.user_password}
response = requests.post(url, json=payload, headers=headers)
if response.status_code != 200:
reason = "create_user_session: headers: {},".format(headers)
reason += " payload: {},".format(payload)
reason += 'message: "Got a invalid status_code",'
reason += "status_code: {},".format(response.status_code)
reason += "#response: {}".format(response.text)
raise CasambiApiException(reason)
data = response.json()
self._session_id = data["sessionId"]
self.network_id = data["networks"][list(data["networks"].keys())[0]]["id"]
_LOGGER.debug(f"data from create_user_session: {pformat(data)}")
return data["sessionId"]
def create_network_session(self):
"""
Function for creating a network session in Casambis cloud api
"""
url = "https://door.casambi.com/v1/networks/session/"
headers = {
"X-Casambi-Key": self.api_key,
"Content-type": "application/json",
}
payload = {"email": self.email, "password": <PASSWORD>}
response = requests.post(url, json=payload, headers=headers)
if response.status_code != 200:
reason = "create_network_session: failed with"
reason += f"status_code: {response.status_code},"
reason += f"response: {response.text}"
raise CasambiApiException(reason)
data = response.json()
self.network_id = list(data.keys())[0]
self._session_id = data[self.network_id]["sessionId"]
return data.keys()
def get_network_information(self):
"""
Function for getting the network information from Casambis cloud api
"""
# GET https://door.casambi.com/v1/networks/{id}
url = f"https://door.casambi.com/v1/networks/{self.network_id}"
if not self._session_id:
raise CasambiApiException("No session id is set. Need to login!")
headers = {
"X-Casambi-Key": self.api_key,
"X-Casambi-Session": self._session_id,
"Content-type": "application/json",
}
response = requests.get(url, headers=headers)
if response.status_code != 200:
reason = "get_network_information: url: {}".format(url)
reason += "failed with status_code: {},".format(response.status_code)
reason += "response: {}".format(response.text)
raise CasambiApiException(reason)
data = response.json()
dbg_msg = f"get_network_information: headers: {headers}"
dbg_msg += "response: {data}"
_LOGGER.debug(dbg_msg)
return data
def get_unit_state(self, *, unit_id):
"""
Getter for getting the unit state from Casambis cloud api
"""
# GET https://door.casambi.com/v1/networks/{id}
url = "https://door.casambi.com/v1/networks/"
url += f"{self.network_id}/units/{unit_id}/state"
if not self._session_id:
raise CasambiApiException("No session id is set. Need to login!")
headers = {
"X-Casambi-Key": self.api_key,
"X-Casambi-Session": self._session_id,
"Content-type": "application/json",
}
response = requests.get(url, headers=headers)
if response.status_code != 200:
reason = "get_unit_state: url: {}".format(url)
reason += "failed with status_code: {},".format(response.status_code)
reason += "response: {}".format(response.text)
raise CasambiApiException(reason)
data = response.json()
dbg_msg = f"get_unit_state: headers: {headers} response: {data}"
_LOGGER.debug(dbg_msg)
return data
def ws_open(self) -> bool:
"""
openWireSucceed API key authentication failed. Either given key
was invalid or WebSocket functionality is not enabled for it.
keyAuthenticateFailed API key authentication failed. Given key was
invalid.
keyAuthorizeFailed API key authorize failed. Given key has not been
authorized or WebSocket functionality is not enabled for it.
invalidSession Either access to given network is not authorized
by session or given session is invalid.
invalidValueType Received values are not in correct value type,
for example when expecting a number but receiving string value instead.
invalidData Received data is invalid and cannot be
processed, for example expected list of items is in wrong data format.
"""
url = "wss://door.casambi.com/v1/bridge/"
reference = "{}".format(uuid.uuid1())
if not self._session_id:
raise CasambiApiException("No session id is set. Need to login!")
if not self.network_id:
raise CasambiApiException("Network id needs to be set!")
message = {
"method": "open",
"id": self.network_id,
"session": self._session_id,
"ref": reference,
"wire": self.wire_id, # wire id
"type": 1, # Client type, use value 1 (FRONTEND)
}
self.web_sock = websocket.create_connection(url, subprotocols=[self.api_key])
self.web_sock.send(json.dumps(message))
result = self.web_sock.recv()
data = json.loads(result)
_LOGGER.debug(f"ws_open response: {data}")
# Can get what ever like:
# {'wire': 1, 'method': 'peerChanged', 'online': True}
#
# if data['wireStatus'] != 'openWireSucceed':
# reason = "ws_open_message: url: {},".format(url)
# reason += "message: {},".format(message)
# reason += 'reason: "failed with to open wire!"'
# reason += "response: {}".format(data)
# raise CasambiApiException(reason)
if "wireStatus" in data and data["wireStatus"] == "openWireSucceed":
return True
if (
(("method" in data) and (data["method"] == "peerChanged"))
and (("wire" in data) and (data["wire"] == self.wire_id))
and (("online" in data) and data["online"])
):
return True
return False
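# Hedged usage sketch (added for illustration, not part of the original
# library); argument values are placeholders:
#
#   casambi = Casambi(api_key="...", email="...", user_password="...",
#                     network_password="...")
#   casambi.create_user_session()   # sets the session id and network id
#   if casambi.ws_open():           # opens the websocket wire
#       casambi.turn_unit_on(unit_id=1)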
def turn_unit_off(self, *, unit_id: int):
"""
Function for turning a unit of using the websocket
"""
# Unit_id needs to be an integer
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
raise CasambiApiException(
"expected unit_id to be an integer, got: {}".format(unit_id)
)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
target_controls = {"Dimmer": {"value": 0}}
message = {
"wire": self.wire_id,
"method": "controlUnit",
"id": unit_id,
"targetControls": target_controls,
}
self.web_sock.send(json.dumps(message))
def turn_unit_on(self, *, unit_id):
"""
Response on ok:
{'wire': 1, 'method': 'peerChanged', 'online': True}
"""
# Unit_id needs to be an integer
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
reason = "expected unit_id to be an integer,"
reason += "got: {}".format(unit_id)
raise CasambiApiException(reason)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
target_controls = {"Dimmer": {"value": 1}}
message = {
"wire": self.wire_id,
"method": "controlUnit",
"id": unit_id,
"targetControls": target_controls,
}
self.web_sock.send(json.dumps(message))
def set_unit_vertical(self, *, unit_id: int, value: float):
"""
Support for setting vertical (dual led value)
"""
target_value = value
# Unit_id needs to be an integer
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
raise CasambiApiException(
f"expected unit_id to be an integer, got: {unit_id}"
)
# Value should be a float between 0 and 1
if isinstance(value, float):
target_value = float(value)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
if target_value < 0.0:
raise CasambiApiException("Value needs to be between 0 and 1")
if target_value > 1.0:
raise CasambiApiException("Value needs to be between 0 and 1")
target_controls = {"Vertical": {"value": target_value}}
message = {
"wire": self.wire_id,
"method": "controlUnit",
"id": unit_id,
"targetControls": target_controls,
}
self.web_sock.send(json.dumps(message))
def set_unit_target_controls(self, *, unit_id, target_controls):
"""
Response on ok:
{'wire': 1, 'method': 'peerChanged', 'online': True}
"""
# Unit_id needs to be an integer
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
raise CasambiApiException(
f"expected unit_id to be an integer, got: {unit_id}"
)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
message = {
"wire": self.wire_id,
"method": "controlUnit",
"id": unit_id,
"targetControls": target_controls,
}
self.web_sock.send(json.dumps(message))
def set_unit_value(self, *, unit_id: int, value):
"""
Response on ok:
{'wire': 1, 'method': 'peerChanged', 'online': True}
"""
# Unit_id needs to be an integer
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
raise CasambiApiException(
f"expected unit_id to be an integer, got: {unit_id}"
)
if not (value >= 0 and value <= 1):
raise CasambiApiException("value needs to be between 0 and 1")
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
target_controls = {"Dimmer": {"value": value}}
message = {
"wire": self.wire_id,
"method": "controlUnit",
"id": unit_id,
"targetControls": target_controls,
}
self.web_sock.send(json.dumps(message))
def set_unit_rgbw_color(
self, *, unit_id: int, color_value: Tuple[int, int, int, int]
):
"""
Setter for RGB color
"""
target_controls = None
(red, green, blue, white) = color_value
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
raise CasambiApiException(
"expected unit_id to be an integer, got: {}".format(unit_id)
)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
white_value = white / 255.0
# 'name': 'white', 'type': 'White', 'value': 0.0
target_controls = {
"RGB": {"rgb": f"rgb({red}, {green}, {blue})"},
"Colorsource": {"source": "RGB"},
"White": {"value": white_value},
}
message = {
"wire": self.wire_id,
"method": "controlUnit",
"id": unit_id,
"targetControls": target_controls,
}
self.web_sock.send(json.dumps(message))
def set_unit_rgb_color(
self, *, unit_id: int, color_value: Tuple[int, int, int], send_rgb_format=False
):
"""
Setter for RGB color
"""
target_controls = None
(red, green, blue) = color_value
(hue, sat, value) = rgb_to_hsv(red, green, blue)
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
raise CasambiApiException(
"expected unit_id to be an integer, got: {}".format(unit_id)
)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
if not send_rgb_format:
target_controls = {
"RGB": {"hue": round(hue, 1), "sat": round(sat, 1)},
"Colorsource": {"source": "RGB"},
}
else:
target_controls = {
"RGB": {"rgb": | |
# dfcs_vipa/calibration.py (from the gkowzan/dfcs_vipa repository)
"""Calibrate InGaAs camera nonlinearity.
Uses a series of measurements of a Gaussian beam/flat field imaged on the
camera with different integration times to establish the real dependence
between the energy incident upon the camera and the number of counts
returned by the acquisition software.
The camera is linear only in a very small range at very low collected energies.
The calibration function is used in following way:
calibrated = [cam*cal_func(cam) for cam in cameras]
where cameras is a list of camera images for different integration times.
"""
import logging
from itertools import combinations, product
from collections import namedtuple
import codecs
from lxml import etree
import numpy as np
from scipy.optimize import curve_fit, least_squares
from scipy.interpolate import (interp1d, splrep, splev)
from dfcs_vipa.collect import average_h5
from dfcs_vipa.experiment import smooth, find_index
import dfcs_vipa
log = logging.getLogger(__name__)
default_nodes = np.hstack([
np.linspace(0, 150, 15),
np.linspace(150, 4100, 20)])
def linear(x, a):
return a*x
def rational(x, a):
return 1/(1 + a*x)
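# Hedged sketch (not part of the original module): one way to build the
# `cal_func` mentioned in the module docstring is a simple interpolator over
# measured counts and fitted correction ratios; `counts` and `ratios` here are
# hypothetical measurement results.
def make_cal_func(counts, ratios):
    """Return a callable mapping camera counts to a correction factor."""
    # Outside the measured range fall back to a factor of 1 (no correction).
    return interp1d(counts, ratios, bounds_error=False, fill_value=1.0)
# Applied as in the module docstring:
#   cal_func = make_cal_func(counts, ratios)
#   calibrated = [cam * cal_func(cam) for cam in cameras]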
###############################################################################
# piecewise linear function for RatioPiecewise class calibration #
###############################################################################
def piecewise_linear(x, nodes, slopes, fill_value=1.0):
"""Evaluate piecewise linear function.
The function is defined for values between min(nodes) and
    max(nodes). The intercept of the first segment is fixed to one and
    the intercept of each subsequent segment is fixed to the value the
    previous segment reaches at its right node, so the function is continuous.
The intervals of each linear segment are half-closed [nodes[i],
nodes[i+1]). The segments are evaluated by calculating
slopes[i]*(x-nodes[i])+intercepts[i]
for corresponding segments. That is, the 'x' values are shifted
before applying the slope.
Parameters
----------
x : float or array_like
The points for which the function should be evaluated.
nodes : ndarray
Positions of the nodes separating segments.
slopes : ndarray
Linear slopes of segments.
fill_value : float
Value for points beyond [min(nodes), max(nodes)].
Returns
-------
float or array_like
Values of piecewise linear function at `x`.
"""
if len(slopes) != len(nodes) - 1:
raise ValueError("Length of 'slopes' should be one less than length of"
" 'nodes'")
# calculate the intercepts
intercepts = np.ones(len(slopes))
for i in range(1, len(slopes)):
intercepts[i] = slopes[i-1]*(nodes[i]-nodes[i-1]) + intercepts[i-1]
# evaluate the function
result = np.full(len(x), fill_value)
# idx = np.empty(len(x), dtype=np.bool)
for i in range(len(slopes)):
# print(x)
# print(nodes[i])
# idx[:] = np.logical_and(x >= nodes[i], x < nodes[i+1])
idx = np.where(np.logical_and(x >= nodes[i], x < nodes[i+1]))
result[idx] = slopes[i]*(x[idx]-nodes[i]) + intercepts[i]
return result
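# Illustrative sketch, not part of the original module: a minimal run of
# piecewise_linear() on a toy node/slope layout. The numbers are invented for
# demonstration only; real calibrations use nodes such as `default_nodes`
# above together with fitted slopes.
def _piecewise_linear_example():
    nodes = np.array([0.0, 100.0, 200.0])
    slopes = np.array([0.001, -0.002])
    x = np.array([-10.0, 50.0, 150.0, 250.0])
    # intercepts are 1.0 on [0, 100) and 1.0 + 0.001*100 = 1.1 on [100, 200);
    # points outside [min(nodes), max(nodes)) keep fill_value = 1.0
    return piecewise_linear(x, nodes, slopes)  # -> array([1.0, 1.05, 1.0, 1.0])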
###############################################################################
# helper functions for reading measurement data #
###############################################################################
def read_time(fmt_string_xcf, num):
log.debug('reading integration time {:d}'.format(num))
path = fmt_string_xcf.format(num)
with codecs.open(str(path), 'r', 'utf-8') as f:
xml_string = '\n'.join(f.readlines()[1:])
tree = etree.fromstring(xml_string)
return float(tree[0].get('value'))
def read_cam_avg(fmt_string, fmt_string_dc, num):
log.debug('reading camera average {:d}'.format(num))
path = fmt_string.format(num)
path_dc = fmt_string_dc.format(num)
return average_h5(path, path_dc)
###############################################################################
# Mix-in class for reading measurement data #
###############################################################################
class ReadDataMixin:
def __init__(self, fmt_string, fmt_string_dc, fmt_list):
self.fmt_string = fmt_string + '.hdf5'
self.fmt_string_dc = fmt_string_dc + '.hdf5'
self.fmt_string_xcf = fmt_string_dc + '.xcf'
self.fmt_list = fmt_list
self.times = None
self.cameras = None
def read_time(self, num):
# log.debug('reading integration time {:d}'.format(num))
return read_time(self.fmt_string_xcf, num)
def read_times(self):
log.info('reading integration times')
self.times = np.array(
[self.read_time(i) for i in self.fmt_list]
)
def read_cam_avg(self, num):
# log.debug('reading camerage average {:d}'.format(num))
return read_cam_avg(self.fmt_string, self.fmt_string_dc, num)
def read_cameras(self):
log.info('reading camera averages')
self.cameras = [self.read_cam_avg(i) for i in self.fmt_list]
###############################################################################
# class for minimizing sigma of Gaussian beam ratios #
###############################################################################
FitResult = namedtuple('FitResult', 'x cov_x infodict mesg ier')
class RatioCalibration(ReadDataMixin):
def __init__(self, fmt_string, fmt_string_dc, fmt_list,
initial_coeffs=None):
super(RatioCalibration, self).__init__(fmt_string, fmt_string_dc,
fmt_list)
self.pairs = list(combinations(fmt_list, 2))
self.coeffs = None
if initial_coeffs is None:
self.skip_initial = False
            # self.nodes is expected to be defined by the subclass before
            # super().__init__ is called (see RatioCalibrationPiecewise)
            self.initial_coeffs = np.full(len(self.nodes)-1, 1e-5)
else:
self.skip_initial = True
self.initial_coeffs = initial_coeffs
self._window = (slice(None, dfcs_vipa.ROWS), slice(None, dfcs_vipa.COLS))
self.total = dfcs_vipa.ROWS*dfcs_vipa.COLS
self.res = None
def corr_func(self, coeffs, x):
raise NotImplementedError("Implement this method in a subclass.")
@property
def window(self):
return self._window
@window.setter
def window(self, val):
self._window = val
rows = val[0].stop-val[0].start
cols = val[1].stop-val[1].start
self.total = rows*cols
def initial_fit(self, pos, fit_range=slice(None, None)):
log.info('performing the initial ratio fit')
powers = np.array(
[cam[pos] for cam in self.cameras]
)
popt, pcov = curve_fit(linear, self.times[fit_range],
powers[fit_range])
res = linear(self.times, *popt)/powers
def res_func(coeffs):
return self.corr_func(coeffs, powers)-res
coeffs = least_squares(
res_func,
self.initial_coeffs,
bounds=(0.0, 2*0.00075))
# coeffs = leastsq(res_func, self.initial_coeffs, full_output=True)[0]
self.initial_coeffs = coeffs.x
def residuals_old(self, coeffs):
# apply the correction
rows = self.window[0]
cols = self.window[1]
fixed = [cam[rows, cols]*self.corr_func(coeffs, cam[rows, cols])
for cam in self.cameras]
# calculate the residuals
len_pairs = len(self.pairs)
if self.res is None:
self.res = np.empty(self.total*len_pairs)
for i, (ip1, ip2) in zip(range(len_pairs), self.pairs):
ilow, ihigh = i*self.total, (i+1)*self.total
self.res[ilow:ihigh] = (fixed[ip1]/fixed[ip2]).reshape(-1)
self.res[ilow:ihigh] = self.res[ilow:ihigh]-self.res[ilow:ihigh].mean()
return self.res
def narrow_cameras(self):
rows = self.window[0]
cols = self.window[1]
self.cameras_narrowed = np.empty((len(self.fmt_list), self.total))
for i in range(len(self.cameras)):
self.cameras_narrowed[i] = self.cameras[i][rows, cols].reshape(-1)
def correct(self, coeffs):
# apply the correction
cams = self.cameras_narrowed
return cams*self.corr_func(coeffs, cams)
def residuals(self, coeffs):
# apply the correction
fixed = self.correct(coeffs)
# calculate the residuals
len_pairs = len(self.pairs)
# len_pairs = len(self.fmt_list)-1
if self.res is None:
self.res = np.empty(self.total*len_pairs)
# print((fixed[-1]/fixed[0]).reshape(-1))
for i, (ip1, ip2) in zip(range(len_pairs), self.pairs):
# for i in range(len_pairs):
ilow, ihigh = i*self.total, (i+1)*self.total
# self.res[ilow:ihigh] = (fixed[-1]/fixed[i]).reshape(-1)
self.res[ilow:ihigh] = (fixed[ip1]/fixed[ip2]).reshape(-1)
self.res[ilow:ihigh] = self.res[ilow:ihigh]/self.res[ilow:ihigh].mean()-1
# print(res)
return np.copy(self.res)
def fit(self, **kwargs):
log.info('performing the main ratio fit')
self.fit_results = least_squares(
self.residuals,
self.initial_coeffs,
bounds=(0.0, 2*0.00075),
verbose=2,
max_nfev=10,
# epsfcn=1e-2,
**kwargs)
def cal_func(cam):
return self.corr_func(self.fit_results.x, cam)
return cal_func
def calibrate(self, pos, init_range, **kwargs):
self.read_times()
self.read_cameras()
self.narrow_cameras()
if not self.skip_initial:
self.initial_fit(pos, init_range)
self.cal_func = self.fit(**kwargs)
class RatioCalibrationPiecewise(RatioCalibration):
def __init__(self, *args, **kwargs):
self.nodes = np.hstack([
np.linspace(-10, 150, 40),
np.arange(150, 360, 15),
np.linspace(360, 3000, 20),
np.linspace(3000, 4100, 25)
])
super(RatioCalibrationPiecewise, self).__init__(*args, **kwargs)
def corr_func(self, coeffs, x):
c = x.view()
c.shape = (np.prod(x.shape))
ret = piecewise_linear(c, self.nodes, coeffs)
return ret.reshape(x.shape)
#############################################################################
# functions for rescaling different measurements and fitting nonlinearity #
# directly #
#############################################################################
class RescaleTimesMinimum:
def __init__(self, ref_times, ref, other_times, other):
self.ref_times = ref_times
self.ref = ref
self.other_times = other_times
self.other = other
self.ref_func = interp1d(ref_times, ref, bounds_error=True)
self.t_max = ref_times.max()
self.s_max = ref_times.max()/other_times.min() # max. scale factor
self.s_min = ref_times.min()/other_times.max()
self.fit_results = None
def residuals(self, scale, res_points):
"""Calculate residuals for least squares fitting."""
t_scaled = self.other_times*scale
t_min = max([min(t_scaled), min(self.ref_times)])
t_max = min([max(t_scaled), max(self.ref_times)])
t_res = np.linspace(t_min, t_max, res_points)
other_func = interp1d(t_scaled, self.other, bounds_error=True)
return self.ref_func(t_res)-other_func(t_res)
def rescale_time(self, res_points=1000, guess=20):
"""Match 'other' curve to 'ref' by rescaling int. times.
Args:
- res_points: number of residuals.
Return:
- rescaled 'times'"""
self.fit_results = least_squares(
self.residuals, guess,
bounds=(self.s_min, self.s_max),
args=(res_points, ),
verbose=0,
jac='3-point',
ftol=1e-15,
xtol=1e-15,
gtol=1e-15,
loss='cauchy'
)
if self.fit_results.cost > 1000:
            raise RuntimeError('Fit did not converge, cost = {:.2f}'.format(
self.fit_results.cost
))
# plt.figure()
# plt.plot(self.ref_times, self.ref)
# plt.plot(self.other_times*self.fit_results.x[0], self.other)
# t_min = max([min(self.other_times*self.fit_results.x[0]),
# min(self.ref_times)])
# t_max = min([max(self.other_times*self.fit_results.x[0]),
# max(self.ref_times)])
# plt.axvline(x=t_min)
# plt.axvline(x=t_max)
# t_res = np.linspace(t_min, t_max, res_points)
# plt.plot(t_res, self.ref_func(t_res), 'ro')
return self.other_times*self.fit_results.x[0]
class RescaleTimes:
def __init__(self, ref_times, ref, other_times, other):
self.ref_times = ref_times
self.ref = ref
        self.other_times = other_times
self.other = other
self.ref_func = interp1d(ref_times, ref, bounds_error=True)
self.t_min = np.min(ref_times)
self.s_max = np.max(ref_times)/self.t_min # max. scale factor
self.fit_results = None
def residuals(self, scale, res_points):
"""Calculate residuals for least squares fitting."""
t_scaled = self.other_times/scale
t_max = np.max(t_scaled)
t_res = np.linspace(self.t_min, t_max, res_points)
other_func = interp1d(t_scaled, self.other, bounds_error=True)
return self.ref_func(t_res)-other_func(t_res)
def rescale_time(self, res_points=1000, guess=20):
"""Match 'other' curve to 'ref' by rescaling int. times.
Args:
- res_points: number of residuals.
Return:
- rescaled 'times'"""
self.fit_results = least_squares(
self.residuals, guess,
bounds=(1.0, self.s_max),
args=(res_points, ),
verbose=0,
jac='3-point',
loss='soft_l1',
ftol=1e-12,
xtol=1e-12,
gtol=1e-12
)
# plt.figure()
# plt.plot(self.times, self.ref)
# plt.plot(self.times/self.fit_results.x[0], self.other)
return self.other_times/self.fit_results.x[0]
###############################################################################
# class for calibrating nonlinearity directly from time-power dependence #
###############################################################################
class DirectCalibration(ReadDataMixin):
"""Provides rescaling function to linearize raw data.
Uses the dependence of digital counts on integration time at constant
illumination to calculate the deviation from linearity. Rescales the time
dependence from different pixels to obtain more data points.
"""
def __init__(self, fmt_string, fmt_string_dc, fmt_list,
nodes=default_nodes):
super(DirectCalibration, self).__init__(fmt_string, fmt_string_dc,
fmt_list)
self.gathered = None
self.nodes = nodes
self.initial_coeffs = np.full(len(self.nodes)-1, 1e-5)
self.cal_func = None
def gather(self, pos_mins):
# powers_max = [cam[pos_max] for cam in self.cameras]
powers_min = [[cam[pos_min] for cam in self.cameras]
for pos_min in pos_mins]
powers_sample = np.array([cameras[-1] for cameras in powers_min])
powers_max = powers_min.pop(np.argmin(powers_sample))
times = self.times
powers = powers_max
while powers_min:
power_min = powers_min.pop()
try:
times_min = RescaleTimesMinimum(
times,
powers,
self.times,
power_min
).rescale_time()
except RuntimeError:
log.info('skipping pixel')
continue
times = np.hstack([times_min, times])
            powers = np.hstack([power_min, powers])
# bioimageit/bioimageit_core
# -*- coding: utf-8 -*-
"""BioImagePy local metadata service.
This module implements the local service for metadata
(Data, DataSet and Experiment) management.
This local service reads, writes and queries metadata from a database
made of JSON files in the file system.
Classes
-------
MetadataServiceProvider
"""
import os
import os.path
from pathlib import Path
import json
import re
from shutil import copyfile
from bioimageit_core.metadata.exceptions import MetadataServiceError
from bioimageit_core.metadata.containers import (METADATA_TYPE_RAW,
METADATA_TYPE_PROCESSED,
RawDataContainer,
ProcessedDataContainer,
ProcessedDataInputContainer,
DataSetContainer,
ExperimentContainer,
RunContainer,
RunInputContainer,
RunParameterContainer,
)
def md_file_path(md_uri: str) -> str:
"""get metadata file directory path
Returns
----------
str
The name of the metadata file directory path
"""
abspath = os.path.abspath(md_uri)
return os.path.dirname(abspath)
def relative_path(file: str, reference_file: str):
"""convert file absolute path to a relative path wrt reference_file
Parameters
----------
reference_file
Reference file
file
        File to get the relative path for
Returns
-------
    relative path of file wrt reference_file
"""
separator = os.sep
file = file.replace(separator + separator, separator)
reference_file = reference_file.replace(separator + separator, separator)
for i in range(len(file)):
common_part = reference_file[0:i]
if common_part not in file:
break
last_separator = common_part.rfind(separator)
short_reference_file = reference_file[last_separator + 1:]
number_of_sub_folder = short_reference_file.count(separator)
short_file = file[last_separator + 1:]
for i in range(number_of_sub_folder):
short_file = '..' + separator + short_file
return short_file
def absolute_path(file: str, reference_file: str):
"""convert file relative to reference_file into an absolute path
Parameters
----------
reference_file
Reference file
file
File to get absolute path
Returns
-------
    absolute path of file wrt reference_file
"""
if os.path.isfile(file):
return os.path.abspath(file)
separator = os.sep
last_separator = reference_file.rfind(separator)
canonical_path = reference_file[0: last_separator + 1]
return simplify_path(canonical_path + file)
def simplify_path(path: str) -> str:
"""Simplify a path by removing ../"""
if path.find('..') < 0:
return path
separator = os.sep
keep_folders = path.split(separator)
found = True
while found:
pos = -1
folders = keep_folders
for i in range(len(folders)):
if folders[i] == '..':
pos = i
break
if pos > -1:
keep_folders = []
for i in range(0, pos - 1):
keep_folders.append(folders[i])
for i in range(pos + 1, len(folders)):
keep_folders.append(folders[i])
else:
found = False
clean_path = ''
for i in range(len(keep_folders)):
clean_path += keep_folders[i]
if i < len(keep_folders) - 1:
clean_path += separator
return clean_path
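# Illustrative sketch, not part of the original module: how the path helpers
# compose, assuming POSIX-style separators. The paths are invented for
# demonstration; absolute_path() joins with the reference file's directory
# whenever `file` does not already exist on disk.
def _path_helpers_example():
    ref = '/experiment/data/raw.md.json'
    target = '/experiment/data/raw/img.tif'
    rel = relative_path(target, ref)    # -> 'raw/img.tif'
    back = absolute_path(rel, ref)      # -> '/experiment/data/raw/img.tif'
    clean = simplify_path('/experiment/../other/data.md.json')  # -> '/other/data.md.json'
    return rel, back, clean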
def normalize_path_sep(path: str) -> str:
"""Normalize the separators of a path
Parameters
----------
path: str
Path to normalize
Returns
-------
path normalized
"""
p1 = path.replace('/', os.sep).replace('\\\\', os.sep)
return p1
def to_unix_path(path: str) -> str:
"""Transform a path to unix path
Parameters
----------
path: str
Path to unixify
Returns
-------
Path with unix separator
"""
return path.replace('\\\\', '/').replace('\\', '/')
class LocalMetadataServiceBuilder:
"""Service builder for the metadata service"""
def __init__(self):
self._instance = None
def __call__(self, **_ignored):
if not self._instance:
self._instance = LocalMetadataService()
return self._instance
class LocalMetadataService:
"""Service for local metadata management"""
def __init__(self):
self.service_name = 'LocalMetadataService'
def _read_json(self, md_uri: str):
"""Read the metadata from the a json file"""
if os.path.getsize(md_uri) > 0:
with open(md_uri) as json_file:
return json.load(json_file)
def _write_json(self, metadata: dict, md_uri: str):
"""Write the metadata to the a json file"""
with open(md_uri, 'w') as outfile:
json.dump(metadata, outfile, indent=4)
def read_rawdata(self, md_uri: str) -> RawDataContainer:
"""Read a raw data metadata from the database
Parameters
----------
md_uri
URI of the data
Returns
-------
a RawDataContainer that stores the raw data metadata
"""
md_uri = os.path.abspath(md_uri)
if os.path.isfile(md_uri) and md_uri.endswith('.md.json'):
metadata = self._read_json(md_uri)
container = RawDataContainer()
container.type = metadata['origin']['type']
container.name = metadata['common']['name']
container.author = metadata['common']['author']
container.date = metadata['common']['date']
container.format = metadata['common']['format']
# copy the url if absolute, append md_uri path otherwise
container.uri = absolute_path(normalize_path_sep(metadata['common']['url']), md_uri)
if 'tags' in metadata:
for key in metadata['tags']:
container.tags[key] = metadata['tags'][key]
return container
raise MetadataServiceError('Metadata file format not supported')
def write_rawdata(self, container: RawDataContainer, md_uri: str):
"""Write a raw data metadata to the database
Parameters
----------
container
object that contains the raw data metadata to write
md_uri
URI of the data
"""
md_uri = os.path.abspath(md_uri)
metadata = dict()
metadata['origin'] = dict()
metadata['origin']['type'] = METADATA_TYPE_RAW()
metadata['common'] = dict()
metadata['common']['name'] = container.name
metadata['common']['author'] = container.author
metadata['common']['date'] = container.date
metadata['common']['format'] = container.format
metadata['common']['url'] = to_unix_path(relative_path(container.uri, md_uri))
metadata['tags'] = dict()
for key in container.tags:
metadata['tags'][key] = container.tags[key]
self._write_json(metadata, md_uri)
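    # Illustrative sketch, not part of the original service: the .md.json
    # document write_rawdata() produces for a container whose data file sits
    # next to the metadata file (all field values below are invented, and the
    # "type" value is whatever METADATA_TYPE_RAW() returns):
    #   {
    #       "origin": {"type": "<METADATA_TYPE_RAW()>"},
    #       "common": {"name": "img_001", "author": "jdoe", "date": "2021-01-01",
    #                  "format": "tif", "url": "img_001.tif"},
    #       "tags": {"population": "A"}
    #   }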
def read_processeddata(self, md_uri: str) -> ProcessedDataContainer:
"""Read a processed data metadata from the database
Parameters
----------
md_uri
URI of the data
Returns
-------
ProcessedDataContainer: object that contains the read processed
data metadata
"""
md_uri = os.path.abspath(md_uri)
if os.path.isfile(md_uri) and md_uri.endswith('.md.json'):
metadata = self._read_json(md_uri)
container = ProcessedDataContainer()
container.name = metadata['common']['name']
container.author = metadata['common']['author']
container.date = metadata['common']['date']
container.format = metadata['common']['format']
container.uri = absolute_path(normalize_path_sep(metadata['common']['url']), md_uri)
# origin run
container.run_uri = absolute_path(normalize_path_sep(metadata['origin']['runurl']),
md_uri)
# origin input
for input_ in metadata['origin']['inputs']:
container.inputs.append(
ProcessedDataInputContainer(
input_['name'],
absolute_path(normalize_path_sep(input_['url']), md_uri),
input_['type'],
)
)
# origin
if 'name' in metadata['origin']['output']:
container.output['name'] = metadata['origin']['output']["name"]
if 'label' in metadata['origin']['output']:
container.output['label'] = \
metadata['origin']['output']['label']
return container
raise MetadataServiceError('Metadata file format not supported')
def write_processeddata(self, container: ProcessedDataContainer,
md_uri: str):
"""Write a processed data metadata to the database
Parameters
----------
container
object that contains the processed data metadata to write
md_uri
URI of the data
"""
md_uri = os.path.abspath(md_uri)
metadata = dict()
# common
metadata['common'] = dict()
metadata['common']['name'] = container.name
metadata['common']['author'] = container.author
metadata['common']['date'] = container.date
metadata['common']['format'] = container.format
metadata['common']['url'] = to_unix_path(relative_path(container.uri, md_uri))
# origin type
metadata['origin'] = dict()
metadata['origin']['type'] = METADATA_TYPE_PROCESSED()
# run url
metadata['origin']['runurl'] = to_unix_path(relative_path(container.run_uri, md_uri))
# origin inputs
metadata['origin']['inputs'] = list()
for input_ in container.inputs:
metadata['origin']['inputs'].append(
{
'name': input_.name,
'url': to_unix_path(relative_path(input_.uri, md_uri)),
'type': input_.type,
}
)
        # origin output
metadata['origin']['output'] = {
'name': container.output['name'],
'label': container.output['label'],
}
self._write_json(metadata, md_uri)
def read_rawdataset(self, md_uri: str) -> DataSetContainer:
"""Read a raw dataset metadata from the database
Parameters
----------
md_uri
URI of the dataset
Returns
-------
DataSetContainer: object that contains the read dataset metadata
"""
md_uri = os.path.abspath(md_uri)
if os.path.isfile(md_uri) and md_uri.endswith('.md.json'):
metadata = self._read_json(md_uri)
container = DataSetContainer()
container.name = metadata['name']
for uri in metadata['urls']:
container.uris.append(absolute_path(normalize_path_sep(uri), md_uri))
return container
return DataSetContainer()
def write_rawdataset(self, container: DataSetContainer, md_uri: str):
"""Write a raw dataset metadata to the database
Parameters
----------
container
object that contains the raw dataset metadata to write
md_uri
URI of the dataset
"""
md_uri = os.path.abspath(md_uri)
metadata = dict()
metadata['name'] = container.name
metadata['urls'] = list()
for uri in container.uris:
metadata['urls'].append(to_unix_path(relative_path(uri, md_uri)))
self._write_json(metadata, md_uri)
def read_processeddataset(self, md_uri: str) -> DataSetContainer:
"""Read a processed dataset metadata from the database
Parameters
----------
md_uri
URI of the dataset
Returns
-------
DataSetContainer: object that contains the read dataset metadata
"""
md_uri = os.path.abspath(md_uri)
if os.path.isfile(md_uri) and md_uri.endswith('.md.json'):
metadata = self._read_json(md_uri)
container = DataSetContainer()
container.name = metadata['name']
for uri in metadata['urls']:
container.uris.append(absolute_path(normalize_path_sep(uri), md_uri))
return container
return DataSetContainer()
def write_processeddataset(self, container: DataSetContainer, md_uri: str):
"""Write a processed dataset metadata to the database
Parameters
----------
container
object that contains the processed dataset metadata to write
md_uri
URI of the dataset
"""
md_uri = os.path.abspath(md_uri)
metadata = dict()
metadata['name'] = container.name
metadata['urls'] = list()
for uri in container.uris:
metadata['urls'].append(to_unix_path(relative_path(uri, md_uri)))
self._write_json(metadata, md_uri)
def add_run_processeddataset(self, run: RunContainer, dataset_md_uri: str):
"""Add a run to a processed dataset
Parameters
----------
run
Container of the Run metadata
dataset_md_uri
URI of the ProcessedDataset
"""
# create run URI
dataset_md_uri = os.path.abspath(dataset_md_uri)
dataset_dir = md_file_path(dataset_md_uri)
run_md_file_name = "run.md.json"
runid_count = 0
while os.path.isfile(os.path.join(dataset_dir, run_md_file_name)):
runid_count += 1
run_md_file_name = "run_" + str(runid_count) + ".md.json"
run_uri = os.path.join(dataset_dir, run_md_file_name)
# write run
self.write_run(run, run_uri)
return run_uri
def create_processed_dataset(self, name: str, experiment_md_uri: str):
"""create a new processed dataset
Parameters
----------
name
Name of the processed dataset
experiment_md_uri
URI of the experiment that contains the dataset
"""
# create the dataset metadata
experiment_md_uri = os.path.abspath(experiment_md_uri)
experiment_dir = md_file_path(experiment_md_uri)
dataset_dir = os.path.join(experiment_dir, name)
if not os.path.isdir(dataset_dir):
os.mkdir(dataset_dir)
processeddataset_uri = os.path.join(
experiment_dir, name, 'processeddataset.md.json'
)
container = DataSetContainer()
container.name = name
self.write_processeddataset(container, processeddataset_uri)
print("experiment at:", experiment_md_uri)
print("create the processed dataset at:", processeddataset_uri)
# add the dataset to the experiment
experiment_container = self.read_experiment(experiment_md_uri)
experiment_container.processeddatasets.append(to_unix_path(processeddataset_uri))
self.write_experiment(experiment_container, experiment_md_uri)
return container, processeddataset_uri
def create_data_processeddataset(self, data: ProcessedDataContainer,
md_uri: str):
"""create a new data metadata in the dataset
The input data object must contain only the metadata (ie no
uri and no md_uri).
This method generate the uri and the md_uri and save all the
metadata
Parameters
----------
data
metadata of the processed data to create
md_uri
            URI of the processed dataset
# chillaxor/blogbin - public/yum-3.2.28/yumcommands.py
#!/usr/bin/python -t
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2006 Duke University
# Written by <NAME>
"""
Classes for subcommands of the yum command line interface.
"""
import os
import cli
from yum import logginglevels
from yum import _
from yum import misc
import yum.Errors
import operator
import locale
import fnmatch
import time
from yum.i18n import utf8_width, utf8_width_fill, to_unicode
import yum.config
def checkRootUID(base):
"""
Verify that the program is being run by the root user.
@param base: a YumBase object.
"""
if base.conf.uid != 0:
base.logger.critical(_('You need to be root to perform this command.'))
raise cli.CliError
def checkGPGKey(base):
if not base.gpgKeyCheck():
for repo in base.repos.listEnabled():
if (repo.gpgcheck or repo.repo_gpgcheck) and repo.gpgkey == '':
msg = _("""
You have enabled checking of packages via GPG keys. This is a good thing.
However, you do not have any GPG public keys installed. You need to download
the keys for packages you wish to install and install them.
You can do that by running the command:
rpm --import public.gpg.key
Alternatively you can specify the url to the key you would like to use
for a repository in the 'gpgkey' option in a repository section and yum
will install it for you.
For more information contact your distribution or package provider.
""")
base.logger.critical(msg)
raise cli.CliError
def checkPackageArg(base, basecmd, extcmds):
if len(extcmds) == 0:
base.logger.critical(
_('Error: Need to pass a list of pkgs to %s') % basecmd)
base.usage()
raise cli.CliError
def checkItemArg(base, basecmd, extcmds):
if len(extcmds) == 0:
base.logger.critical(_('Error: Need an item to match'))
base.usage()
raise cli.CliError
def checkGroupArg(base, basecmd, extcmds):
if len(extcmds) == 0:
base.logger.critical(_('Error: Need a group or list of groups'))
base.usage()
raise cli.CliError
def checkCleanArg(base, basecmd, extcmds):
VALID_ARGS = ('headers', 'packages', 'metadata', 'dbcache', 'plugins',
'expire-cache', 'rpmdb', 'all')
if len(extcmds) == 0:
base.logger.critical(_('Error: clean requires an option: %s') % (
", ".join(VALID_ARGS)))
for cmd in extcmds:
if cmd not in VALID_ARGS:
base.logger.critical(_('Error: invalid clean argument: %r') % cmd)
base.usage()
raise cli.CliError
def checkShellArg(base, basecmd, extcmds):
"""
Verify that the arguments given to 'yum shell' are valid.
yum shell can be given either no args, or exactly one argument,
which is the name of a file. If these are not met,
raise cli.CliError.
"""
if len(extcmds) == 0:
base.verbose_logger.debug(_("No argument to shell"))
elif len(extcmds) == 1:
base.verbose_logger.debug(_("Filename passed to shell: %s"),
extcmds[0])
if not os.path.isfile(extcmds[0]):
base.logger.critical(
_("File %s given as argument to shell does not exist."),
extcmds[0])
base.usage()
raise cli.CliError
else:
base.logger.critical(
_("Error: more than one file given as argument to shell."))
base.usage()
raise cli.CliError
class YumCommand:
def __init__(self):
self.done_command_once = False
def doneCommand(self, base, msg, *args):
if not self.done_command_once:
base.verbose_logger.log(logginglevels.INFO_2, msg, *args)
self.done_command_once = True
def getNames(self):
return []
def getUsage(self):
"""
@return: A usage string for the command, including arguments.
"""
raise NotImplementedError
def getSummary(self):
"""
@return: A one line summary of what the command does.
"""
raise NotImplementedError
def doCheck(self, base, basecmd, extcmds):
pass
def doCommand(self, base, basecmd, extcmds):
"""
@return: (exit_code, [ errors ]) where exit_code is:
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
return 0, [_('Nothing to do')]
def needTs(self, base, basecmd, extcmds):
return True
class InstallCommand(YumCommand):
def getNames(self):
return ['install']
def getUsage(self):
return _("PACKAGE...")
def getSummary(self):
return _("Install a package or packages on your system")
def doCheck(self, base, basecmd, extcmds):
checkRootUID(base)
checkGPGKey(base)
checkPackageArg(base, basecmd, extcmds)
def doCommand(self, base, basecmd, extcmds):
self.doneCommand(base, _("Setting up Install Process"))
try:
return base.installPkgs(extcmds)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
class UpdateCommand(YumCommand):
def getNames(self):
return ['update']
def getUsage(self):
return _("[PACKAGE...]")
def getSummary(self):
return _("Update a package or packages on your system")
def doCheck(self, base, basecmd, extcmds):
checkRootUID(base)
checkGPGKey(base)
def doCommand(self, base, basecmd, extcmds):
self.doneCommand(base, _("Setting up Update Process"))
try:
return base.updatePkgs(extcmds)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
class DistroSyncCommand(YumCommand):
def getNames(self):
return ['distribution-synchronization', 'distro-sync']
def getUsage(self):
return _("[PACKAGE...]")
def getSummary(self):
return _("Synchronize installed packages to the latest available versions")
def doCheck(self, base, basecmd, extcmds):
checkRootUID(base)
checkGPGKey(base)
def doCommand(self, base, basecmd, extcmds):
self.doneCommand(base, _("Setting up Distribution Synchronization Process"))
try:
base.conf.obsoletes = 1
return base.distroSyncPkgs(extcmds)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
def _add_pkg_simple_list_lens(data, pkg, indent=''):
""" Get the length of each pkg's column. Add that to data.
This "knows" about simpleList and printVer. """
na = len(pkg.name) + 1 + len(pkg.arch) + len(indent)
ver = len(pkg.version) + 1 + len(pkg.release)
rid = len(pkg.ui_from_repo)
if pkg.epoch != '0':
ver += len(pkg.epoch) + 1
for (d, v) in (('na', na), ('ver', ver), ('rid', rid)):
data[d].setdefault(v, 0)
data[d][v] += 1
def _list_cmd_calc_columns(base, ypl):
""" Work out the dynamic size of the columns to pass to fmtColumns. """
data = {'na' : {}, 'ver' : {}, 'rid' : {}}
for lst in (ypl.installed, ypl.available, ypl.extras,
ypl.updates, ypl.recent):
for pkg in lst:
_add_pkg_simple_list_lens(data, pkg)
if len(ypl.obsoletes) > 0:
for (npkg, opkg) in ypl.obsoletesTuples:
_add_pkg_simple_list_lens(data, npkg)
_add_pkg_simple_list_lens(data, opkg, indent=" " * 4)
data = [data['na'], data['ver'], data['rid']]
columns = base.calcColumns(data, remainder_column=1)
return (-columns[0], -columns[1], -columns[2])
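# Illustrative sketch, not part of the original module: what the width
# histograms built by _add_pkg_simple_list_lens() look like for a fake
# package object; FakePkg is a stand-in invented for this demonstration.
def _column_lens_example():
    class FakePkg:
        name, arch = 'bash', 'x86_64'
        version, release = '4.1.2', '9.el6'
        epoch, ui_from_repo = '0', 'base'
    data = {'na' : {}, 'ver' : {}, 'rid' : {}}
    _add_pkg_simple_list_lens(data, FakePkg())
    # name+arch width 4+1+6 = 11, version+release width 5+1+5 = 11, repo id 4
    return data  # -> {'na': {11: 1}, 'ver': {11: 1}, 'rid': {4: 1}}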
class InfoCommand(YumCommand):
def getNames(self):
return ['info']
def getUsage(self):
return "[PACKAGE|all|installed|updates|extras|obsoletes|recent]"
def getSummary(self):
return _("Display details about a package or group of packages")
def doCommand(self, base, basecmd, extcmds):
try:
highlight = base.term.MODE['bold']
ypl = base.returnPkgLists(extcmds, installed_available=highlight)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
else:
update_pkgs = {}
inst_pkgs = {}
local_pkgs = {}
columns = None
if basecmd == 'list':
# Dynamically size the columns
columns = _list_cmd_calc_columns(base, ypl)
if highlight and ypl.installed:
# If we have installed and available lists, then do the
# highlighting for the installed packages so you can see what's
# available to update, an extra, or newer than what we have.
for pkg in (ypl.hidden_available +
ypl.reinstall_available +
ypl.old_available):
key = (pkg.name, pkg.arch)
if key not in update_pkgs or pkg.verGT(update_pkgs[key]):
update_pkgs[key] = pkg
if highlight and ypl.available:
# If we have installed and available lists, then do the
# highlighting for the available packages so you can see what's
# available to install vs. update vs. old.
for pkg in ypl.hidden_installed:
key = (pkg.name, pkg.arch)
if key not in inst_pkgs or pkg.verGT(inst_pkgs[key]):
inst_pkgs[key] = pkg
if highlight and ypl.updates:
# Do the local/remote split we get in "yum updates"
for po in sorted(ypl.updates):
if po.repo.id != 'installed' and po.verifyLocalPkg():
local_pkgs[(po.name, po.arch)] = po
# Output the packages:
clio = base.conf.color_list_installed_older
clin = base.conf.color_list_installed_newer
clir = base.conf.color_list_installed_reinstall
clie = base.conf.color_list_installed_extra
rip = base.listPkgs(ypl.installed, _('Installed Packages'), basecmd,
highlight_na=update_pkgs, columns=columns,
highlight_modes={'>' : clio, '<' : clin,
'=' : clir, 'not in' : clie})
clau = base.conf.color_list_available_upgrade
clad = base.conf.color_list_available_downgrade
clar = base.conf.color_list_available_reinstall
clai = base.conf.color_list_available_install
rap = base.listPkgs(ypl.available, _('Available Packages'), basecmd,
highlight_na=inst_pkgs, columns=columns,
highlight_modes={'<' : clau, '>' : clad,
'=' : clar, 'not in' : clai})
rep = base.listPkgs(ypl.extras, _('Extra Packages'), basecmd,
columns=columns)
cul = base.conf.color_update_local
cur = base.conf.color_update_remote
rup = base.listPkgs(ypl.updates, _('Updated Packages'), basecmd,
highlight_na=local_pkgs, columns=columns,
highlight_modes={'=' : cul, 'not in' : cur})
# XXX put this into the ListCommand at some point
if len(ypl.obsoletes) > 0 and basecmd == 'list':
# if we've looked up obsolete lists and it's a list request
rop = [0, '']
print _('Obsoleting Packages')
# The tuple is (newPkg, oldPkg) ... so sort by new
for obtup in sorted(ypl.obsoletesTuples,
key=operator.itemgetter(0)):
base.updatesObsoletesList(obtup, 'obsoletes',
columns=columns)
else:
rop = base.listPkgs(ypl.obsoletes, _('Obsoleting Packages'),
basecmd, columns=columns)
rrap = base.listPkgs(ypl.recent, _('Recently Added Packages'),
basecmd, columns=columns)
            # extcmds is pop(0)'d if they pass a
# tests/test_cdec.py
from datetime import timezone, timedelta, datetime
from unittest.mock import MagicMock, patch
import re
import geopandas as gpd
import numpy as np
import pandas as pd
import pytest
from pandas import Timestamp
from metloom.pointdata import CDECPointData, PointDataCollection
from metloom.variables import CdecStationVariables
from tests.test_point_data import BasePointDataTest, side_effect_error
class TestCDECStation(BasePointDataTest):
@staticmethod
def cdec_daily_precip_response():
return [
{
"stationId": "TNY",
"durCode": "D",
"SENSOR_NUM": 2,
"sensorType": "SNOW WC",
"date": "2021-5-16 00:00",
"obsDate": "2021-5-16 00:00",
"value": -0.11,
"dataFlag": " ",
"units": "INCHES",
},
{
"stationId": "TNY",
"durCode": "D",
"SENSOR_NUM": 2,
"sensorType": "SNOW WC",
"date": "2021-5-17 00:00",
"obsDate": "2021-5-17 00:00",
"value": -0.10,
"dataFlag": " ",
"units": "INCHES",
},
{
"stationId": "TNY",
"durCode": "D",
"SENSOR_NUM": 2,
"sensorType": "SNOW WC",
"date": "2021-5-18 00:00",
"obsDate": "2021-5-18 00:00",
"value": -0.10,
"dataFlag": " ",
"units": "INCHES",
},
]
@staticmethod
def cdec_daily_temp_response():
return [
{
"stationId": "TNY",
"durCode": "D",
"SENSOR_NUM": 30,
"sensorType": "SNOW WC",
"date": "2021-5-15 00:00",
"obsDate": "2021-5-15 00:00",
"value": 2.1,
"dataFlag": " ",
"units": "DEG F",
},
{
"stationId": "TNY",
"durCode": "D",
"SENSOR_NUM": 30,
"sensorType": "SNOW WC",
"date": "2021-5-17 00:00",
"obsDate": "2021-5-17 00:00",
"value": 2.4,
"dataFlag": " ",
"units": "DEG F",
},
{
"stationId": "TNY",
"durCode": "D",
"SENSOR_NUM": 30,
"sensorType": "SNOW WC",
"date": "2021-5-18 00:00",
"obsDate": "2021-5-18 00:00",
"value": 2.2,
"dataFlag": " ",
"units": "DEG F",
},
]
@staticmethod
def cdec_hourly_temp_response():
return [
{
"stationId": "TNY",
"durCode": "D",
"SENSOR_NUM": 30,
"sensorType": "SNOW WC",
"date": "2021-5-15 00:00",
"obsDate": "2021-5-15 00:00",
"value": 2.1,
"dataFlag": " ",
"units": "DEG F",
},
{
"stationId": "TNY",
"durCode": "D",
"SENSOR_NUM": 30,
"sensorType": "SNOW WC",
"date": "2021-5-15 01:00",
"obsDate": "2021-5-15 01:00",
"value": 2.4,
"dataFlag": " ",
"units": "DEG F",
},
{
"stationId": "TNY",
"durCode": "D",
"SENSOR_NUM": 30,
"sensorType": "SNOW WC",
"date": "2021-5-15 03:00",
"obsDate": "2021-5-15 03:00",
"value": 2.2,
"dataFlag": " ",
"units": "DEG F",
},
]
@pytest.fixture(scope="function")
def tny_station(self):
return CDECPointData("TNY", "Tenaya Lake")
@pytest.fixture(scope="class")
def tny_daily_expected(self):
points = gpd.points_from_xy([-119.0], [42.0], z=[1000.0])
df = gpd.GeoDataFrame.from_dict(
[
{
"datetime": pd.Timestamp("2021-05-15 08:00:00+0000",
tz="UTC"),
"ACCUMULATED PRECIPITATION": np.nan,
"ACCUMULATED PRECIPITATION_units": np.nan,
"AVG AIR TEMP": 2.1,
"AVG AIR TEMP_units": "DEG F",
"site": "TNY",
"datasource": "CDEC"
},
{
"datetime": pd.Timestamp("2021-05-16 08:00:00+0000", tz="UTC"),
"ACCUMULATED PRECIPITATION": -0.11,
"ACCUMULATED PRECIPITATION_units": "INCHES",
"AVG AIR TEMP": np.nan,
"AVG AIR TEMP_units": np.nan,
"site": "TNY",
"datasource": "CDEC"
},
{
"datetime": pd.Timestamp("2021-05-17 08:00:00+0000", tz="UTC"),
"ACCUMULATED PRECIPITATION": -0.10,
"ACCUMULATED PRECIPITATION_units": "INCHES",
"AVG AIR TEMP": 2.4,
"AVG AIR TEMP_units": "DEG F",
"site": "TNY",
"datasource": "CDEC"
},
{
"datetime": pd.Timestamp("2021-05-18 08:00:00+0000", tz="UTC"),
"ACCUMULATED PRECIPITATION": -0.10,
"ACCUMULATED PRECIPITATION_units": "INCHES",
"AVG AIR TEMP": 2.2,
"AVG AIR TEMP_units": "DEG F",
"site": "TNY",
"datasource": "CDEC"
},
],
geometry=[points[0]] * 4,
)
# needed to reorder the columns for the pd testing compare
df = df.filter(
[
"datetime",
"geometry",
"site",
"measurementDate",
"ACCUMULATED PRECIPITATION",
"ACCUMULATED PRECIPITATION_units",
"AVG AIR TEMP",
"AVG AIR TEMP_units",
"datasource"
]
)
df.set_index(keys=["datetime", "site"], inplace=True)
return df
@staticmethod
def tny_meta_return():
return {
"STATION": [
{
"SENS_LONG_NAME": "SNOW, WATER CONTENT",
"ELEVATION": 1000.0,
"LATITUDE": 42.0,
"LONGITUDE": -119.0,
}
]
}
@classmethod
def tny_side_effect(cls, url, **kwargs):
mock = MagicMock()
params = kwargs["params"]
if params.get("dur_code") == "D" and params.get('SensorNums') == "2":
mock.json.return_value = cls.cdec_daily_precip_response()
elif params.get("dur_code") == "D" and params.get('SensorNums') == "30":
mock.json.return_value = cls.cdec_daily_temp_response()
elif params.get("dur_code") == "H":
raise NotImplementedError()
elif "getStationInfo" in url:
mock.json.return_value = cls.tny_meta_return()
else:
raise ValueError("unknown scenario")
return mock
@classmethod
def tny_hourly_side_effect(cls, url, **kwargs):
mock = MagicMock()
params = kwargs["params"]
if params.get("dur_code") == 'H':
mock.json.return_value = cls.cdec_hourly_temp_response()
elif "getStationInfo" in url:
mock.json.return_value = cls.tny_meta_return()
else:
mock.json.return_value = []
return mock
@classmethod
def station_search_side_effect(cls, *args, **kargs):
url = args[0]
sensor_num = re.findall(r'.*&sensor=(\d+)&', url)[0]
if sensor_num == "3":
return cls.station_search_response()
elif sensor_num == "18":
return [
pd.DataFrame.from_records(
[
(
"AAA",
"A Fake Station",
"TUOLUMNE R",
"TUOLUMNE",
-119.0,
37.0,
9900,
"CA Dept of Water Resources/DFM-Hydro-SMN",
np.nan,
),
(
"DAN",
"<NAME>",
"TUOLUMNE R",
"TUOLUMNE",
-119.257,
37.897,
9800,
"CA Dept of Water Resources/DFM-Hydro-SMN",
np.nan,
),
(
"TNY",
"<NAME>",
"<NAME>",
"MARIPOSA",
-119.448,
37.838,
8150,
"CA Dept of Water Resources/DFM-Hydro-SMN",
np.nan,
),
(
"BBB",
"B Fake Station",
"TUOLUMNE R",
"TUOLUMNE",
-119.5,
37.5,
9905,
"CA Dept of Water Resources/DFM-Hydro-SMN",
np.nan,
),
],
columns=[
"ID",
"Station Name",
"River Basin",
"County",
"Longitude",
"Latitude",
"ElevationFeet",
"Operator",
"Map",
],
)
]
else:
raise ValueError(f"{sensor_num} is not configured")
@staticmethod
def station_search_response():
return [
pd.DataFrame.from_records(
[
(
"GIN",
"GIN FLAT",
"<NAME>",
"MARIPOSA",
-119.773,
37.767,
7050,
"CA Dept of Water Resources/DFM-Hydro-SMN",
np.nan,
),
(
"DAN",
"<NAME>",
"<NAME>",
"TUOLUMNE",
-119.257,
37.897,
9800,
"CA Dept of Water Resources/DFM-Hydro-SMN",
np.nan,
),
(
"TNY",
"<NAME>",
"<NAME>",
"MARIPOSA",
-119.448,
37.838,
8150,
"CA Dept of Water Resources/DFM-Hydro-SMN",
np.nan,
),
(
"GFL",
"GIN FLAT (COURSE)",
"<NAME>",
"MARIPOSA",
-119.773,
37.765,
7000,
"Yosemite National Park",
np.nan,
),
(
"TUM",
"<NAME>",
"<NAME>",
"TUOLUMNE",
-119.350,
37.873,
8600,
"CA Dept of Water Resources/DFM-Hydro-SMN",
np.nan,
),
(
"SLI",
"SLIDE CANYON",
"TUOLUMNE R",
"TUOLUMNE",
-119.43,
38.092,
9200,
"CA Dept of Water Resources/DFM-Hydro-SMN",
np.nan,
),
],
columns=[
"ID",
"Station Name",
"River Basin",
"County",
"Longitude",
"Latitude",
"ElevationFeet",
"Operator",
"Map",
],
)
]
def test_class_variables(self):
assert CDECPointData("no", "no").tzinfo == timezone(timedelta(hours=-8.0))
def test_get_metadata(self, tny_station):
with patch("metloom.pointdata.cdec.requests") as mock_requests:
mock_requests.get.side_effect = self.tny_side_effect
metadata = tny_station.metadata
mock_get = mock_requests.get
assert mock_get.call_count == 1
mock_get.assert_called_with(
"http://cdec.water.ca.gov/cdecstation2/CDecServlet/getStationInfo",
params={"stationID": "TNY"},
)
expected = gpd.points_from_xy([-119.0], [42.0], z=[1000.0])[0]
assert expected == metadata
def test_get_daily_data(self, tny_station, tny_daily_expected):
with patch("metloom.pointdata.cdec.requests") as mock_requests:
mock_get = mock_requests.get
mock_get.side_effect = self.tny_side_effect
response = tny_station.get_daily_data(
datetime(2021, 5, 15),
datetime(2021, 5, 18),
[CdecStationVariables.PRECIPITATIONACCUM,
CdecStationVariables.TEMPAVG],
)
# mock_get = mock_requests.get
mock_get.assert_any_call(
"http://cdec.water.ca.gov/dynamicapp/req/JSONDataServlet",
params={
"Stations": "TNY",
"dur_code": "D",
"Start": "2021-05-15T00:00:00",
"End": "2021-05-18T00:00:00",
"SensorNums": "30",
},
)
assert mock_get.call_count == 3
pd.testing.assert_frame_equal(response, tny_daily_expected)
def test_get_daily_from_hourly_data(self, tny_station):
"""
Check that we fall back on resampled hourly data if we don't find
daily data
"""
with patch("metloom.pointdata.cdec.requests") as mock_requests:
mock_get = mock_requests.get
mock_get.side_effect = self.tny_hourly_side_effect
response = tny_station.get_daily_data(
datetime(2021, 5, 15),
datetime(2021, 5, 16),
[CdecStationVariables.TEMPAVG],
)
assert mock_get.call_count == 3
expected = gpd.GeoDataFrame.from_dict(
{
'AVG AIR TEMP': {
(Timestamp('2021-05-15 08:00:00+0000', tz='UTC'),
'TNY'): 2.233333},
'AVG AIR TEMP_units': {
(Timestamp('2021-05-15 08:00:00+0000', tz='UTC'),
'TNY'): 'DEG F'},
'datasource': {
(Timestamp('2021-05-15 08:00:00+0000', tz='UTC'),
'TNY'): 'CDEC'}
}, geometry=gpd.points_from_xy([-119.0], [42.0], z=[1000.0])
)
expected.index.set_names(["datetime", "site"], inplace=True)
pd.testing.assert_frame_equal(
response, expected, check_exact=False, check_like=True
)
def test_points_from_geometry(self, shape_obj):
expected_url = (
"https://cdec.water.ca.gov/dynamicapp/staSearch?"
"sta=&sensor_chk=on&sensor=3"
"&collect=NONE+SPECIFIED&dur="
"&active_chk=on&active=Y"
"&loc_chk=on&lon1=-119.8"
"&lon2=-119.2&lat1=37.7"
"&lat2=38.2"
"&elev1=-5&elev2=99000&nearby=&basin=NONE+SPECIFIED"
"&hydro=NONE+SPECIFIED&county=NONE+SPECIFIED"
"&agency_num=160&display=sta"
)
with patch("metloom.pointdata.cdec.pd.read_html") as mock_table_read:
mock_table_read.return_value = self.station_search_response()
result = CDECPointData.points_from_geometry(
shape_obj, [CdecStationVariables.SWE]
)
mock_table_read.assert_called_with(expected_url)
assert len(result) == 5
assert [st.id for st in result] == ["GIN", "DAN", "TNY", "TUM", "SLI"]
@staticmethod
def check_str_for_float(whole_string, key):
"""
Parse lat/lon from cdec search string for table read
"""
split_str = whole_string.split("&")
matches = [ss for ss in split_str if key in ss]
if len(matches) > 1 or len(matches) == 0:
raise ValueError(f"{key} is a bad checker")
match = matches[0]
result_val = match.split("=")[-1]
return float(result_val)
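    # Illustrative sketch, not part of the original tests: check_str_for_float
    # pulls a single float out of a CDEC station-search URL, e.g. with the
    # (invented) string "...&lat1=37.6&lat2=38.3&..." and key "lat1" it
    # returns 37.6, and it raises ValueError when the key matches zero or
    # more than one query parameter.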
def test_points_from_geometry_buffer(self, shape_obj):
with patch("metloom.pointdata.cdec.pd.read_html") as mock_table_read:
mock_table_read.return_value = self.station_search_response()
CDECPointData.points_from_geometry(
shape_obj, [CdecStationVariables.SWE], buffer=0.1
)
"&loc_chk=on&lon1=-119.8"
"&lon2=-119.2&lat1=37.7"
"&lat2=38.2"
result_str = mock_table_read.call_args[0][0]
expected = {
'lat2': 38.3, 'lat1': 37.6,
'lon2': -119.1, 'lon1': -119.9
}
for k, v in expected.items():
result = self.check_str_for_float(result_str, k)
assert v == pytest.approx(result)
def test_points_from_geometry_multi_sensor(self, shape_obj):
with patch("metloom.pointdata.cdec.pd.read_html") as mock_table_read:
# patch the snowcourse check so we don't fetch metadata
with patch.object(
CDECPointData, 'is_only_snow_course', return_value=False
):
mock_table_read.side_effect = self.station_search_side_effect
result = CDECPointData.points_from_geometry(
shape_obj,
[CdecStationVariables.SWE, CdecStationVariables.SNOWDEPTH],
within_geometry=False
)
expected_names = [
"A Fake Station", "B Fake Station", "GIN FLAT",
"DANA MEADOWS", "TENAYA LAKE", "GIN FLAT (COURSE)",
"TUOLUMNE MEADOWS", "SLIDE CANYON"
]
expected_codes = [
"AAA", "BBB", "GIN", "DAN", "TNY", "TUM", "SLI", "GFL"
]
assert len(result) == 8
assert all([st.id in expected_codes for st in result])
assert all([st.name in expected_names for st in result])
def test_points_from_geometry_fail(self, shape_obj):
with patch("metloom.pointdata.cdec.pd") as mock_pd:
mock_pd.read_html.side_effect = side_effect_error
result = CDECPointData.points_from_geometry(
shape_obj, [CdecStationVariables.SWE]
)
assert result.points == []
def test_point_collection_to_dataframe(self, shape_obj):
with patch("metloom.pointdata.cdec.pd.read_html") as mock_table_read:
mock_table_read.return_value = self.station_search_response()
result = CDECPointData.points_from_geometry(
shape_obj, [CdecStationVariables.SWE]
)
assert isinstance(result, PointDataCollection)
points_df = result.to_dataframe()
for idp, point in enumerate(result):
point_row = points_df.iloc[idp]
assert point.name == point_row["name"]
assert point.id == point_row["id"]
assert point.metadata == point_row["geometry"]
assert point.DATASOURCE == point_row["datasource"]
def test_can_parse_dates(self, tny_station):
df = pd.DataFrame.from_records([
{"datetime": "2021-03-14 01:00:00"},
# This time does not exist in US/Pacific, but does in CDEC
{"datetime": "2021-03-14 02:00:00"},
{"datetime": "2021-03-14 03:00:00"},
])
df["datetime"] | |
kwargs.update(edge_attrs)
if labels[-1]:
# this will be a directed edge
del kwargs['dir']
kwargs.setdefault('arrowsize', '0.5')
if reverse[-1]:
edge_from, edge_to = edge_to, edge_from
graph.edge(edge_from, edge_to, **kwargs)
else:
# simple case, direct edge from node i to j
# N.B., adjust edge length so we measure distance from edge of
# circle rather than center
el = (edge_length * sep) + width_i / 2 + width_j / 2
kwargs = {'len': str(el)}
kwargs.update(edge_attrs)
edge_from, edge_to = str(i), str(j)
if variant_labels is not None:
idx_diff = np.nonzero(h_distinct[:, i] != h_distinct[:, j])[0][0]
label = variant_labels[idx_diff]
if label:
# this will be a directed edge
del kwargs['dir']
kwargs.setdefault('arrowsize', '0.5')
allele_i = h_distinct[idx_diff, i]
allele_j = h_distinct[idx_diff, j]
if allele_i > allele_j:
# reverse direction of edge
edge_from, edge_to = edge_to, edge_from
else:
label = ''
kwargs.setdefault('label', label)
graph.edge(edge_from, edge_to, **kwargs)
import sys
class DummyLogger(object):
def __call__(self, *args, **kwargs):
pass
class DebugLogger(object):
def __init__(self, name, out=None):
self.name = name
if out is None:
out = sys.stdout
elif isinstance(out, str):
out = open(out, mode='at')
self.out = out
def __call__(self, *msg):
print(self.name, *msg, file=self.out)
self.out.flush()
def _pairwise_haplotype_distance(h, metric='hamming'):
assert metric in ['hamming', 'jaccard']
dist = allel.pairwise_distance(h, metric=metric)
dist *= h.n_variants
dist = scipy.spatial.distance.squareform(dist)
# N.B., np.rint is **essential** here, otherwise can get weird rounding errors
dist = np.rint(dist).astype('i8')
return dist
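# Illustrative sketch, not part of the original code: for three haplotypes
# (columns) over four variants (rows), the returned matrix counts differing
# sites per pair of haplotypes. The genotypes are invented for demonstration.
def _pairwise_distance_example():
    h = allel.HaplotypeArray([[0, 0, 1],
                              [0, 1, 1],
                              [0, 0, 0],
                              [1, 1, 1]])
    return _pairwise_haplotype_distance(h)
    # -> array([[0, 1, 2],
    #           [1, 0, 1],
    #           [2, 1, 0]])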
def graph_haplotype_network(h,
hap_colors='grey',
distance_metric='hamming',
network_method='mjn',
comment=None,
engine='neato',
format='png',
mode='major',
overlap=True,
splines=True,
graph_attrs=None,
node_size_factor=0.005,
node_attrs=None,
show_node_labels=False,
fontname='monospace',
fontsize=None,
edge_length=0.5,
edge_weight=10,
edge_attrs=None,
show_alternate_edges=True,
alternate_edge_attrs=None,
anon_width=0.03,
anon_fillcolor='white',
anon_node_attrs=None,
intermediate_nodes=True,
max_dist=5,
variant_labels=None,
debug=False,
debug_out=None,
max_allele=3,
return_components=False,
show_singletons=True,
):
"""TODO doc me"""
if debug:
log = DebugLogger('[graph_haplotype_network]', out=debug_out)
else:
log = DummyLogger()
# check inputs
h = allel.HaplotypeArray(h)
log(h.shape)
# optimise - keep only segregating variants
ac = h.count_alleles()
loc_seg = ac.is_segregating()
h = h[loc_seg]
if variant_labels is not None:
variant_labels = np.asarray(variant_labels, dtype=object)[loc_seg]
# find distinct haplotypes
h_distinct_sets = h.distinct()
# log('h_distinct_sets', h_distinct_sets)
# find indices of distinct haplotypes - just need one per set
h_distinct_indices = [sorted(s)[0] for s in h_distinct_sets]
log('h_distinct_indices', h_distinct_indices)
# reorder by index
ix = np.argsort(h_distinct_indices)
h_distinct_indices = [h_distinct_indices[i] for i in ix]
log('h_distinct_indices (reordered)', h_distinct_indices)
h_distinct_sets = [h_distinct_sets[i] for i in ix]
# obtain an array of distinct haplotypes
h_distinct = h.take(h_distinct_indices, axis=1)
# deal with colors - count how many of each color per distinct haplotype
color_counters = None
if isinstance(hap_colors, (list, tuple, np.ndarray)):
assert len(hap_colors) == h.n_haplotypes
color_counters = [
collections.Counter([hap_colors[i] for i in s])
for s in h_distinct_sets
]
# count how many observations per distinct haplotype
hap_counts = [len(s) for s in h_distinct_sets]
# compute pairwise distance matrix
dist = _pairwise_haplotype_distance(h_distinct, distance_metric)
if network_method.lower() == 'mst':
# compute minimum spanning tree
edges = scipy.sparse.csgraph.minimum_spanning_tree(dist).toarray().astype(int)
# deal with maximum distance
if max_dist:
edges[edges > max_dist] = 0
# no alternate edges when using mst
alternate_edges = None
elif network_method.lower() == 'msn':
# compute network
edges, alternate_edges = minimum_spanning_network(dist,
max_dist=max_dist,
debug=debug,
debug_out=debug_out)
edges = np.triu(edges)
alternate_edges = np.triu(alternate_edges)
elif network_method.lower() == 'mjn':
# compute network - N.B., MJN may add new haplotypes
h_distinct, edges, alternate_edges = median_joining_network(h_distinct,
max_dist=max_dist,
debug=debug,
debug_out=debug_out,
max_allele=max_allele)
edges = np.triu(edges)
alternate_edges = np.triu(alternate_edges)
else:
raise ValueError(network_method)
# setup graph
graph = graphviz.Digraph(comment=comment, engine=engine, format=format)
if graph_attrs is None:
graph_attrs = dict()
graph_attrs.setdefault('overlap', str(overlap).lower())
graph_attrs.setdefault('splines', str(splines).lower())
graph_attrs.setdefault('mode', mode)
graph_attrs.setdefault('sep', '0')
graph.attr('graph', **graph_attrs)
# add the main nodes
if node_attrs is None:
node_attrs = dict()
node_attrs.setdefault('fixedsize', 'true')
node_attrs.setdefault('shape', 'circle')
node_attrs.setdefault('fontname', fontname)
node_attrs.setdefault('fontsize', str(fontsize))
if anon_node_attrs is None:
anon_node_attrs = dict()
anon_node_attrs.setdefault('fixedsize', 'true')
anon_node_attrs.setdefault('shape', 'circle')
anon_node_attrs.setdefault('style', 'filled')
anon_node_attrs.setdefault('fillcolor', anon_fillcolor)
anon_node_attrs.setdefault('fontname', fontname)
anon_node_attrs.setdefault('fontsize', str(fontsize))
for i in range(edges.shape[0]):
kwargs = dict()
if i < len(hap_counts):
# original haplotype
n = hap_counts[i]
connected = np.any((edges[i] > 0) | (edges[:, i] > 0))
if not show_singletons and n == 1 and not connected:
continue
# calculate width from number of items - make width proportional to area
width = np.sqrt(n * node_size_factor)
# determine style and fill color
if color_counters:
cc = color_counters[i]
if len(cc) > 1:
# more than one color, make a pie chart
style = 'wedged'
fillcolor = ':'.join(['%s;%s' % (k, v/n) for k, v in cc.items()])
else:
# just one color, fill with solid color
style = 'filled'
fillcolor = list(cc.keys())[0]
else:
style = 'filled'
fillcolor = hap_colors
kwargs.update(node_attrs)
kwargs.setdefault('style', style)
kwargs.setdefault('fillcolor', fillcolor)
kwargs.setdefault('width', str(width))
else:
# not an original haplotype, inferred during network building
n = 1
width = anon_width
fillcolor = anon_fillcolor
kwargs.update(anon_node_attrs)
kwargs.setdefault('width', str(anon_width))
# add graph node
if show_node_labels is False:
label = ''
elif show_node_labels is True:
label = str(i)
elif isinstance(show_node_labels, int) and n >= show_node_labels:
label = str(i)
elif show_node_labels == 'count' and n > 1:
label = str(n)
else:
label = ''
kwargs.setdefault('label', label)
graph.node(str(i), **kwargs)
# setup defaults
if edge_attrs is None:
edge_attrs = dict()
edge_attrs.setdefault('style', 'normal')
edge_attrs.setdefault('weight', str(edge_weight))
edge_attrs.setdefault('fontname', fontname)
edge_attrs.setdefault('fontsize', str(fontsize))
edge_attrs.setdefault('dir', 'none')
if alternate_edge_attrs is None:
alternate_edge_attrs = dict()
alternate_edge_attrs.setdefault('style', 'dashed')
alternate_edge_attrs.setdefault('weight', str(edge_weight))
alternate_edge_attrs.setdefault('fontname', fontname)
alternate_edge_attrs.setdefault('fontsize', str(fontsize))
alternate_edge_attrs.setdefault('dir', 'none')
# add main edges
_graph_edges(graph,
edges,
hap_counts,
node_size_factor,
edge_length,
anon_width,
intermediate_nodes,
edge_attrs,
anon_node_attrs,
h_distinct,
variant_labels)
# add alternate edges
if show_alternate_edges and alternate_edges is not None:
_graph_edges(graph,
alternate_edges,
hap_counts,
node_size_factor,
edge_length,
anon_width,
intermediate_nodes,
alternate_edge_attrs,
anon_node_attrs,
h_distinct,
variant_labels)
if return_components:
from scipy.sparse.csgraph import connected_components
n_components, component_labels = connected_components(edges)
return graph, h_distinct_sets, component_labels
else:
return graph, hap_counts
def minimum_spanning_network(dist, max_dist=None, debug=False, debug_out=None):
"""TODO"""
if debug:
log = DebugLogger('[minimum_spanning_network]', out=debug_out)
else:
log = DummyLogger()
# TODO review implementation, see if this can be tidied up
# keep only the upper triangle of the distance matrix, to avoid adding the same
# edge twice
dist = np.triu(dist)
# setup the output array of links between nodes
edges = np.zeros_like(dist)
# setup an array of alternate links
alternate_edges = np.zeros_like(dist)
# intermediate variable - assignment of haplotypes to clusters (a.k.a. sub-networks)
# initially each distinct haplotype is in its own cluster
cluster = np.arange(dist.shape[0])
# start with haplotypes separated by a single mutation
step = 1
log('[%s]' % step, 'begin')
# iterate until all haplotypes in a single cluster, or max_dist reached
while len(set(cluster)) > 1 and (max_dist is None or step <= max_dist):
log('[%s]' % step, 'processing, cluster:', cluster)
# keep track of which clusters have been merged at this height
merged = set()
# remember what cluster assignments were at the previous height
prv_cluster = cluster.copy()
# iterate over all pairs where distance equals current step size
for i, j in zip(*np.nonzero(dist == step)):
log('[%s]' % step, 'found potential edge', i, j)
# current cluster assignment for each haplotype
a = cluster[i]
b = cluster[j]
# previous cluster assignment for each haplotype
pa = prv_cluster[i]
pb = prv_cluster[j]
log('[%s]' % step, a, b, pa, pb, merged)
# check to see if both nodes already in the same cluster
if a != b:
# nodes are in different clusters, so we can merge (i.e., connect) the
# clusters
log('[%s]' % step, 'assign an edge')
edges[i, j] = dist[i, j]
edges[j, i] = dist[i, j]
# merge clusters
c = cluster.max() + 1
loc_a = cluster == a
loc_b = cluster == b
cluster[loc_a] = c
cluster[loc_b] = c
merged.add(tuple(sorted([pa, pb])))
log('[%s]' % step, 'merged', cluster, merged)
elif tuple(sorted([pa, pb])) in merged or step == 1:
# the two clusters have already been merged at this level, this is an
# alternate connection
# N.B., special case step = 1 because no previous cluster assignments
# (TODO really?)
log('[%s]' % step, 'assign an alternate edge')
alternate_edges[i, j] = dist[i, j]
alternate_edges[j, i] = dist[i, j]
else:
log('[%s]' % step, 'unexpected case, skipping')
# increment step
step += 1
log('# edges:', np.count_nonzero(np.triu(edges)))
log('# alt edges:', np.count_nonzero(np.triu(alternate_edges)))
return edges, alternate_edges
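# Illustrative usage sketch (not part of the original module; the 4x4 distance
# matrix below is made up purely for demonstration):
#
#     import numpy as np
#
#     dist = np.array([[0, 1, 2, 3],
#                      [1, 0, 1, 2],
#                      [2, 1, 0, 1],
#                      [3, 2, 1, 0]])
#     edges, alt_edges = minimum_spanning_network(dist, max_dist=2)
#     # `edges` holds the distances of the accepted connections,
#     # `alt_edges` holds equally short alternate connections, if any.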
def _remove_obsolete(h, orig_n_haplotypes, max_dist, log):
n_removed = None
edges = alt_edges = None
while n_removed is None or n_removed > 0:
# step 1 - compute distance
dist = _pairwise_haplotype_distance(h, metric='hamming')
# step 2 - construct the minimum spanning network
edges, alt_edges = minimum_spanning_network(dist, max_dist=max_dist)
all_edges = edges + alt_edges
# boto3_type_annotations_with_docs/boto3_type_annotations/lambda_/paginator.py
from typing import Dict
from botocore.paginate import Paginator
class ListAliases(Paginator):
def paginate(self, FunctionName: str, FunctionVersion: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Lambda.Client.list_aliases`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListAliases>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
FunctionName='string',
FunctionVersion='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Aliases': [
{
'AliasArn': 'string',
'Name': 'string',
'FunctionVersion': 'string',
'Description': 'string',
'RoutingConfig': {
'AdditionalVersionWeights': {
'string': 123.0
}
},
'RevisionId': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **Aliases** *(list) --*
A list of aliases.
- *(dict) --*
Provides configuration information about a Lambda function `alias <https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html>`__ .
- **AliasArn** *(string) --*
The Amazon Resource Name (ARN) of the alias.
- **Name** *(string) --*
The name of the alias.
- **FunctionVersion** *(string) --*
The function version that the alias invokes.
- **Description** *(string) --*
A description of the alias.
- **RoutingConfig** *(dict) --*
The `routing configuration <https://docs.aws.amazon.com/lambda/latest/dg/lambda-traffic-shifting-using-aliases.html>`__ of the alias.
- **AdditionalVersionWeights** *(dict) --*
The name of the second alias, and the percentage of traffic that's routed to it.
- *(string) --*
- *(float) --*
- **RevisionId** *(string) --*
A unique identifier that changes when you update the alias.
- **NextToken** *(string) --*
A token to resume pagination.
:type FunctionName: string
:param FunctionName: **[REQUIRED]**
The name of the Lambda function.
**Name formats**
* **Function name** - ``MyFunction`` .
* **Function ARN** - ``arn:aws:lambda:us-west-2:123456789012:function:MyFunction`` .
* **Partial ARN** - ``123456789012:function:MyFunction`` .
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
:type FunctionVersion: string
:param FunctionVersion:
Specify a function version to only list aliases that invoke that version.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
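# Illustrative usage sketch (assumes standard boto3 client wiring; the
# function name 'my-function' is hypothetical):
#
#     import boto3
#
#     client = boto3.client('lambda')
#     paginator = client.get_paginator('list_aliases')
#     for page in paginator.paginate(FunctionName='my-function'):
#         for alias in page['Aliases']:
#             print(alias['Name'], alias['FunctionVersion'])
#
# The same pattern applies to the other paginators defined in this module.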
class ListEventSourceMappings(Paginator):
def paginate(self, EventSourceArn: str = None, FunctionName: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Lambda.Client.list_event_source_mappings`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListEventSourceMappings>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
EventSourceArn='string',
FunctionName='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'EventSourceMappings': [
{
'UUID': 'string',
'BatchSize': 123,
'EventSourceArn': 'string',
'FunctionArn': 'string',
'LastModified': datetime(2015, 1, 1),
'LastProcessingResult': 'string',
'State': 'string',
'StateTransitionReason': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **EventSourceMappings** *(list) --*
A list of event source mappings.
- *(dict) --*
A mapping between an AWS resource and an AWS Lambda function. See CreateEventSourceMapping for details.
- **UUID** *(string) --*
The identifier of the event source mapping.
- **BatchSize** *(integer) --*
The maximum number of items to retrieve in a single batch.
- **EventSourceArn** *(string) --*
The Amazon Resource Name (ARN) of the event source.
- **FunctionArn** *(string) --*
The ARN of the Lambda function.
- **LastModified** *(datetime) --*
The date that the event source mapping was last updated.
- **LastProcessingResult** *(string) --*
The result of the last AWS Lambda invocation of your Lambda function.
- **State** *(string) --*
The state of the event source mapping. It can be one of the following: ``Creating`` , ``Enabling`` , ``Enabled`` , ``Disabling`` , ``Disabled`` , ``Updating`` , or ``Deleting`` .
- **StateTransitionReason** *(string) --*
The cause of the last state change, either ``User initiated`` or ``Lambda initiated`` .
- **NextToken** *(string) --*
A token to resume pagination.
:type EventSourceArn: string
:param EventSourceArn:
The Amazon Resource Name (ARN) of the event source.
* **Amazon Kinesis** - The ARN of the data stream or a stream consumer.
* **Amazon DynamoDB Streams** - The ARN of the stream.
* **Amazon Simple Queue Service** - The ARN of the queue.
:type FunctionName: string
:param FunctionName:
The name of the Lambda function.
**Name formats**
* **Function name** - ``MyFunction`` .
* **Function ARN** - ``arn:aws:lambda:us-west-2:123456789012:function:MyFunction`` .
* **Version or Alias ARN** - ``arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD`` .
* **Partial ARN** - ``123456789012:function:MyFunction`` .
The length constraint applies only to the full ARN. If you specify only the function name, it\'s limited to 64 characters in length.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListFunctions(Paginator):
def paginate(self, MasterRegion: str = None, FunctionVersion: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Lambda.Client.list_functions`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/ListFunctions>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
MasterRegion='string',
FunctionVersion='ALL',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Functions': [
{
'FunctionName': 'string',
'FunctionArn': 'string',
'Runtime': 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'java8'|'python2.7'|'python3.6'|'python3.7'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'provided',
'Role': 'string',
'Handler': 'string',
'CodeSize': 123,
'Description': 'string',
'Timeout': 123,
'MemorySize': 123,
'LastModified': 'string',
'CodeSha256': 'string',
'Version': 'string',
'VpcConfig': {
'SubnetIds': [
'string',
],
'SecurityGroupIds': [
'string',
],
'VpcId': 'string'
},
'DeadLetterConfig': {
'TargetArn': 'string'
},
'Environment': {
'Variables': {
'string': 'string'
},
'Error': {
'ErrorCode': 'string',
'Message': 'string'
}
},
'KMSKeyArn': 'string',
'TracingConfig': {
'Mode': 'Active'|'PassThrough'
},
'MasterArn': 'string',
'RevisionId': 'string',
'Layers': [
{
'Arn': 'string',
'CodeSize': 123
},
]
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
A list of Lambda functions.
- **Functions** *(list) --*
A list of Lambda functions.
- *(dict) --*
Details about a function's configuration.
- **FunctionName** *(string) --*
The name of the function.
- **FunctionArn** *(string) --*
The function's Amazon Resource Name (ARN).
- **Runtime** *(string) --*
The runtime environment for the Lambda function.
- **Role** *(string) --*
The function's execution role.
- **Handler** *(string) --*
The function that Lambda calls to begin executing your function.
- **CodeSize** *(integer) --*
The size of the function's deployment package, in bytes.
- **Description** *(string) --*
The function's description.
- **Timeout** *(integer) --*
The amount of time that Lambda allows a function to run before stopping it.
- **MemorySize** *(integer) --*
The memory that's allocated to the function.
- **LastModified** *(string) --*
The date and time that the function was last updated, in `ISO-8601 format <https://www.w3.org/TR/NOTE-datetime>`__ (YYYY-MM-DDThh:mm:ss.sTZD).
- **CodeSha256** *(string) --*
The SHA256 hash of the function's deployment package.
- **Version** *(string) --*
The version of the Lambda function.
- **VpcConfig** *(dict) --*
The function's networking configuration.
- **SubnetIds** *(list) --*
A list of VPC subnet IDs.
- *(string) --*
- **SecurityGroupIds** *(list) --*
A list of VPC security groups IDs.
- *(string) --*
- **VpcId** *(string) --*
The ID of the VPC.
- **DeadLetterConfig** *(dict) --*
The function's dead letter queue.
- **TargetArn** *(string) --*
The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
- **Environment** *(dict) --*
The function's environment variables.
- **Variables** *(dict) --*
Environment variable key-value pairs.
def interval(self, start, end, color, name):
# if there is an active sequence, add interval in that sequence
if self.active_sequence is not None:
self.active_sequence.add_interval(_Interval(self, start, end, color, name))
else:
# close current function if any
if self.active_function is not None:
self.active_functionsp.add_function(self.active_function)
self.active_function = None
# if there is an active function panel, close it
if self.active_functionsp is not None:
self.add_panel(self.active_functionsp)
self.active_functionsp = None
# if there is no active intervals panel create one
if self.active_intervalsp is None:
self.active_intervalsp = _IntervalPanel(self, self.next_panel_name, pauses=self.next_panel_pauses)
self.next_panel_name = None
# add interval in the intervals panel
self.active_intervalsp.add_interval(_Interval(self, start, end, color, name))
def transition(self, start, end):
# if there is no active sequence, create one
if self.active_sequence is None:
sequence(self)
# add transition in the sequence
if start < end:
self.active_sequence.add_interval(_Interval(self, start, end, -1, None))
def function(self, name=None, origin=None, horizon=None, style=None, color=None):
# close current intervals panel if any
if self.active_intervalsp is not None:
self.add_panel(self.active_intervalsp)
self.active_intervalsp = None
# close current sequence if any
if self.active_sequence is not None:
self.active_sequencesp.add_sequence(self.active_sequence)
self.active_sequence = None
# close current sequence panel if any
if self.active_sequencesp is not None:
self.add_panel(self.active_sequencesp)
self.active_sequencesp = None
# create new function panel if none exists
if self.active_functionsp is None:
self.active_functionsp = _FunctionPanel(self, name=self.next_panel_name, pauses=self.next_panel_pauses)
self.next_panel_name = None
# create new function in the function panel
if self.active_function is not None:
self.active_functionsp.add_function(self.active_function)
self.active_function = _Function(self, name, origin, horizon, style, color)
def segment(self, start, end, vstart, vend, name):
# if there is an active sequence, add segment in that sequence
if self.active_sequence is not None:
self.active_sequence.add_segment(_Segment(self, start, end, vstart, vend, name))
elif self.active_function is not None:
self.active_function.add_segment(_Segment(self, start, end, vstart, vend, name))
else:
self.function() # Create active function
self.segment(start, end, vstart, vend, name)
def pause(self, start, end):
if self.active_sequencesp is not None:
self.active_sequencesp.add_pause(start, end)
elif self.active_functionsp is not None:
self.active_functionsp.add_pause(start, end)
elif self.active_intervalsp is not None:
self.active_intervalsp.add_pause(start, end)
elif self.next_panel_pauses is not None:
self.next_panel_pauses.append((start, end))
else:
self._pauses.append((start, end))
def flush(self):
self.panel()
def add_panel(self, p):
self.panels.append(p)
def compute_time_step(self):
span = self.horizon - self.origin
step = span / self.nbSteps
lg = math.floor(math.log10(step))
nstep = step / math.pow(10, lg)
self.timeStep = math.pow(10, lg) * min(self.timeStepSync, key=lambda x: abs(x - nstep))
def show(self):
n = len(self.panels)
self.update_bounds()
for s in self.panels:
s.preshow()
self.compute_time_step()
heights = [s.get_height() for s in self.panels]
f, axarr = plt.subplots(n, sharex=True, num=self.name, gridspec_kw=dict(height_ratios=heights))
f.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)
if n == 1:
if self.panels[0].name is not None:
axarr.set_ylabel(self.panels[0].name)
self.panels[0].display(axarr)
else:
for i in range(0, n):
# box = dict(facecolor='white', pad=10, alpha=0.8)
if self.panels[i].name is not None:
axarr[i].set_ylabel(self.panels[i].name) # , bbox=box)
axarr[i].yaxis.set_label_coords(-0.05, 0.5)
self.panels[i].display(axarr[i])
plt.margins(0.05)
plt.tight_layout()
class _Visu(object):
def __init__(self):
super(_Visu, self).__init__()
self._active_figure = None
self._all_figures = []
self._naming = None
def timeline(self, title=None, origin=None, horizon=None):
if self._active_figure is not None:
self._all_figures.append(self._active_figure)
self._active_figure = _TimeLine(title, origin, horizon)
def matrix(self, title=None, matrix=None, tuples=None):
if self._active_figure is not None:
self._active_figure.flush()
self._all_figures.append(self._active_figure)
self._active_figure = _Matrix(title, matrix, tuples)
@property
def has_active_timeline(self):
return (self._active_figure is not None) and isinstance(self._active_figure, _TimeLine)
@property
def active_timeline(self):
assert self.has_active_timeline, "No active timeline"
return self._active_figure
@property
def has_active_matrix(self):
return (self._active_figure is not None) and isinstance(self._active_figure, _Matrix)
@property
def active_matrix(self):
assert self.has_active_matrix, "No active matrix"
return self._active_figure
def panel(self, name=None):
if not self.has_active_timeline:
timeline()
self.active_timeline.panel(name)
def sequence(self, name=None):
if not self.has_active_timeline:
timeline()
self.active_timeline.sequence(name)
def interval(self, start, end, color, name):
if not self.has_active_timeline:
timeline()
self.active_timeline.interval(start, end, color, name)
def transition(self, start, end):
if not self.has_active_timeline:
timeline()
self.active_timeline.transition(start, end)
def function(self, name=None, origin=None, horizon=None, style=None, color=None):
if not self.has_active_timeline:
timeline()
self.active_timeline.function(name, origin, horizon, style, color)
def segment(self, start, end, vstart, vend, name):
if not self.has_active_timeline:
timeline()
self.active_timeline.segment(start, end, vstart, vend, name)
def pause(self, start, end):
if not self.has_active_timeline:
timeline()
self.active_timeline.pause(start, end)
def show(self, pngfile=None):
""" Show the figure
Args:
pngfile (optional): Destination PNG file, None for screen
"""
if self.has_active_timeline:
panel()
for f in self._all_figures:
f.show()
if self._active_figure is not None:
self._active_figure.show()
if pngfile is None:
plt.show()
else:
plt.savefig(pngfile)
self._active_figure = None
self._all_figures = []
_visu = _Visu()
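# Illustrative usage sketch. The module-level wrapper functions (timeline(),
# panel(), interval(), show(), ...) referenced throughout this module are
# assumed to delegate to the _visu singleton above; the values below are
# made up for demonstration:
#
#     timeline("Schedule", origin=0, horizon=100)
#     panel("Machine 1")
#     interval(0, 10, 'red', 'task A')
#     interval(10, 25, 'blue', 'task B')
#     show()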
##############################################################################
# This section depends on CPO classes and defines the default display of some
# classes like CpoSolution and CpoTransitionMatrix
##############################################################################
def _canonical_interval(*args):
"""
Accepted formats for args:
[int start, int end, int|string color, string name] (canonical form)
[int start, int end, int|string color]
[int start, int end]
[CpoIntervalVarSolution cpointerval, int|string color, string name]
[CpoIntervalVarSolution cpointerval, int|string color]
[CpoIntervalVarSolution cpointerval]
"""
n = len(args)
color = 0
name = ''
assert 1 < n, "Empty argument list for interval"
if isinstance(args[0], CpoIntervalVarSolution):
start = args[0].get_start()
end = args[0].get_end()
k = 1
else:
assert args[0] == 'intervalmin' or is_int(args[0]), "Wrong type for start of interval"
assert 2 <= n, "Missing end value for interval"
assert args[1] == 'intervalmax' or is_int(args[1]), "Wrong type for end of interval"
if args[0] == 'intervalmin':
start = INTERVAL_MIN
else:
start = args[0]
if args[1] == 'intervalmax':
end = INTERVAL_MAX
else:
end = args[1]
k = 2
if k < n:
color = args[k]
assert color is None or is_int(color) or is_string(color), "Wrong type for interval color: use 'int' or 'str'"
k += 1
if k < n:
name = args[k]
assert name is None or is_string(name), "Wrong type for interval name: use 'str'"
k += 1
return start, end, color, name
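# Illustrative calls (values made up), showing how the accepted argument
# forms are normalised to the canonical (start, end, color, name) tuple:
#
#     _canonical_interval(0, 10)               # -> (0, 10, 0, '')
#     _canonical_interval(0, 10, 'red')        # -> (0, 10, 'red', '')
#     _canonical_interval(0, 10, 3, 'task A')  # -> (0, 10, 3, 'task A')
#     _canonical_interval('intervalmin', 10)   # -> (INTERVAL_MIN, 10, 0, '')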
def _canonical_transition(*args):
"""
Accepted formats for args:
[int start, int end]
[CpoIntervalVarSolution]
"""
n = len(args)
assert 1 < n, "Empty argument list for transition"
if isinstance(args[0], CpoIntervalVarSolution):
start = args[0].get_start()
end = args[0].get_end()
else:
assert args[0] == 'intervalmin' or is_int(args[0]), "Wrong type for start of transition"
assert 2 <= n, "Missing end value for transition"
assert args[1] == 'intervalmax' or is_int(args[1]), "Wrong type for end of transition"
if args[0] == 'intervalmin':
start = INTERVAL_MIN
else:
start = args[0]
if args[1] == 'intervalmax':
end = INTERVAL_MAX
else:
end = args[1]
return start, end
def _canonical_pause(*args):
"""
Accepted formats for args:
[int start, int end]
[CpoIntervalVarSolution]
"""
n = len(args)
assert 1 < n, "Empty argument list for pause"
if isinstance(args[0], CpoIntervalVarSolution):
start = args[0].get_start()
end = args[0].get_end()
else:
assert args[0] == 'intervalmin' or is_int(args[0]), "Wrong type for start of pause"
assert 2 <= n, "Missing end value for pause"
assert args[1] == 'intervalmax' or is_int(args[1]), "Wrong type for end of pause"
if args[0] == 'intervalmin':
start = INTERVAL_MIN
else:
start = args[0]
if args[1] == 'intervalmax':
end = INTERVAL_MAX
else:
end = args[1]
return start, end
def _canonical_segment(*args):
"""
Accepted formats for args:
[int start, int end, int vstart, int vend, string name] (canonical form)
[int start, int end, int vstart, int vend]
[int start, int end, int vstart]
[CpoIntervalVarSolution cpointerval, int vstart, int vend, string name]
[CpoIntervalVarSolution cpointerval, int vstart, int vend]
[CpoIntervalVarSolution cpointerval, int vstart]
"""
n = len(args)
name = ''
assert 1 < n, "Empty argument list for segment"
if isinstance(args[0], CpoIntervalVarSolution):
start = args[0].get_start()
end = args[0].get_end()
k = 1
else:
assert args[0] == 'intervalmin' or is_int(args[0]), "Wrong type for start of segment"
assert 2 <= n, "Missing end value for segment"
assert args[1] == 'intervalmax' or is_int(args[1]), "Wrong type for end of segment"
if args[0] == 'intervalmin':
start = INTERVAL_MIN
else:
start = args[0]
if args[1] == 'intervalmax':
end = INTERVAL_MAX
else:
end = args[1]
k = 2
assert k < n, "Missing start value (or slope) for segment"
assert is_number(args[k]), \
"Wrong type for segment start value (or slope): not a number"
vstart = args[k]
k += 1
if k < n:
if is_string(args[k]):
name = args[k]
k = n
vend = vstart
else:
assert is_number(args[k]), \
"Wrong type for segment end value (or slope): not a number"
vend = args[k]
k += 1
else:
vend = vstart
if k < n:
assert is_string(args[k]), "Wrong type for segment name: use 'str'"
name = args[k]
k += 1
return start, end, vstart, vend, name
def _define_solution(solution, name=None):
timeline(name)
vs = solution.get_all_var_solutions()
itvcolors = dict()
panel(name="Sequences")
# Create sequence variables
has_sequence = False
for v in vs:
if isinstance(v, CpoSequenceVarSolution):
"""Process Sentinel-1 data into interferograms using GAMMA"""
import argparse
import glob
import logging
import os
import re
import shutil
import sys
from datetime import datetime, timezone
from pathlib import Path
from secrets import token_hex
from hyp3lib import GranuleError
from hyp3lib.SLC_copy_S1_fullSW import SLC_copy_S1_fullSW
from hyp3lib.execute import execute
from hyp3lib.getParameter import getParameter
from hyp3lib.get_orb import downloadSentinelOrbitFile
from hyp3lib.makeAsfBrowse import makeAsfBrowse
from hyp3lib.par_s1_slc_single import par_s1_slc_single
from hyp3lib.system import gamma_version
from lxml import etree, objectify
import hyp3_gamma
from hyp3_gamma.insar.getDemFileGamma import get_dem_file_gamma
from hyp3_gamma.insar.interf_pwr_s1_lt_tops_proc import interf_pwr_s1_lt_tops_proc
from hyp3_gamma.insar.unwrapping_geocoding import unwrapping_geocoding
from hyp3_gamma.metadata import create_metadata_file_set_insar
log = logging.getLogger(__name__)
def get_bursts(mydir, name):
back = os.getcwd()
os.chdir(os.path.join(mydir, "annotation"))
total_bursts = None
time = []
for myfile in os.listdir("."):
if name in myfile:
root = etree.parse(myfile)
for coord in root.iter('azimuthAnxTime'):
time.append(float(coord.text))
for count in root.iter('burstList'):
total_bursts = int(count.attrib['count'])
os.chdir(back)
return time, total_bursts
def get_burst_overlaps(reference_dir, secondary_dir):
log.info("Calculating burst overlaps; in directory {}".format(os.getcwd()))
burst_tab1 = "%s_burst_tab" % reference_dir[17:25]
burst_tab2 = "%s_burst_tab" % secondary_dir[17:25]
with open(burst_tab1, "w") as f1:
with open(burst_tab2, "w") as f2:
for name in ['001.xml', '002.xml', '003.xml']:
time1, total_bursts1 = get_bursts(reference_dir, name)
log.info("total_bursts1, time1 {} {}".format(total_bursts1, time1))
time2, total_bursts2 = get_bursts(secondary_dir, name)
log.info("total_bursts2, time2 {} {}".format(total_bursts2, time2))
cnt = 1
start1 = 0
start2 = 0
found = 0
x = time1[0]
for y in time2:
if abs(x - y) < 0.20:
log.info("Found burst match at 1 %s" % cnt)
found = 1
start1 = 1
start2 = cnt
cnt += 1
if found == 0:
y = time2[0]
cnt = 1
for x in time1:
if abs(x - y) < 0.20:
log.info("Found burst match at %s 1" % cnt)
start1 = cnt
start2 = 1
cnt += 1
size1 = total_bursts1 - start1 + 1
size2 = total_bursts2 - start2 + 1
if size1 > size2:
size = size2
else:
size = size1
f1.write("%s %s\n" % (start1, start1 + size - 1))
f2.write("%s %s\n" % (start2, start2 + size - 1))
return burst_tab1, burst_tab2
def get_copol(granule_name):
polarization = granule_name[14:16]
if polarization in ['SV', 'DV']:
return 'vv'
if polarization in ['SH', 'DH']:
return 'hh'
raise GranuleError(f'Cannot determine co-polarization of granule {granule_name}')
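# Illustrative example (the granule name is made up, but follows the
# Sentinel-1 naming convention in which characters 14:16 encode the
# polarisation):
#
#     get_copol('S1A_IW_SLC__1SDV_20200101T000000_20200101T000027_030000_037000_1234')
#     # -> 'vv'   (the dual-pol 'DV' code maps to the 'vv' co-polarisation)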
def least_precise_orbit_of(orbits):
if any([orb is None for orb in orbits]):
return 'O'
if any(['RESORB' in orb for orb in orbits]):
return 'R'
return 'P'
def timedetla_in_days(delta):
seconds_in_a_day = 60 * 60 * 24
total_seconds = abs(delta.total_seconds())
return round(total_seconds/seconds_in_a_day)
def get_product_name(reference_name, secondary_name, orbit_files, pixel_spacing=80, apply_water_mask=False):
plat1 = reference_name[2]
plat2 = secondary_name[2]
datetime1 = reference_name[17:32]
datetime2 = secondary_name[17:32]
ref_datetime = datetime.strptime(datetime1, '%Y%m%dT%H%M%S')
sec_datetime = datetime.strptime(datetime2, '%Y%m%dT%H%M%S')
days = timedetla_in_days(ref_datetime - sec_datetime)
pol1 = reference_name[15:16]
pol2 = secondary_name[15:16]
orb = least_precise_orbit_of(orbit_files)
mask = 'w' if apply_water_mask else 'u'
product_id = token_hex(2).upper()
return f'S1{plat1}{plat2}_{datetime1}_{datetime2}_{pol1}{pol2}{orb}{days:03}_INT{pixel_spacing}_G_{mask}eF_' \
f'{product_id}'
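# Illustrative example (granule and orbit file names are made up; the final
# four-character product ID is random):
#
#     get_product_name(
#         'S1A_IW_SLC__1SDV_20200103T000000_20200103T000027_030000_037000_1234',
#         'S1B_IW_SLC__1SDV_20200115T000000_20200115T000027_019000_024000_5678',
#         orbit_files=['S1A_OPER_AUX_POEORB_...', 'S1B_OPER_AUX_POEORB_...'],
#     )
#     # -> 'S1AB_20200103T000000_20200115T000000_VVP012_INT80_G_ueF_XXXX'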
def get_orbit_parameters(reference_file):
"""
input: manifest.safe in the reference.safe directory
return: {"orbitnumber": orbitnumber, "relative_orbitnumber":relative_orbitnumber,
"cyclenumber":cyclenumber, "pass_direction":pass_direction}
"""
file = os.path.join(reference_file, 'manifest.safe')
if os.path.exists(file):
with open(file, 'rb') as f:
xml = f.read()
root = objectify.fromstring(xml)
meta = root.find('metadataSection')
xmldata = meta.find('*[@ID="measurementOrbitReference"]').metadataWrap.xmlData
orbit = xmldata.find('safe:orbitReference', root.nsmap)
orbitnumber = orbit.find('safe:orbitNumber', root.nsmap)
relative_orbitnumber = orbit.find('safe:relativeOrbitNumber', root.nsmap)
cyclenumber = orbit.find('safe:cycleNumber', root.nsmap)
pass_direction = orbit.find('safe:extension', root.nsmap).\
find('s1:orbitProperties', root.nsmap).find('s1:pass', root.nsmap)
return {"orbitnumber": orbitnumber, "relative_orbitnumber": relative_orbitnumber,
"cyclenumber": cyclenumber, "pass_direction": pass_direction}
return {"orbitnumber": None, "relative_orbitnumber": None,
"cyclenumber": None, "pass_direction": None}
def move_output_files(output, reference, prod_dir, long_output, include_displacement_maps, include_look_vectors,
include_wrapped_phase, include_inc_map, include_dem):
inName = "{}.mli.geo.tif".format(reference)
outName = "{}_amp.tif".format(os.path.join(prod_dir, long_output))
shutil.copy(inName, outName)
inName = "water_mask.tif"
outName = "{}_water_mask.tif".format(os.path.join(prod_dir, long_output))
shutil.copy(inName, outName)
inName = "{}.cc.geo.tif".format(output)
outName = "{}_corr.tif".format(os.path.join(prod_dir, long_output))
if os.path.isfile(inName):
shutil.copy(inName, outName)
inName = "{}.adf.unw.geo.tif".format(output)
outName = "{}_unw_phase.tif".format(os.path.join(prod_dir, long_output))
shutil.copy(inName, outName)
if include_wrapped_phase:
inName = "{}.diff0.man.adf.geo.tif".format(output)
outName = "{}_wrapped_phase.tif".format(os.path.join(prod_dir, long_output))
shutil.copy(inName, outName)
if include_dem:
inName = "{}.dem.tif".format(output)
outName = "{}_dem.tif".format(os.path.join(prod_dir, long_output))
shutil.copy(inName, outName)
if include_displacement_maps:
inName = "{}.los.disp.geo.org.tif".format(output)
outName = "{}_los_disp.tif".format(os.path.join(prod_dir, long_output))
shutil.copy(inName, outName)
inName = "{}.vert.disp.geo.org.tif".format(output)
outName = "{}_vert_disp.tif".format(os.path.join(prod_dir, long_output))
shutil.copy(inName, outName)
if include_inc_map:
inName = "{}.inc.tif".format(output)
outName = "{}_inc_map.tif".format(os.path.join(prod_dir, long_output))
shutil.copy(inName, outName)
inName = "{}.inc_ell.tif".format(output)
outName = "{}_inc_map_ell.tif".format(os.path.join(prod_dir, long_output))
shutil.copy(inName, outName)
if include_look_vectors:
inName = "{}.lv_theta.tif".format(output)
outName = "{}_lv_theta.tif".format(os.path.join(prod_dir, long_output))
shutil.copy(inName, outName)
inName = "{}.lv_phi.tif".format(output)
outName = "{}_lv_phi.tif".format(os.path.join(prod_dir, long_output))
shutil.copy(inName, outName)
makeAsfBrowse("{}.diff0.man.adf.bmp.geo.tif".format(output),
"{}_color_phase".format(os.path.join(prod_dir, long_output)), use_nn=True)
makeAsfBrowse("{}.adf.unw.geo.bmp.tif".format(output),
"{}_unw_phase".format(os.path.join(prod_dir, long_output)), use_nn=True)
def make_parameter_file(mydir, parameter_file_name, alooks, rlooks, dem_source, coords, ref_point_info):
res = 20 * int(alooks)
reference_date = mydir[:15]
secondary_date = mydir[17:]
reference_date_short = reference_date[:8]
log.info("In directory {} looking for file with date {}".format(os.getcwd(), reference_date_short))
reference_file = glob.glob("*%s*.SAFE" % reference_date)[0]
secondary_file = glob.glob("*%s*.SAFE" % secondary_date)[0]
parfile = f'{reference_date_short}.mli.par'
erad_nadir = getParameter(parfile, 'earth_radius_below_sensor')
erad_nadir = erad_nadir.split()[0]
sar_to_earth_center = getParameter(parfile, 'sar_to_earth_center')
sar_to_earth_center = sar_to_earth_center.split()[0]
height = float(sar_to_earth_center) - float(erad_nadir)
near_slant_range = getParameter(parfile, 'near_range_slc')
near_slant_range = near_slant_range.split()[0]
center_slant_range = getParameter(parfile, 'center_range_slc')
center_slant_range = center_slant_range.split()[0]
far_slant_range = getParameter(parfile, 'far_range_slc')
far_slant_range = far_slant_range.split()[0]
with open("baseline.log") as f:
for line in f:
if "estimated baseline perpendicular component" in line:
# FIXME: RE is overly complicated here. this is two simple string splits
t = re.split(":", line)
s = re.split(r'\s+', t[1])
baseline = float(s[1])
back = os.getcwd()
os.chdir(os.path.join(reference_file, "annotation"))
utctime = None
for myfile in os.listdir("."):
if "001.xml" in myfile:
root = etree.parse(myfile)
for coord in root.iter('productFirstLineUtcTime'):
utc = coord.text
log.info("Found utc time {}".format(utc))
t = utc.split("T")
log.info("{}".format(t))
s = t[1].split(":")
log.info("{}".format(s))
utctime = ((int(s[0]) * 60 + int(s[1])) * 60) + float(s[2])
os.chdir(back)
heading = None
name = f'{reference_date[:8]}.mli.par'
with open(name, "r") as f:
for line in f:
if "heading" in line:
t = re.split(":", line)
# FIXME: RE is overly complicated here. this is two simple string splits
s = re.split(r'\s+', t[1])
heading = float(s[1])
reference_orbit_parameters = get_orbit_parameters(reference_file)
secondary_orbit_parameters = get_orbit_parameters(secondary_file)
reference_file = reference_file.replace(".SAFE", "")
secondary_file = secondary_file.replace(".SAFE", "")
with open(parameter_file_name, 'w') as f:
f.write('Reference Granule: %s\n' % reference_file)
f.write('Secondary Granule: %s\n' % secondary_file)
f.write('Reference Pass Direction: %s\n' % reference_orbit_parameters["pass_direction"])
f.write('Reference Orbit Number: %s\n' % reference_orbit_parameters["orbitnumber"])
f.write('Secondary Pass Direction: %s\n' % secondary_orbit_parameters["pass_direction"])
f.write('Secondary Orbit Number: %s\n' % secondary_orbit_parameters["orbitnumber"])
f.write('Baseline: %s\n' % baseline)
f.write('UTC time: %s\n' % utctime)
f.write('Heading: %s\n' % heading)
f.write('Spacecraft height: %s\n' % height)
f.write('Earth radius at nadir: %s\n' % erad_nadir)
f.write('Slant range near: %s\n' % near_slant_range)
f.write('Slant range center: %s\n' % center_slant_range)
f.write('Slant range far: %s\n' % far_slant_range)
f.write('Range looks: %s\n' % rlooks)
f.write('Azimuth looks: %s\n' % alooks)
f.write('INSAR phase filter: adf\n')
f.write('Phase filter parameter: 0.6\n')
f.write('Resolution of output (m): %s\n' % res)
f.write('Range bandpass filter: no\n')
f.write('Azimuth bandpass filter: no\n')
f.write('DEM source: %s\n' % dem_source)
f.write('DEM resolution (m): %s\n' % (res * 2))
f.write('Unwrapping type: mcf\n')
f.write('Phase at reference point: %s\n' % ref_point_info["refoffset"])
f.write('Azimuth line of the reference point in SAR space: %s\n' % coords["row_s"])
f.write('Range pixel of the reference point in SAR space: %s\n' % coords["col_s"])
f.write('Y coordinate of the reference point in the map projection: %s\n' % coords["y"])
f.write('X coordinate of the reference point in the map projection: %s\n' % coords["x"])
f.write('Latitude of the reference point (WGS84): %s\n' % coords["lat"])
f.write('Longitude of the reference point (WGS84): %s\n' % coords["lon"])
f.write('Unwrapping threshold: none\n')
f.write('Speckle filter: no\n')
def insar_sentinel_gamma(reference_file, secondary_file, rlooks=20, alooks=4, include_look_vectors=False,
include_displacement_maps=False, include_wrapped_phase=False, include_inc_map=False,
include_dem=False, apply_water_mask=False):
log.info("\n\nSentinel-1 differential interferogram creation program\n")
wrk = os.getcwd()
reference_date = reference_file[17:32]
reference = reference_file[17:25]
secondary_date = secondary_file[17:32]
secondary = secondary_file[17:25]
igramName = "{}_{}".format(reference_date, secondary_date)
if "IW_SLC__" not in reference_file:
raise GranuleError(f'Reference file {reference_file} is not of type IW_SLC!')
if "IW_SLC__" not in secondary_file:
raise GranuleError(f'Secondary file {secondary_file} is not of type IW_SLC!')
pol = get_copol(reference_file)
log.info("Processing the {} polarization".format(pol))
# Ingest the data files into gamma format
log.info("Starting par_S1_SLC")
orbit_files = []
for granule in (reference_file, secondary_file):
orbit_file, _ = downloadSentinelOrbitFile(granule)
par_s1_slc_single(granule, pol, os.path.abspath(orbit_file))
orbit_files.append(orbit_file)
# Fetch the DEM file
log.info("Getting a DEM file")
dem_source = 'GLO-30'
dem_pixel_size = int(alooks) * 40 # typically 160 or 80; IFG pixel size will be half the DEM pixel size (80 or 40)
get_dem_file_gamma('big.dem', 'big.par', reference_file, pixel_size=dem_pixel_size)
log.info("Got dem of type {}".format(dem_source))
# Figure out which bursts overlap between the two swaths
burst_tab1, burst_tab2 = get_burst_overlaps(reference_file, secondary_file)
log.info("Finished calculating overlap - in directory {}".format(os.getcwd()))
shutil.move(burst_tab1, f'{reference}/{burst_tab1}')
shutil.move(burst_tab2, f'{secondary}/{burst_tab2}')
# Mosaic the swaths together and copy SLCs over
log.info("Starting SLC_copy_S1_fullSW.py")
os.chdir(reference)
SLC_copy_S1_fullSW(wrk, reference, "SLC_TAB", burst_tab1, mode=1, dem="big", dempath=wrk, raml=rlooks, azml=alooks)
os.chdir("..")
os.chdir(secondary)
SLC_copy_S1_fullSW(wrk, secondary, "SLC_TAB", burst_tab2, mode=2, raml=rlooks, azml=alooks)
os.chdir("..")
# Interferogram creation, matching, refinement
log.info("Starting interf_pwr_s1_lt_tops_proc.py 0")
hgt = "DEM/HGT_SAR_{}_{}".format(rlooks, alooks)
interf_pwr_s1_lt_tops_proc(reference, secondary, hgt, rlooks=rlooks, alooks=alooks, iterations=3, step=0)
log.info("Starting interf_pwr_s1_lt_tops_proc.py 1")
interf_pwr_s1_lt_tops_proc(reference, secondary, hgt, rlooks=rlooks, alooks=alooks, iterations=3, step=1)
# run.py
"""Contains a main function for training and/or evaluating a model."""
import os
import sys
import numpy as np
import random
import shutil
import copy
from parse_args import interpret_args
import data_util
from data_util import atis_data
from model.schema_interaction_model import SchemaInteractionATISModel
from logger import Logger
from model.model import ATISModel
from model_util import Metrics, evaluate_utterance_sample, evaluate_interaction_sample, \
train_epoch_with_utterances, train_epoch_with_interactions, evaluate_using_predicted_queries, \
generate_samples, dis_train_epoch, dis_eval_epoch, get_progressbar
import progressbar
from data_util.atis_vocab import EOS_TOK
from data_util.dis_data_iter import DisDataIter
import torch
from torch.autograd import Variable
from model.discriminator import Discriminator
import torch.optim as optim
import torch.nn as nn
np.random.seed(0)
random.seed(0)
VALID_EVAL_METRICS = [Metrics.LOSS, Metrics.TOKEN_ACCURACY, Metrics.STRING_ACCURACY]
TRAIN_EVAL_METRICS = [Metrics.LOSS, Metrics.TOKEN_ACCURACY, Metrics.STRING_ACCURACY]
FINAL_EVAL_METRICS = [Metrics.STRING_ACCURACY, Metrics.TOKEN_ACCURACY]
def train(model, data, params, start_epoch=0):
""" Trains a model.
Inputs:
model (ATISModel): The model to train.
data (ATISData): The data that is used to train.
params (namespace): Training parameters.
"""
# Get the training batches.
log = Logger(os.path.join(params.logdir, params.logfile), "w")
num_train_original = atis_data.num_utterances(data.train_data)
log.put("Original number of training utterances:\t"
+ str(num_train_original))
eval_fn = evaluate_utterance_sample
trainbatch_fn = data.get_utterance_batches
trainsample_fn = data.get_random_utterances
validsample_fn = data.get_all_utterances
batch_size = params.batch_size
if params.interaction_level:
batch_size = 1
eval_fn = evaluate_interaction_sample
trainbatch_fn = data.get_interaction_batches
trainsample_fn = data.get_random_interactions
validsample_fn = data.get_all_interactions
maximum_output_length = params.train_maximum_sql_length
train_batches = trainbatch_fn(batch_size,
max_output_length=maximum_output_length,
randomize=not params.deterministic)
if params.num_train >= 0:
train_batches = train_batches[:params.num_train]
training_sample = trainsample_fn(params.train_evaluation_size,
max_output_length=maximum_output_length)
valid_examples = validsample_fn(data.valid_data,
max_output_length=maximum_output_length)
num_train_examples = sum([len(batch) for batch in train_batches])
num_steps_per_epoch = len(train_batches)
log.put(
"Actual number of used training examples:\t" +
str(num_train_examples))
log.put("(Shortened by output limit of " +
str(maximum_output_length) +
")")
log.put("Number of steps per epoch:\t" + str(num_steps_per_epoch))
log.put("Batch size:\t" + str(batch_size))
print(
"Kept " +
str(num_train_examples) +
"/" +
str(num_train_original) +
" examples")
print(
"Batch size of " +
str(batch_size) +
" gives " +
str(num_steps_per_epoch) +
" steps per epoch")
# Keeping track of things during training.
epochs = start_epoch
patience = params.initial_patience
learning_rate_coefficient = 1.
previous_epoch_loss = float('inf')
maximum_validation_accuracy = 0.
maximum_string_accuracy = 0.
countdown = int(patience)
if params.scheduler:
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model.trainer, mode='min', )
keep_training = True
while keep_training:
log.put("Epoch:\t" + str(epochs))
model.set_dropout(params.dropout_amount)
if not params.scheduler:
model.set_learning_rate(learning_rate_coefficient * params.initial_learning_rate)
# Run a training step.
if params.interaction_level:
epoch_loss = train_epoch_with_interactions(
train_batches,
params,
model,
randomize=not params.deterministic,
sampling=params.is_sampling
)
else:
epoch_loss = train_epoch_with_utterances(
train_batches,
model,
randomize=not params.deterministic)
log.put("train epoch loss:\t" + str(epoch_loss))
model.set_dropout(0.)
# Run an evaluation step on a sample of the training data.
train_eval_results = eval_fn(training_sample,
model,
params.train_maximum_sql_length,
name=os.path.join(params.logdir, "train-eval"),
write_results=True,
gold_forcing=True,
metrics=TRAIN_EVAL_METRICS)[0]
for name, value in train_eval_results.items():
log.put(
"train final gold-passing " +
name.name +
":\t" +
"%.2f" %
value)
# Run an evaluation step on the validation set.
valid_eval_results = eval_fn(valid_examples,
model,
params.eval_maximum_sql_length,
name=os.path.join(params.logdir, "valid-eval"),
write_results=True,
gold_forcing=True,
metrics=VALID_EVAL_METRICS)[0]
for name, value in valid_eval_results.items():
log.put("valid gold-passing " + name.name + ":\t" + "%.2f" % value)
valid_loss = valid_eval_results[Metrics.LOSS]
valid_token_accuracy = valid_eval_results[Metrics.TOKEN_ACCURACY]
string_accuracy = valid_eval_results[Metrics.STRING_ACCURACY]
if train_eval_results[Metrics.STRING_ACCURACY] >= params.gen_acc_threshold:
keep_training = False
if params.scheduler:
scheduler.step(valid_loss)
if valid_loss > previous_epoch_loss:
learning_rate_coefficient *= params.learning_rate_ratio
log.put(
"learning rate coefficient:\t" +
str(learning_rate_coefficient))
previous_epoch_loss = valid_loss
if string_accuracy > maximum_string_accuracy:
maximum_string_accuracy = string_accuracy
patience = patience * params.patience_ratio
countdown = int(patience)
log.put(
"maximum string accuracy:\t" +
str(maximum_string_accuracy))
log.put("patience:\t" + str(patience))
if countdown <= 0:
keep_training = False
countdown -= 1
log.put("countdown:\t" + str(countdown))
log.put("")
epochs += 1
if params.max_epoch and epochs >= params.max_epoch:
keep_training = False
# save checkpoint
ckp = {
'epoch': epochs,
'state_dict': model.state_dict(),
'optimizer_state_dict': model.trainer.state_dict(),
'bert_optimizer_state_dict': model.bert_trainer.state_dict()
}
save_ckp(ckp, params.logdir, params.gen_pretrain_ckp)
log.put("Finished training!")
log.close()
def evaluate(model, data, params, split):
"""Evaluates a pretrained model on a dataset.
Inputs:
model (ATISModel): Model class.
data (ATISData): All of the data.
params (namespace): Parameters for the model.
"""
filename = split
if filename == 'dev':
split = data.dev_data
elif filename == 'train':
split = data.train_data
elif filename == 'test':
split = data.test_data
elif filename == 'valid':
split = data.valid_data
else:
raise ValueError("Split not recognized: " + str(params.evaluate_split))
if params.use_predicted_queries:
filename += "_use_predicted_queries"
else:
filename += "_use_gold_queries"
full_name = os.path.join(params.logdir, filename) + params.results_note
if params.interaction_level or params.use_predicted_queries:
examples = data.get_all_interactions(split)
if params.interaction_level:
valid_eval_results = evaluate_interaction_sample(
examples,
model,
name=full_name,
metrics=FINAL_EVAL_METRICS,
total_num=atis_data.num_utterances(split),
database_username=params.database_username,
database_password=params.database_password,
database_timeout=params.database_timeout,
use_predicted_queries=params.use_predicted_queries,
max_generation_length=params.eval_maximum_sql_length,
write_results=True,
use_gpu=True,
compute_metrics=params.compute_metrics)[0]
else:
valid_eval_results = evaluate_using_predicted_queries(
examples,
model,
name=full_name,
metrics=FINAL_EVAL_METRICS,
total_num=atis_data.num_utterances(split),
database_username=params.database_username,
database_password=params.database_password,
database_timeout=params.database_timeout)[0]
else:
examples = data.get_all_utterances(split)
valid_eval_results = evaluate_utterance_sample(
examples,
model,
name=full_name,
gold_forcing=False,
metrics=FINAL_EVAL_METRICS,
total_num=atis_data.num_utterances(split),
max_generation_length=params.eval_maximum_sql_length,
database_username=params.database_username,
database_password=params.database_password,
database_timeout=params.database_timeout,
write_results=True)[0]
for name, value in valid_eval_results.items():
print("valid gold-passing " + name.name + ":\t" + "%.2f" % value)
valid_token_accuracy = valid_eval_results[Metrics.TOKEN_ACCURACY]
string_accuracy = valid_eval_results[Metrics.STRING_ACCURACY]
print("token accuracy:\t" + str(valid_token_accuracy))
print("maximum string accuracy:\t" + str(string_accuracy))
def pretrain_discriminator(params, generator, discriminator,
dis_criterion, dis_optimizer, data,
start_epoch=0):
log = Logger(os.path.join(params.logdir, params.dis_logfile), 'w')
if params.interaction_level:
get_data = data.get_all_interactions
else:
get_data = data.get_all_utterances
train_data = get_data(
data.train_data,
max_output_length=params.train_maximum_sql_length
)
valid_data = get_data(
data.valid_data,
max_output_length=params.train_maximum_sql_length
)
real_train_path = os.path.join(params.samples_dir, params.real_train_file)
fake_train_path = os.path.join(params.samples_dir, params.fake_train_file)
real_valid_path = os.path.join(params.samples_dir, params.real_valid_file)
fake_valid_path = os.path.join(params.samples_dir, params.fake_valid_file)
generator.set_dropout(0.)
if params.generated_train:
print("Already generated training samples!")
else:
print("Generating training samples!")
with torch.no_grad():
if params.debug:
generate_samples(generator, train_data,
real_train_path, fake_train_path,
params.train_maximum_sql_length,
sampling=params.is_sampling,
gen_num=100,
train=True)
else:
generate_samples(generator, train_data,
real_train_path, fake_train_path,
params.train_maximum_sql_length,
sampling=params.is_sampling,
train=True)
print("Finished generating training samples!")
train_iter = DisDataIter(real_train_path,
fake_train_path,
params.dis_batch_size)
log.put(
"Number of training examples:\t" + str(train_iter.data_num)
)
log.put(
"Number of steps per epoch:\t" + str(train_iter.num_batches)
)
log.put("Batch size:\t" + str(train_iter.batch_size))
print(
"Number of training examples: " + str(train_iter.data_num)
)
print("Batch size of " + str(train_iter.batch_size) + " gives "
+ str(train_iter.num_batches) + " steps per epoch")
if params.generated_valid:
print("Already generated validation samples!")
else:
print("Generating validation samples!")
with torch.no_grad():
if params.debug:
generate_samples(generator, valid_data,
real_valid_path, fake_valid_path,
params.train_maximum_sql_length,
sampling=params.is_sampling,
gen_num=100)
else:
generate_samples(generator, valid_data,
real_valid_path, fake_valid_path,
params.train_maximum_sql_length,
sampling=params.is_sampling)
print("Finished generating validation samples!")
valid_iter = DisDataIter(real_valid_path,
fake_valid_path,
params.dis_batch_size)
print("Begin pre-training!")
for epoch in range(start_epoch, params.num_dis_epoch):
log.put("Epoch:\t" + str(epoch))
print("Epoch: " + str(epoch))
t_metrics = dis_train_epoch(
discriminator,
train_iter,
dis_criterion,
dis_optimizer)
log.put("Train loss:\t" + str(t_metrics["loss"]))
log.put("Train accuracy:\t" + str(t_metrics["acc"]))
log.put("Train real accuracy:\t" + str(t_metrics["real_acc"]))
log.put("Train fake accuracy:\t" + str(t_metrics["fake_acc"]))
log.put("Train confidence:\t" + str(t_metrics["con"]))
log.put("Train real confidence:\t" + str(t_metrics["real_con"]))
log.put("Train fake confidence:\t" + str(t_metrics["fake_con"]))
print("Train loss: " + str(t_metrics["loss"]))
print("Train accuracy: " + str(t_metrics["acc"]))
print("Train real accuracy: " + str(t_metrics["real_acc"]))
print("Train fake accuracy: " + str(t_metrics["fake_acc"]))
print("Train confidence: " + str(t_metrics["con"]))
print("Train real confidence: " + str(t_metrics["real_con"]))
print("Train fake confidence: " + str(t_metrics["fake_con"]))
with torch.no_grad():
v_metrics = dis_eval_epoch(
discriminator,
valid_iter,
dis_criterion)
log.put("Valid loss:\t" + str(v_metrics["loss"]))
log.put("Valid accuracy:\t" + str(v_metrics["acc"]))
log.put("Valid real accuracy:\t" + str(v_metrics["real_acc"]))
log.put("Valid fake accuracy:\t" + str(v_metrics["fake_acc"]))
log.put("Valid confidence:\t" + str(v_metrics["con"]))
log.put("Valid real confidence:\t" + str(v_metrics["real_con"]))
log.put("Valid fake confidence:\t" + str(v_metrics["fake_con"]))
print("Valid loss: " + str(v_metrics["loss"]))
print("Valid accuracy: " + str(v_metrics["acc"]))
print("Valid real accuracy: " + str(v_metrics["real_acc"]))
print("Valid fake accuracy: " + str(v_metrics["fake_acc"]))
print("Valid confidence: " + str(v_metrics["con"]))
print("Valid real confidence: " + str(v_metrics["real_con"]))
print("Valid fake confidence: " + str(v_metrics["fake_con"]))
# save checkpoint
ckp = {
'epoch': epoch + 1,
'state_dict': discriminator.state_dict(),
'optimizer_state_dict': dis_optimizer.state_dict(),
}
save_ckp(ckp, params.logdir, params.dis_pretrain_ckp)
if t_metrics["con"] >= params.train_accuracy_threshold:
break
log.put("Finished pre-training discriminator!")
log.close()
print("Finished pre-training discriminator!")
def adv_train(generator, discriminator, dis_criterion,
dis_optimizer, data, params, start_epoch=0,
start_batches=None, start_pos_in_batch=0):
log = Logger(os.path.join(params.logdir, params.adv_logfile), 'w')
if params.interaction_level:
get_batch = data.get_interaction_batches
get_data = data.get_all_interactions
get_sample = data.get_random_interactions
evaluate = evaluate_interaction_sample
else:
get_batch = data.get_utterance_batches
get_data = data.get_all_utterances
get_sample = data.get_random_utterances
evaluate = evaluate_utterance_sample
if start_batches:
train_batch = start_batches
else:
train_batch = get_batch(
params.gan_batch_size,
max_output_length=params.train_maximum_sql_length
)
num_batch = len(train_batch)
train_data = get_data(
data.train_data,
max_output_length=params.train_maximum_sql_length
)
train_sample = get_sample(
params.train_evaluation_size,
max_output_length=params.train_maximum_sql_length
)
valid_data = get_data(
data.valid_data,
max_output_length=params.train_maximum_sql_length
)
progbar = get_progressbar("adversarial training ",
num_batch * params.adv_epoch)
progbar.start()
print("")
real_path = os.path.join(params.samples_dir, params.adv_real_file)
fake_path = os.path.join(params.samples_dir, params.adv_fake_file)
real_valid = os.path.join(params.samples_dir, params.adv_real_valid)
fake_valid = os.path.join(params.samples_dir, params.adv_fake_valid)
generator.set_dropout(params.dropout_amount)
for epoch in range(start_epoch, params.adv_epoch):
log.put("Epoch:\t" + str(epoch))
print("Epoch: " + str(epoch))
for i in range(start_pos_in_batch, num_batch):
batch = train_batch[i]
gen_loss = 0.
progbar2 = get_progressbar("generator ",
params.gan_batch_size)
progbar2.start()
for j, example in enumerate(batch.items):
seq, _, prob, pred = \
generator(example, params.train_maximum_sql_length,
sampling=params.is_sampling)
if seq[-1] == EOS_TOK:
seq = seq[:-1]
prob = prob[:-1]
with torch.no_grad():
rewards = generator.get_reward(
seq, pred, example, params.roll_num,
params.max_gen_len, discriminator,
bias=params.bias, mle=params.mle
)
# log.put("Generator reward:\t" + str(rewards.tolist()))
# print("Generator reward: " + str(rewards.tolist()))
rewards = torch.Tensor(rewards).cuda()
loss = generator.update_gan_loss(prob, rewards)
gen_loss += loss
torch.cuda.empty_cache()
progbar2.update(j)
progbar2.finish()
log.put("Generator mean loss:\t" + str(gen_loss/params.gan_batch_size))
print("Generator mean loss: " + str(gen_loss/params.gan_batch_size))
if params.teacher_forcing:
forcing_loss = 0.
progbar3 = get_progressbar("forcing ",
params.gan_batch_size)
progbar3.start()
for j, example in enumerate(batch.items):
seq, _, prob, pred = generator(example, params.train_maximum_sql_length, sampling=params.is_sampling)
# get output from device
output = self.device.execute(self.cli_command)
# initial return dictionary
result_dict = {}
# DHCPv6 LDRA is Enabled.
p0 = re.compile(r'^DHCPv6 +LDRA +is +(?P<status>(Enabled|Disabled))')
# DHCPv6 LDRA policy: client-facing-disable
# DHCPv6 LDRA policy: client-facing-trusted
# DHCPv6 LDRA policy: client-facing-untrusted
# DHCPv6 LDRA policy: server-facing
p1 = re.compile(r'^DHCPv6 +LDRA +policy: +(?P<policy>[a-zA-Z\-]+)')
# Target: Gi1/0/20
# Target: Gi1/0/12 vlan 2 vlan 3 vlan 10
# Target: Gi1/0/11 vlan 4 vlan 5 vlan 11
# Target: Gi1/0/6 Gi1/0/7 Gi1/0/8 Gi1/0/9 Gi1/0/10 Gi1/0/13 Gi1/0/14 Gi1/0/15
p2 = re.compile(r'^Target:\s+(?P<targets>[\w\/\s]+)')
# Gi1/0/16 Gi1/0/17 Gi1/0/18 Gi1/0/19
p3 = re.compile(r'^(?P<targets_ext>[\w\/\s]+)')
for line in output.splitlines():
line = line.strip()
# skip empty lines
if not line:
continue
# DHCPv6 LDRA is Enabled.
m0 = p0.match(line)
if m0:
group = m0.groupdict()
ldra_dict = result_dict.setdefault('ldra', group)
continue
# DHCPv6 LDRA policy: client-facing-disable
# DHCPv6 LDRA policy: client-facing-trusted
# DHCPv6 LDRA policy: client-facing-untrusted
# DHCPv6 LDRA policy: server-facing
m1 = p1.match(line)
if m1:
pol_group = {}
m1_group = m1.groupdict()
pol_group['policy'] = m1_group['policy'].replace('-', '_')
ldra_dict[pol_group['policy']] = {}
continue
# Target: Gi1/0/20
# Target: Gi1/0/12 vlan 2 vlan 3 vlan 10
# Target: Gi1/0/11 vlan 4 vlan 5 vlan 11
# Target: Gi1/0/6 Gi1/0/7 Gi1/0/8 Gi1/0/9 Gi1/0/10 Gi1/0/13 Gi1/0/14 Gi1/0/15
m2 = p2.match(line)
if m2:
intf_group = m2.groupdict()
intf_list = re.split(r'\s+(?!\d+)', intf_group['targets'])
ldra_dict[pol_group['policy']]['targets'] = \
[Common.convert_intf_name(intf) for intf in intf_list]
continue
# Gi1/0/16 Gi1/0/17 Gi1/0/18 Gi1/0/19
m3 = p3.match(line)
if m3:
intf_group_ext = m3.groupdict()
intf_list = re.split(r'\s+(?!\d+)', intf_group_ext['targets_ext'])
ldra_dict[pol_group['policy']]['targets'].extend(
[Common.convert_intf_name(intf) for intf in intf_list])
continue
return result_dict
class ShowIpv6DhcpLdraStatisticsSchema(MetaParser):
"""
Schema for show ipv6 dhcp-ldra statistics
"""
schema = {
'statistics': {
Any(): {
'total_recvd': int,
'total_sent': int,
'total_discard': int,
Optional('msg_sent'): {
Any(): int
},
Optional('msg_received'): {
Any(): int
}
}
}
}
class ShowIpv6DhcpLdraStatistics(ShowIpv6DhcpLdraStatisticsSchema):
"""
Parser for show ipv6 dhcp-ldra statistics
"""
cli_command = 'show ipv6 dhcp-ldra statistics'
def cli(self, output=None):
"""
Parse the output from the cli and return parsed data
"""
if output is None:
# get output from device
output = self.device.execute(self.cli_command)
# initial return dictionary
result_dict = {}
# DHCPv6 LDRA client facing statistics.
# DHCPv6 LDRA server facing statistics.
p0 = re.compile(r'DHCPv6\s+LDRA\s+(?P<mode>[\w\s]+)\s+statistics')
# Messages received 20
p1 = re.compile(r'Messages\s+received\s+(?P<total_recvd>\d+)')
# Messages sent 20
p2 = re.compile(r'Messages\s+sent\s+(?P<total_sent>\d+)')
# Messages discarded 0
p3 = re.compile(r'Messages\s+discarded\s+(?P<total_discard>\d+)')
# Messages Received
# Messages Sent
p4 = re.compile(r'Messages\s+(?P<msg_mode>Received|Sent)')
# SOLICIT 1
# REQUEST 1
# RENEW 18
# RELAY-FORWARD 20
p5 = re.compile(r'^(?P<msg_type>[a-zA-Z\-]+)\s+(?P<count>\d+)')
for line in output.splitlines():
line = line.strip()
# skip empty lines
if not line:
continue
# DHCPv6 LDRA client facing statistics.
# DHCPv6 LDRA server facing statistics.
m0 = p0.match(line)
if m0:
group = {}
m0_group = m0.groupdict()
group['mode'] = m0_group['mode'].replace(' ', '_')
stats_dict = result_dict.setdefault('statistics', {})
stats_dict[group['mode']] = {}
continue
# Messages received 20
m1 = p1.match(line)
if m1:
recvd_group = m1.groupdict()
recvd_group['total_recvd'] = int(recvd_group['total_recvd'])
stats_dict[group['mode']].update(recvd_group)
continue
# Messages sent 20
m2 = p2.match(line)
if m2:
sent_group = m2.groupdict()
sent_group['total_sent'] = int(sent_group['total_sent'])
stats_dict[group['mode']].update(sent_group)
continue
# Messages discarded 0
m3 = p3.match(line)
if m3:
discard_group = m3.groupdict()
discard_group['total_discard'] = int(discard_group['total_discard'])
stats_dict[group['mode']].update(discard_group)
continue
# Messages Received
# Messages Sent
m4 = p4.match(line)
if m4:
msg_mode_group = m4.groupdict()
msg_dict = stats_dict[group['mode']].setdefault(f"msg_{msg_mode_group['msg_mode'].lower()}", {})
continue
# SOLICIT 1
# REQUEST 1
# RENEW 18
# RELAY-FORWARD 20
m5 = p5.match(line)
if m5:
msg_type_group = m5.groupdict()
msg_dict[msg_type_group['msg_type'].lower().replace('-', '_')] = int(msg_type_group['count'])
continue
return result_dict
# ====================================================
# schema for show ipv6 routers
# ====================================================
class ShowIpv6RoutersSchema(MetaParser):
"""Schema for show ipv6 routers"""
schema = {
'router': {
Any(): {
'interface': str,
'last_update': int,
'hops': int,
'lifetime': int,
'addr_flag': int,
'other_flag': int,
'mtu': int,
'home_agent_flag': int,
'preference': str,
'reachable_time': int,
'retransmit_time': int,
'prefix': {
Any(): {
'valid_lifetime': int,
'preferred_lifetime': int
}
}
}
}
}
# ================================================================
# Parser for:
# * 'show ipv6 routers'
# ================================================================
class ShowIpv6Routers(ShowIpv6RoutersSchema):
""" Parser for:
show ipv6 routers
"""
cli_command = ['show ipv6 routers']
def cli(self, output=None):
""" cli for:
' show ipv6 routers '
"""
if output is None:
output = self.device.execute(self.cli_command)
#Router FE80::FA7A:41FF:FE25:2502 on Vlan100, last update 0 min, CONFLICT
p1 = re.compile(r'^Router +(?P<router_link_local_ip>\S+)\s+\w+\s+(?P<interface>\S+), +last +update +(?P<last_update>\d+) +min, +CONFLICT$')
# Hops 64, Lifetime 200 sec, AddrFlag=0, OtherFlag=0, MTU=1500
        p2 = re.compile(r'^Hops +(?P<hops>\d+), +Lifetime +(?P<lifetime>\d{1,4}) +sec, +AddrFlag=(?P<addr_flag>\d+), '
                        r'+OtherFlag=(?P<other_flag>\d+), +MTU=(?P<mtu>\d{1,4})$')
# HomeAgentFlag=0, Preference=Low
p3 = re.compile(r'^HomeAgentFlag\=(?P<home_agent_flag>\d+), +Preference\=(?P<preference>\w+)$')
# Reachable time 0 (unspecified), Retransmit time 0 (unspecified)
p4 = re.compile(r'^Reachable +time +(?P<reachable_time>\d+) \S+, +Retransmit +time +(?P<retransmit_time>\d+)')
#Prefix 111::/64 onlink autoconfig
p5 = re.compile(r'^Prefix +(?P<prefix_id>\S+) +onlink +autoconfig$')
# Valid lifetime 12, preferred lifetime 8
p6 = re.compile(r'^Valid +lifetime +(?P<valid_lifetime>\d+), +preferred +lifetime +(?P<preferred_lifetime>\d+)$')
ret_dict = {}
for line in output.splitlines():
line = line.strip()
# Router FE80::FA7A:41FF:FE25:2502 on Vlan100, last update 0 min, CONFLICT
m = p1.match(line)
if m:
router_link_local = m.groupdict()['router_link_local_ip']
router_dict = ret_dict.setdefault('router', {}).setdefault(router_link_local, {})
router_dict.update({
'interface': m.groupdict()['interface'],
'last_update':int(m.groupdict()['last_update'])
})
continue
# Hops 64, Lifetime 200 sec, AddrFlag=0, OtherFlag=0, MTU=1500
m = p2.match(line)
if m:
router_dict.update({
'hops': int(m.groupdict()['hops']),
'lifetime': int(m.groupdict()['lifetime']),
'addr_flag': int(m.groupdict()['addr_flag']),
'other_flag': int(m.groupdict()['other_flag']),
'mtu': int(m.groupdict()['mtu'])
})
continue
# HomeAgentFlag=0, Preference=Low
m = p3.match(line)
if m:
router_dict.update({
'home_agent_flag': int(m.groupdict()['home_agent_flag']),
'preference': m.groupdict()['preference']
})
continue
# Reachable time 0 (unspecified), Retransmit time 0 (unspecified)
m = p4.match(line)
if m:
router_dict.update({
'reachable_time': int(m.groupdict()['reachable_time']),
'retransmit_time': int(m.groupdict()['retransmit_time'])
})
continue
# Prefix 111::/64 onlink autoconfig
m = p5.match(line)
if m:
prefix_num = m.groupdict()['prefix_id']
prefix_dict = router_dict.setdefault('prefix', {}).setdefault(prefix_num, {})
continue
# Valid lifetime 12, preferred lifetime 8
m = p6.match(line)
if m:
prefix_dict.update({
'valid_lifetime': int(m.groupdict()['valid_lifetime']),
'preferred_lifetime': int(m.groupdict()['preferred_lifetime'])
})
continue
return ret_dict
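    # Illustrative result for the sample lines quoted in the pattern comments above:
    # {'router': {'FE80::FA7A:41FF:FE25:2502': {'interface': 'Vlan100', 'last_update': 0,
    #             'hops': 64, 'lifetime': 200, 'addr_flag': 0, 'other_flag': 0, 'mtu': 1500,
    #             'home_agent_flag': 0, 'preference': 'Low', 'reachable_time': 0,
    #             'retransmit_time': 0,
    #             'prefix': {'111::/64': {'valid_lifetime': 12, 'preferred_lifetime': 8}}}}}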
class ShowIpv6MribSchema(MetaParser):
"""Schema for:
show ipv6 mrib route
show ipv6 mrib route {group}
show ipv6 mrib route {group} {source}
show ipv6 mrib route vrf {vrf}
show ipv6 mrib route vrf {vrf} {group}
show ipv6 mrib route vrf {vrf} {group} {source}
"""
schema = {
'vrf': {
Any(): {
'address_family': {
Any(): {
'multicast_group': {
Any(): {
'source_address': {
Any(): {
'rpf_nbr': str,
Optional('flags'): str,
'incoming_interface_list': {
Any(): {
'ingress_flags': str,
}
},
'egress_interface_list': {
Any(): {
'egress_flags': str,
Optional('egress_next_hop'): str,
}
}
}
}
}
}
}
}
}
}
}
class ShowIpv6Mrib(ShowIpv6MribSchema):
"""Parser for:
show ipv6 mrib route
show ipv6 mrib route {group}
show ipv6 mrib route {group} {source}
show ipv6 mrib route vrf {vrf}
show ipv6 mrib route vrf {vrf} {group}
show ipv6 mrib route vrf {vrf} {group} {source}"""
cli_command = ['show ipv6 mrib route',
'show ipv6 mrib route {group}',
'show ipv6 mrib route {group} {source}',
'show ipv6 mrib vrf {vrf} route',
'show ipv6 mrib vrf {vrf} route {group}',
'show ipv6 mrib vrf {vrf} route {group} {source}']
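    # For example (values are illustrative), cli(vrf='VRF1', group='FF08::1')
    # executes "show ipv6 mrib vrf VRF1 route FF08::1" and parses its output.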
    def cli(self, vrf='default', group='', source='', address_family='ipv6', output=None):
        cmd = "show ipv6 mrib"
        if output is None:
            if vrf != 'default':
                cmd += " vrf {vrf}".format(vrf=vrf)
            cmd += " route"
            if group:
                cmd += " {group}".format(group=group)
            if source:
                cmd += " {source}".format(source=source)
output = self.device.execute(cmd)
# initial variables
mrib_dict = {}
sub_dict = {}
outgoing = False
# (*,172.16.31.10) RPF nbr: 10.10.10.1 Flags: C
# (3.3.3.3,172.16.31.10) RPF nbr: 10.10.10.1 Flags:
# (*,fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b) RPF nbr: 2001:150:1:1::1 Flags: C
#(2001:192:168:7::11,fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b) RPF nbr: 2001:150:1:1::1 Flags: L C
        p1 = re.compile(r'^\((?P<source_address>[\w\:\.\*\/]+)\,'
                        r'(?P<multicast_group>[\w\:\.\/]+)\)'
                        r' +RPF nbr: (?P<RPF_nbr>[\w\:\.\/]+)'
                        r'\s+Flags\:(?P<mrib_flags>[\w\s]+|$)')
# GigabitEthernet2/0/6 Flags: A NS
# Tunnel1 Flags: A NS
        p2 = re.compile(r'^(?P<ingress_if>[\w\.\/\, ]+)'
                        r'\s+Flags\: +(?P<ingress_flags>A[\s\w]+|[\s\w]+ +A[\s\w]+|A$)')
# LISP0.1 Flags: F NS Next-hop: 172.16.17.32
# LISP0.1 Flags: F NS Next-hop: (192.168.3.11, 192.168.127.12)
        p3 = re.compile(r'^(?P<egress_if>[\w\.\/\,]+)'
                        r'\s+Flags\:\s+(?P<egress_flags>F[\s\w]+)+Next-hop\:\s+(?P<egress_next_hop>([\w\:\.\*\/]+)|(\([\w\:\.\*\/]+\, +[\w\:\.\*\/]+\)))')
# Vlan2006 Flags: F LI NS
        p4 = re.compile(r'^(?P<egress_if>[\w\.\/\, ]+)'
                        r'\s+Flags\: +(?P<egress_flags>F[\s\w]+)')
for line in output.splitlines():
            line = line.strip().replace('\t', ' ')
            mrib_dict.setdefault('vrf', {})
            mrib_data = mrib_dict['vrf'].setdefault(vrf, {}).setdefault('address_family', {}).setdefault(address_family, {})
            # (*,172.16.31.10) RPF nbr: 10.10.10.1 Flags: C
            # (172.16.17.32,172.16.31.10) RPF nbr: 10.10.10.1 Flags:
            # (*,fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b) RPF nbr: 2001:150:1:1::1 Flags: C
            # (2001:70:1:1::10,fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b) RPF nbr: 2001:150:1:1::1 Flags: L C
m = p1.match(line)
if m:
group = m.groupdict()
source_address = group['source_address']
multicast_group = group['multicast_group']
mrib_data.setdefault('multicast_group',{})
sub_dict = mrib_data['multicast_group']\
.setdefault(multicast_group,{})\
.setdefault('source_address',{})\
.setdefault(source_address,{})
sub_dict['rpf_nbr'] = m.groupdict()['RPF_nbr']
sub_dict['flags'] = m.groupdict()['mrib_flags']
continue
# GigabitEthernet2/0/6 Flags: A NS
# Tunnel50 Flags: A
            sw_data = sub_dict
            m = p2.match(line)
            if m:
                group = m.groupdict()
                ingress_interface = group['ingress_if']
                ing_intf_dict = sw_data.setdefault('incoming_interface_list', {}).setdefault(ingress_interface, {})
                ing_intf_dict['ingress_flags'] = group['ingress_flags']
continue
# LISP0.1 Flags: F NS Next-hop: 172.16.17.32
            # LISP0.1 Flags: F NS Next-hop: (192.168.3.11, 192.168.127.12)
            m = p3.match(line)
            if m:
                group = m.groupdict()
                egress_interface = group['egress_if']
                egress_data = sw_data.setdefault('egress_interface_list', {}).setdefault(egress_interface, {})
                egress_data['egress_flags'] = group['egress_flags']
                egress_data['egress_next_hop'] = group['egress_next_hop']
                continue
            # Vlan2006 Flags: F LI NS
            m = p4.match(line)
            if m:
                group = m.groupdict()
                egress_interface = group['egress_if']
                egress_data = sw_data.setdefault('egress_interface_list', {}).setdefault(egress_interface, {})
                egress_data['egress_flags'] = group['egress_flags']
                continue
        return mrib_dict
"Tetum",
"teu": "Soo",
"tev": "Teor",
"tew": "Tewa",
"tex": "Tennet",
"tey": "Tulishi",
"tez": "Tetserret",
"tfi": "<NAME>",
"tfn": "Dena'ina",
"tfo": "Tefaro",
"tfr": "Teribe",
"tft": "Ternate",
"tg": "Tajik",
"tga": "Sagalla",
"tgb": "Tobilung",
"tgc": "Tigak",
"tgd": "Ciwogai",
"tge": "Eastern Gorkha Tamang",
"tgf": "Chali",
"tgh": "Tobagonian Creole English",
"tgi": "Lawunuia",
"tgn": "Tandaganon",
"tgo": "Sudest",
"tgp": "Tangoa",
"tgq": "Tring",
"tgr": "Tareng",
"tgs": "Nume",
"tgt": "Central Tagbanwa",
"tgu": "Tanggu",
"tgv": "Tingui-Boto",
"tgw": "Tag<NAME>",
"tgx": "Tagish",
"tgy": "Togoyo",
"th": "Thai",
"thc": "Tai Hang Tong",
"thd": "<NAME>ayorre",
"the": "Chitwania Tharu",
"thf": "Thangmi",
"thh": "Northern Tarahumara",
"thi": "Tai Long",
"thk": "Tharaka",
"thl": "Dangaura Tharu",
"thm": "Thavung",
"thn": "Thachanadan",
"thp": "Thompson",
"thq": "Kochila Tharu",
"thr": "Rana Tharu",
"ths": "Thakali",
"tht": "Tahltan",
"thu": "Thuri",
"thv": "Tamahaq",
"thy": "Tha",
"thz": "Tayert",
"ti": "Tigrinya",
"tic": "Tira",
"tif": "Tifal",
"tig": "Tigre",
"tih": "<NAME>",
"tii": "Tiene",
"tij": "Tilung",
"tik": "Tikar",
"til": "Tillamook",
"tim": "Timbe",
"tin": "Tindi",
"tio": "Teop",
"tip": "Trimuris",
"tiq": "Tiéfo",
"tis": "Masadiit Itneg",
"tit": "Tinigua",
"tiu": "Adasen",
"tiv": "Tiv",
"tiw": "Tiwi",
"tix": "Southern Tiwa",
"tiy": "Tiruray",
"tiz": "Tai Hongjin",
"tja": "Tajuasohn",
"tjg": "Tunjung",
"tji": "Northern Tujia",
"tjl": "Tai Laing",
"tjm": "Timucua",
"tjn": "Tonjon",
"tjs": "Southern Tujia",
"tju": "Tjurruru",
"tjw": "Chaap Wuurong",
"tk": "Turkmen",
"tka": "Truká",
"tkb": "Buksa",
"tkd": "Tukudede",
"tke": "Takwane",
"tkf": "Tukumanféd",
"tkl": "Tokelauan",
"tkm": "Takelma",
"tkn": "Toku-No-Shima",
"tkp": "Tikopia",
"tkq": "Tee",
"tkr": "Tsakhur",
"tks": "Ramandi",
"tks-cal": "Chali",
"tks-dan": "Danesfani",
"tks-ebr": "Ebrahimabadi",
"tks-esf": "Esfarvarini",
"tks-sag": "Sagzabadi",
"tks-tak": "Takestani",
"tks-xia": "Khiaraji",
"tks-xoz": "Khoznini",
"tkt": "Kathoriya Tharu",
"tku": "Upper Necaxa Totonac",
"tkv": "<NAME>",
"tkw": "Teanu",
"tkx": "Tangko",
"tkz": "Takua",
"tl": "Tagalog",
"tl-cls": "Classical Tagalog",
"tl-old": "Old Tagalog",
"tla": "Southwestern Tepehuan",
"tlb": "Tobelo",
"tlc": "Misantla Totonac",
"tld": "Talaud",
"tlf": "Telefol",
"tlg": "Tofanma",
"tlh": "Klingon",
"tli": "Tlingit",
"tlj": "Talinga-Bwisi",
"tlk": "Taloki",
"tll": "Tetela",
"tlm": "Tolomako",
"tln": "Talondo'",
"tlo": "Talodi",
"tlp": "<NAME>-<NAME>",
"tlq": "Tai Loi",
"tlr": "Talise",
"tls": "Tambotalo",
"tlt": "Teluti",
"tlu": "Tulehu",
"tlv": "Taliabu",
"tlx": "Khehek",
"tly": "Talysh",
"tly-anb": "Anbarani",
"tly-asa": "Asalemi",
"tly-aze": "Azerbaijani Talysh",
"tly-cen": "Central Talysh",
"tly-fum": "Fumani",
"tly-kar": "Karganrudi",
"tly-msa": "Masali",
"tly-msu": "Masulei",
"tly-nor": "Northern Talysh",
"tly-san": "Shandarmani",
"tly-sou": "Southern Talysh",
"tly-tal": "Taleshdulabi",
"tly-tul": "Tularudi",
"tma": "Tama (Chad)",
"tmb": "Avava",
"tmc": "Tumak",
"tmd": "Haruai",
"tme": "Tremembé",
"tmf": "Toba-Maskoy",
"tmg": "Ternateño",
"tmh": "Tuareg",
"tmh-ght": "Ghat",
"tmi": "Tutuba",
"tmj": "Samarokena",
"tmk": "Northwestern Tamang",
"tml": "Tamnim Citak",
"tmm": "Tai Thanh",
"tmn": "Taman (Indonesia)",
"tmo": "Temoq",
"tmp": "Tai Mène",
"tmq": "Tumleo",
"tmr": "Jewish Babylonian Aramaic",
"tms": "Tima",
"tmt": "Tasmate",
"tmu": "Iau",
"tmv": "Motembo",
"tmy": "Tami",
"tmz": "Tamanaku",
"tn": "Tswana",
"tna": "Tacana",
"tnb": "Western Tunebo",
"tnc": "Tanimuca-Retuarã",
"tnd": "Angosturas Tunebo",
"tne": "Tinoc Kallahan",
"tng": "Tobanga",
"tnh": "Maiani",
"tni": "Tandia",
"tnk": "Kwamera",
"tnl": "Lenakel",
"tnm": "Tabla",
"tnn": "North Tanna",
"tno": "Toromono",
"tnp": "Whitesands",
"tnq": "Taíno",
"tnr": "Bedik",
"tns": "Tenis",
"tnt": "Tontemboan",
"tnu": "Tay Khang",
"tnv": "Tangchangya",
"tnw": "Tonsawang",
"tnx": "Tanema",
"tny": "Tongwe",
"tnz": "Ten'edn",
"to": "Tongan",
"tob": "Toba",
"toc": "Coyutla Totonac",
"tod": "Toma",
"tof": "Gizrra",
"tog": "Tonga (Malawi)",
"toh": "Tonga (Mozambique)",
"toi": "Tonga (Zambia)",
"toj": "Tojolabal",
"tok": "Toki Pona",
"tol": "Tolowa",
"tom": "Tombulu",
"too": "Xicotepec de Juárez Totonac",
"top": "Papantla Totonac",
"toq": "Toposa",
"tor": "Togbo-<NAME>",
"tos": "Highland Totonac",
"tou": "Tho",
"tov": "Upper Taromi",
"tow": "Jemez",
"tox": "Tobian",
"toy": "Topoiyo",
"toz": "To",
"tpa": "Taupota",
"tpc": "Azoyú Me'phaa",
"tpe": "Tippera",
"tpf": "Tarpia",
"tpg": "Kula",
"tpi": "Tok Pisin",
"tpj": "Tapieté",
"tpk": "Tupinikin",
"tpl": "Tlacoapa Me'phaa",
"tpm": "Tampulma",
"tpn": "Tupinambá",
"tpo": "Tai Pao",
"tpp": "Pisaflores Tepehua",
"tpq": "Tukpa",
"tpr": "Tuparí",
"tpt": "Tlachichilco Tepehua",
"tpu": "Tampuan",
"tpv": "Tanapag",
"tpw": "Old Tupi",
"tpx": "Acatepec Me'phaa",
"tpy": "Trumai",
"tpz": "Tinputz",
"tqb": "Tembé",
"tql": "Lehali",
"tqm": "Turumsa",
"tqn": "Tenino",
"tqo": "Toaripi",
"tqp": "Tomoip",
"tqq": "Tunni",
"tqr": "Torona",
"tqt": "Western Totonac",
"tqu": "Touo",
"tqw": "Tonkawa",
"tr": "Turkish",
"tra": "Tirahi",
"trb": "Terebu",
"trc": "Copala Triqui",
"trd": "Turi",
"tre": "East Tarangan",
"trf": "Trinidadian Creole English",
"trg": "<NAME>",
"trh": "Turaka",
"tri": "Trió",
"trj": "Toram",
"trk": "Turkic",
"trk-cmn": "Common Turkic",
"trk-dkh": "Dukhan",
"trk-kar": "Karluk",
"trk-kbu": "Kipchak-Bulgar",
"trk-kcu": "Kipchak-Cuman",
"trk-kip": "Kipchak",
"trk-kno": "Kipchak-Nogai",
"trk-oat": "Old Anatolian Turkish",
"trk-ogr": "Oghur",
"trk-ogz": "Oghuz",
"trk-ogz-pro": "Proto-Oghuz",
"trk-pro": "Proto-Turkic",
"trk-sib": "Siberian Turkic",
"trl": "Traveller Scottish",
"trm": "Tregami",
"trn": "Trinitario",
"tro": "Tarao",
"trp": "Kokborok",
"trq": "San <NAME>",
"trr": "Taushiro",
"trs": "Chicahuaxtla Triqui",
"trt": "Tunggare",
"tru": "Turoyo",
"trv": "Taroko",
"trw": "Torwali",
"trx": "Tringgus",
"try": "Turung",
"trz": "Torá",
"ts": "Tsonga",
"tsa": "Tsaangi",
"tsb": "Tsamai",
"tsc": "Tswa",
"tsd": "Tsakonian",
"tse": "Tunisian Sign Language",
"tsf": "Southwestern Tamang",
"tsg": "Tausug",
"tsh": "Tsuvan",
"tsi": "Tsimshian",
"tsj": "Tshangla",
"tsl": "Ts'ün-Lao",
"tsm": "Turkish Sign Language",
"tsp": "Northern Toussian",
"tsq": "Thai Sign Language",
"tsr": "Akei",
"tss": "Taiwan Sign Language",
"tsu": "Tsou",
"tsv": "Tsogo",
"tsw": "Tsishingini",
"tsx": "Mubami",
"tsy": "Tebul Sign Language",
"tt": "Tatar",
"tta": "Tutelo",
"ttb": "Gaa",
"ttc": "Tektiteko",
"ttd": "Tauade",
"tte": "Bwanabwana",
"ttf": "Tuotomb",
"ttg": "Tutong",
"tth": "Upper Ta'oih",
"tti": "Tobati",
"ttj": "Tooro",
"ttk": "Totoro",
"ttl": "Totela",
"ttm": "Northern Tutchone",
"ttn": "Towei",
"tto": "Lower Ta'oih",
"ttp": "Tombelala",
"ttq": "Tawellemmet",
"ttr": "Tera",
"tts": "Isan",
"ttt": "Tat",
"ttu": "Torau",
"ttv": "Titan",
"ttw": "Long Wat",
"tty": "Sikaritai",
"ttz": "Tsum",
"tua": "Wiarumus",
"tub": "Tübatulabal",
"tuc": "Mutu",
"tud": "Tuxá",
"tue": "Tuyuca",
"tuf": "Central Tunebo",
"tug": "Tunia",
"tuh": "Taulil",
"tui": "Tupuri",
"tuj": "Tugutil",
"tul": "Tula",
"tum": "Tumbuka",
"tun": "Tunica",
"tuo": "Tucano",
"tup": "Tupian",
"tup-gua": "Tupi-Guarani",
"tup-gua-pro": "Proto-Tupi-Guarani",
"tup-kab": "Kabishiana",
"tup-pro": "Proto-Tupian",
"tuq": "Tedaga",
"tus": "Tuscarora",
"tuu": "Tututni",
"tuv": "Turkana",
"tuw": "Tungusic",
"tuw-kkl": "Kyakala",
"tuw-pro": "Proto-Tungusic",
"tuw-sol": "Solon",
"tux": "Tuxináwa",
"tuy": "Tugen",
"tuz": "Turka",
"tva": "Vaghua",
"tvd": "Tsuvadi",
"tve": "Te'un",
"tvk": "Southeast Ambrym",
"tvl": "Tuvaluan",
"tvm": "Tela-Masbuar",
"tvn": "Tavoyan",
"tvo": "Tidore",
"tvs": "Taveta",
"tvt": "Tutsa Naga",
"tvu": "Tunen",
"tvw": "Sedoa",
"tvx": "Taivoan",
"tvy": "<NAME>",
"twa": "Twana",
"twb": "Western Tawbuid",
"twc": "Teshenawa",
"twe": "Teiwa",
"twf": "Taos",
"twg": "Tereweng",
"twh": "Tai Dón",
"twm": "Tawang Monpa",
"twn": "Twendi",
"two": "Tswapong",
"twp": "Ere",
"twq": "Tasawaq",
"twr": "Southwestern Tarahumara",
"twt": "Turiwára",
"twu": "Termanu",
"tww": "Tuwari",
"twy": "Tawoyan",
"txa": "Tombonuo",
"txb": "Tocharian B",
"txc": "Tsetsaut",
"txe": "Totoli",
"txg": "Tangut",
"txh": "Thracian",
"txi": "Ikpeng",
"txj": "Tarjumo",
"txm": "Tomini",
"txn": "West Tarangan",
"txo": "Toto",
"txq": "Tii",
"txr": "Tartessian",
"txs": "Tonsea",
"txt": "Citak",
"txu": "Kayapó",
"txx": "Tatana",
"ty": "Tahitian",
"tya": "Tauya",
"tye": "Kyenga",
"tyh": "O'du",
"tyi": "Teke-Tsaayi",
"tyj": "Tai Do",
"tyl": "Thu Lao",
"tyn": "Kombai",
"typ": "Kuku-Thaypan",
"tyr": "Tai Daeng",
"tys": "Sapa",
"tyt": "Tày Tac",
"tyu": "Kua",
"tyv": "Tuvan",
"tyx": "Teke-Tyee",
"tyz": "Tày",
"tza": "Tanzanian Sign Language",
"tzh": "Tzeltal",
"tzj": "Tz'utujil",
"tzl": "Talossan",
"tzm": "Central Atlas Tamazight",
"tzn": "Tugun",
"tzo": "Tzotzil",
"tzx": "Tabriak",
"uam": "Uamué",
"uan": "Kuan",
"uar": "Tairuma",
"uba": "Ubang",
"ubi": "Ubi",
"ubl": "Buhi'non Bikol",
"ubr": "Ubir",
"ubu": "Umbu-Ungu",
"uby": "Ubykh",
"uda": "Uda",
"ude": "Udihe",
"udg": "Muduga",
"udi": "Udi",
"udj": "Ujir",
"udl": "Uldeme",
"udm": "Udmurt",
"udu": "Uduk",
"ues": "Kioko",
"ufi": "Ufim",
"ug": "Uyghur",
"uga": "Ugaritic",
"ugb": "Kuku-Ugbanh",
"uge": "Ughele",
"ugn": "Ugandan Sign Language",
"ugo": "Gong",
"ugy": "Uruguayan Sign Language",
"uha": "Uhami",
"uhn": "Damal",
"uis": "Uisai",
"uiv": "Iyive",
"uji": "Tanjijili",
"uk": "Ukrainian",
"uka": "Kaburi",
"ukg": "Ukuriguma",
"ukh": "Ukhwejo",
"ukk": "Muak Sa-aak",
"ukl": "Ukrainian Sign Language",
"ukp": "Ukpe-Bayobiri",
"ukq": "Ukwa",
"uks": "Kaapor Sign Language",
"uku": "Ukue",
"ukw": "Ukwuani-Aboh-Ndoni",
"uky": "Kuuk Yak",
"ula": "Fungwa",
"ulb": "Ulukwumi",
"ulc": "Ulch",
"ule": "Lule",
"ulf": "Afra",
"uli": "Ulithian",
"ulk": "Meriam",
"ull": "Ullatan",
"ulm": "Ulumanda'",
"uln": "Unserdeutsch",
"ulu": "Uma' Lung",
"ulw": "Ulwa",
"uma": "Umatilla",
"umb": "Umbundu",
"umc": "Marrucinian",
"umd": "Umbindhamu",
"umg": "Umbuygamu",
"umi": "Ukit",
"umm": "Umon",
"umn": "Makyan Naga",
"umo": "Umotína",
"ump": "Umpila",
"umr": "Umbugarla",
"ums": "Pendau",
"umu": "Munsee",
"una": "North Watut",
"und": "Undetermined",
"und-idn": "Idiom Neutral",
"und-isa": "Isaurian",
"und-kas": "Kassite",
"und-mil": "Milang",
"und-mmd": "Mimi of Decorse",
"und-mmn": "Mimi of Nachtigal",
"und-phi": "Philistine",
"und-tdl": "Turduli",
"und-tdt": "Turdetani",
"und-wji": "Western Jicaque",
"und-xbi": "Xianbei",
"und-xnu": "Xiongnu",
"une": "Uneme",
"ung": "Ngarinyin",
"unk": "Enawené-Nawé",
"unm": "Unami",
"unn": "Kurnai",
"unr": "Mundari",
"unu": "Unubahe",
"unx": "Munda",
"unz": "Unde Kaili",
"uok": "Uokha",
"upi": "Umeda",
"upv": "Uripiv-Wala-Rano-Atchin",
"ur": "Urdu",
"ura": "Urarina",
"urb": "Urubú-Kaapor",
"urc": "Urningangg",
"ure": "Uru",
"urf": "Uradhi",
"urg": "Urigina",
"urh": "Urhobo",
"uri": "Urim",
"urj": "Uralic",
"urj-fpr-pro": "Proto-Finno-Permic",
"urj-mdv": "Mordvinic",
"urj-mdv-pro": "Proto-Mordvinic",
"urj-prm": "Permic",
"urj-prm-pro": "Proto-Permic",
"urj-pro": "Proto-Uralic",
"urj-ugr": "Ugric",
"urj-ugr-pro": "Proto-Ugric",
"urk": "Urak Lawoi'",
"url": "Urali",
"urm": "Urapmin",
"urn": "Uruangnirin",
"uro": "Ura (New Guinea)",
"urp": "Uru-Pa-In",
"urr": "Lehalurup",
"urt": "Urat",
"uru": "Urumi",
"urv": "Uruava",
"urw": "Sop",
"urx": "Urimo",
"ury": "Orya",
"urz": "Uru-Eu-Wau-Wau",
"usa": "Usarufa",
"ush": "Ushojo",
<filename>pyang/plugins/omni.py
import optparse
import sys
import re
import string
from pyang import plugin
from pyang import statements
paths_in_module = []
leafrefs = []
key = ''
class_keywords = ["container", "list", "case", "choice", "augment"]
servicepoints = ["servicepoint", "productpoint"]
classnamecolor = " {0.113725, 0.352941, 0.670588}"
mandatoryconfig = " {0.600000, 0.152941, 0.152941}"
optionalconfig = " {0.129412, 0.501961, 0.254902}"
notconfig = " {0.549020, 0.486275, 0.133333}"
# Which line style to use for containment; OmniGraffle defaults to a Bezier curve,
# so override it with a straight line.
containsline = " tail type: \"FilledDiamond\", head type: \"None\", line type: \"Straight\" "
leafrefline = " line type: \"Straight\", head type: \"FilledArrow\" "
def pyang_plugin_init():
plugin.register_plugin(OmniPlugin())
class OmniPlugin(plugin.PyangPlugin):
def add_output_format(self, fmts):
self.multiple_modules = True
fmts['omni'] = self
def add_opts(self, optparser):
optlist = [
optparse.make_option("--omni-path",
dest="omni_tree_path",
help="Subtree to print"),
]
g = optparser.add_option_group("OmniGraffle output specific options")
g.add_options(optlist)
def setup_fmt(self, ctx):
ctx.implicit_errors = False
def emit(self, ctx, modules, fd):
if ctx.opts.omni_tree_path is not None:
path = ctx.opts.omni_tree_path.split('/')
if path[0] == '':
path = path[1:]
else:
path = None
print_omni_header(modules, fd, path, ctx)
emit_modules(modules, fd, path, ctx)
post_process(fd, ctx)
print_omni_footer(modules, fd, path, ctx)
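# Illustrative usage (module name and paths are examples; assumes pyang can find
# this plugin, e.g. via --plugindir):
#   pyang -f omni --omni-path /interfaces ietf-interfaces.yang > model.applescript
#   osascript model.applescript   # renders the model in OmniGraffle on macOS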
def print_omni_header(modules, fd, path, ctx):
# Build doc name from module names
name = ''
for m in modules:
name += m.arg
name = name[:32]
fd.write("""
tell application id "com.omnigroup.OmniGraffle6"
activate
make new document with properties {name:\"%s\"}
set bounds of window 1 to {50, 50, 1200, 800}
tell first canvas of document \"%s\"
set canvasSize to {600, 600}
set name to \"YANG Model\"
set adjusts pages to true
make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {32.000000, 20.000000}, text: {size: 8, alignment: center, font: "HelveticaNeue", text: "leafref"}, origin: {2403.202333, 169.219094}}
make new line at end of graphics with properties {point list: {{2513.245592418806, 185.5962102698529}, {2373.745592418806, 185.3149602698529}}, draws shadow: false, head type: "FilledArrow"}
make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {105.000000, 20.000000}, text: {size: 8, alignment: center, font: "HelveticaNeue", text: "Schema tree, containment"}, origin: {2397.741930, 138.863190}}
make new line at end of graphics with properties {point list: {{2374.993645107464, 154.4881903780727}, {2514.493645107464, 154.4881903780727}}, draws shadow: false, tail type: "FilledDiamond"}
make new shape at end of graphics with properties {autosizing: vertically only, size: {139.500000, 14.000000}, text: {alignment: center, font: "Helvetica-Bold", text: "Legend"}, text placement: top, origin: {2366.929155, 43.937008}, vertical padding: 0}
make new shape at end of graphics with properties {autosizing: vertically only, size: {139.500000, 56.000000}, text: {{color: {0.600000, 0.152941, 0.152941}, text: "Mandatory config
"}, {color: {0.129412, 0.501961, 0.254902}, text: "Optional config
"}, {color: {0.129412, 0.501961, 0.254902}, text: "Key leaf", underlined: true}, {color: {0.129412, 0.501961, 0.254902}, text: "
"}, {color: {0.549020, 0.486275, 0.133333}, text: "Not config"}}, text placement: top, origin: {2366.929155, 57.937008}, vertical padding: 0}
assemble graphics -2 through -1 table shape { 2, 1 }
assemble graphics -5 through -1
""" %(name, name))
def post_process(fd, ctx):
for s in leafrefs:
        # don't try to connect to a class that was not given as input to pyang
        if s.strip().split(" to ")[1].split(" with ")[0] in paths_in_module:
fd.write(s)
def print_omni_footer(modules, fd, path, ctx):
fd.write("""
layout
end tell
end tell
""")
def print_module_info(module, fd, ctx):
title = module.arg
print_text(title, fd, ctx)
def emit_modules(modules, fd, path, ctx):
for module in modules:
print_module_info(module, fd, ctx)
chs = [ch for ch in module.i_children]
if path is not None and len(path) > 0:
chs = [ch for ch in chs
if ch.arg == path[0]]
path = path[1:]
# TEST
for ch in chs:
print_node(module, ch, module, fd, path, ctx, 'true')
for augment in module.search('augment'):
print_node(module, augment, module, fd, path, ctx, 'true')
def iterate_children(parent, s, module, fd, path, ctx):
if hasattr(s, 'i_children'):
for ch in s.i_children:
print_node(s, ch, module, fd, path, ctx)
def print_class_header(s, fd, ctx, root='false'):
global servicepoints
service = ""
for sub in s.substmts:
if sub.keyword[1] in servicepoints:
service = "SERVICE\n"
fd.write("make new shape at end of graphics with properties {autosizing: full, size: {187.500000, 14.000000}, text: {{alignment: center, font: \"Helvetica-Bold\", text: \"%s \"}, {alignment: center, color:%s, font: \"Helvetica-Bold\", text: \"%s \"}}, text placement: top, origin: {150.000000, 11.500000}, vertical padding: 0}\n" %(service + s.keyword, classnamecolor, s.arg))
def print_class_stuff(s, fd, ctx):
number = print_attributes(s, fd, ctx)
#print_actions(s,fd, ctx)
close_class(number, s, fd, ctx)
print_associations(s,fd, ctx)
def print_attributes(s,fd, ctx):
global key
if s.keyword == 'list':
keystring = s.search_one('key')
if keystring is not None:
key = keystring.arg.split(" ")
else:
key = ''
if hasattr(s, 'i_children'):
found_attrs = False
found_actions = False
index = False
# Search attrs
for ch in s.i_children:
index = False
if ch.keyword in ["leaf", "leaf-list"]:
if found_attrs == False:
# first attr in attr section
fd.write("make new shape at end of graphics with properties {autosizing:full, size:{187.5, 28.0}, text:{")
found_attrs = True
else:
# comma before new textitem
fd.write(", ")
if ch.keyword == "leaf-list":
str = "[]"
else:
str = ""
if ch.arg in key:
index = True
print_leaf(ch, str, index, fd, ctx)
if found_attrs:
# close attr section
fd.write("}, text placement:top, origin:{150.0, 25.5}, vertical padding:0}\n")
# Search actions
for ch in s.i_children:
if ch.keyword == ('tailf-common', 'action'):
if found_actions == False:
fd.write("make new shape at end of graphics with properties {autosizing:full, size:{187.5, 28.0}, text:{text:\"")
found_actions = True
print_action(ch, fd, ctx)
if found_actions:
fd.write("\"}, text placement:top, origin:{150.0, 25.5}, vertical padding:0}\n")
# return number of sections in class
return (found_attrs + found_actions) + 1
def close_class(number, s, fd, ctx):
fd.write("local %s\n" %fullpath(s))
fd.write("set %s to assemble ( graphics -%s through -1 ) table shape {%s, 1}\n" %(fullpath(s), str(number), str(number) ))
def print_node(parent, s, module, fd, path, ctx, root='false'):
# We have a class
if (s.keyword in class_keywords):
print_class_header(s, fd, ctx, root)
paths_in_module.append(fullpath(s))
print_class_stuff(s, fd, ctx)
# Do not try to create relationship to module
if (parent != module):
presence = s.search_one("presence")
if presence is not None:
print_aggregation(parent, s, fd, "0", "1", ctx)
else:
print_aggregation(parent, s, fd, "1", "1", ctx)
iterate_children(parent, s, module, fd, path, ctx)
def print_associations(s, fd, ctx):
# find leafrefs and identityrefs
if hasattr(s, 'i_children'):
for ch in s.i_children:
if hasattr(ch, 'i_leafref_ptr') and (ch.i_leafref_ptr is not None):
to = ch.i_leafref_ptr[0]
print_association(s, to.parent, ch, to, "leafref", fd, ctx)
def print_aggregation(parent, this, fd, lower, upper, ctx):
fd.write("connect %s to %s with properties {%s} \n" %(fullpath(parent),fullpath(this), containsline))
def print_rpc(rpc, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s\' " %(fullpath(rpc), rpc.arg))
def print_action(action, fd, ctx, root='false'):
fd.write("%s()\n" %action.arg)
def print_notification(notification, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s\' " %(fullpath(notification), notification.arg))
def print_inout(parent, s, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s-%s\' " %(fullpath(s), parent.arg, s.keyword))
def print_leaf(leaf, str, index, fd, ctx):
if leaf.i_config == True:
c = '(rw)'
color = optionalconfig
else:
c = '(ro)'
color = notconfig
m = leaf.search_one('mandatory')
if m is None or m.arg == 'false':
mand = '?'
else:
mand = ''
color = mandatoryconfig
if not index:
fd.write("{font: \"Helvetica-Oblique\", color: %s, text: \"%s%s%s %s %s\n\"}" %(color, leaf.arg, str, mand, c, get_typename(leaf)))
else:
fd.write("{font: \"Helvetica-Oblique\", color: %s, underlined: true, text: \"%s%s%s %s %s\n\"}" %(color, leaf.arg, str, mand, c, get_typename(leaf)))
def print_association(fromclass, toclass, fromleaf, toleaf, association, fd, ctx):
    leafrefs.append("connect " + fullpath(fromclass) + " to " + fullpath(toclass) + " with properties {" + leafrefline + "}\n")
def print_text(t, fd, ctx):
fd.write("make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {57.000000, 30.000000}, text: {size: 16, alignment: center, font: \"HelveticaNeue\", text: \"%s\"}, origin: {100, 4.500000}}\n" %t)
def get_typename(s):
t = s.search_one('type')
if t is not None:
s = t.arg
# if t.arg == 'enumeration':
# s = s + ' : {'
# for enums in t.substmts[:10]:
# s = s + enums.arg + ','
# if len(t.substmts) > 3:
# s = s + "..."
# s = s + '}'
# elif t.arg == 'leafref':
# s = s + ' : '
# p = t.search_one('path')
# if p is not None:
# s = s + p.arg
return s
def fullpath(stmt):
pathsep = "_"
path = stmt.arg
# for augment paths we need to remove initial /
if path.find("/") == 0:
path = path[1:len(path)]
else:
if stmt.keyword == 'case':
path = path + '-case'
elif stmt.keyword == 'grouping':
path = path + '-grouping'
        while stmt.parent is not None:
            stmt = stmt.parent
            if stmt.arg is not None:
                path = stmt.arg + pathsep + path
    return path
<filename>porcupine/plugins/directory_tree.py
"""Displays a directory tree on the left side of the editor.
You can navigate directories, and open files in Porcupine.
"""
from __future__ import annotations
import dataclasses
import logging
import os
import tkinter
from functools import partial
from pathlib import Path
from tkinter import ttk
from typing import Any, Callable, List
from porcupine import (
get_main_window,
get_paned_window,
get_tab_manager,
menubar,
settings,
tabs,
utils,
)
log = logging.getLogger(__name__)
# The idea: If more than this many projects are opened, then the least recently
# opened project will be closed.
#
# Gotchas:
# - Git run time is the bottleneck of refreshing, and it's proportional to
# this. For that reason, keep it small.
# - If you have more than this many files open, each from a different
# project, there will be one project for each file in the directory tree
# and this number is exceeded.
_MAX_PROJECTS = 5
# For perf reasons, we want to avoid unnecessary Tcl calls when
# looking up information by id. Easiest solution is to include the
# information in the id. It's a bit lol. The format is:
#
# "{type}:{project_number}:{path}"
#
# where:
# - type is "file", "dir", "project"
# - project_number is unique to each project
def get_path(item_id: str) -> Path:
item_type, project_number, path = item_id.split(":", maxsplit=2)
return Path(path)
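# For example, get_path("file:3:/home/user/project/main.py") returns
# Path("/home/user/project/main.py"); maxsplit=2 keeps any ":" that appears
# inside the path itself intact. (The path shown is illustrative.)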
@dataclasses.dataclass
class FolderRefreshed(utils.EventDataclass):
project_id: str
folder_id: str
# TODO: show long paths more nicely?
def _stringify_path(path: Path) -> str:
home = Path.home()
if path == home or home in path.parents:
return os.sep.join(["~"] + list(path.relative_to(home).parts))
return str(path)
class DirectoryTree(ttk.Treeview):
def __init__(self, master: tkinter.Misc) -> None:
super().__init__(
master,
selectmode="browse",
show="tree",
name="directory_tree",
style="DirectoryTree.Treeview",
)
# Needs after_idle because selection hasn't updated when binding runs
self.bind("<Button-1>", self._on_click, add=True)
self.bind("<<TreeviewOpen>>", self.open_file_or_dir, add=True)
self.bind("<<ThemeChanged>>", self._config_tags, add=True)
self.column("#0", minwidth=500) # allow scrolling sideways
self._config_tags()
self._last_click_time = 0 # Very long time since previous click, no double click
self._last_click_item: str | None = None
self._project_num_counter = 0
self.contextmenu = tkinter.Menu(tearoff=False)
def ordered_repr(item_id: str) -> tuple[bool, str, str]:
split_item_id = item_id.split(":", maxsplit=2)
item_type = split_item_id[0] # 'dir' or 'file'
item_path = split_item_id[2]
item_is_dotted = Path(item_path).name[0] == "." # False < True => dot items last
return item_is_dotted, item_type, item_path
self.sorting_keys: list[Callable[[str], Any]] = [ordered_repr]
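        # Illustrative ordering: for children "dir:1:/p/src", "file:1:/p/README.md"
        # and "dir:1:/p/.git", sort_folder_contents() yields src, then README.md,
        # then .git: non-dotted items first, directories before files ("dir" < "file"),
        # then by path.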
def set_the_selection_correctly(self, id: str) -> None:
self.selection_set(id)
self.focus(id)
def _on_click(self, event: tkinter.Event[DirectoryTree]) -> str | None:
self.tk.call("focus", self)
# Man page says identify_row is "obsolescent" but tkinter doesn't have the new thing yet
item = self.identify_row(event.y)
if item is None:
return None
# Couldn't get <Double-Button-1> to work, so I wrote a program to
# measure max time between double clicks. It's 500ms on my system.
double_click = event.time - self._last_click_time < 500 and self._last_click_item == item
self.set_the_selection_correctly(item)
if item.startswith("file:"):
if double_click:
self.open_file_or_dir()
else:
little_arrow_clicked = self.identify_element(event.x, event.y) == "Treeitem.indicator"
if double_click or little_arrow_clicked:
self.item(item, open=(not self.item(item, "open")))
if self.item(item, "open"):
self.open_file_or_dir()
self._last_click_item = item
if double_click:
# Prevent getting two double clicks with 3 quick subsequent clicks
self._last_click_time = 0
else:
self._last_click_time = event.time
return "break"
def _config_tags(self, junk: object = None) -> None:
fg = self.tk.eval("ttk::style lookup Treeview -foreground")
bg = self.tk.eval("ttk::style lookup Treeview -background")
gray = utils.mix_colors(fg, bg, 0.5)
self.tag_configure("dummy", foreground=gray)
# This allows projects to be nested. Here's why that's a good thing:
# Consider two projects, blah/blah/outer and blah/blah/outer/blah/inner.
# If the inner project is not shown when outer project is already in the
# directory tree, and home folder somehow becomes a project (e.g. when
# editing ~/blah.py), then the directory tree will present everything
# inside the home folder as one project.
def add_project(self, root_path: Path, *, refresh: bool = True) -> None:
for existing_id in self.get_children():
if get_path(existing_id) == root_path:
# Move project first to avoid hiding it soon
self.move(existing_id, "", 0)
return
self._project_num_counter += 1
project_item_id = f"project:{self._project_num_counter}:{root_path}"
# insert to beginning, so it won't be hidden soon
self.insert("", 0, project_item_id, text=_stringify_path(root_path), open=False)
self._insert_dummy(project_item_id)
self._hide_old_projects()
if refresh:
self.refresh()
def find_project_id(self, item_id: str) -> str:
# Does not work for dummy items, because they don't use type:num:path scheme
num = item_id.split(":", maxsplit=2)[1]
[result] = [id for id in self.get_children("") if id.startswith(f"project:{num}:")]
return result
def _find_project_id_by_path(self, path: Path) -> str | None:
matching_projects = [
project_id for project_id in self.get_children() if get_path(project_id) in path.parents
]
if not matching_projects:
return None
# For ~/foo/bar/lol.py, use ~/foo/bar instead of ~/foo
return max(matching_projects, key=(lambda id: len(str(get_path(id)))))
def select_file(self, path: Path) -> None:
project_id = self._find_project_id_by_path(path)
if project_id is None:
# Happens when tab changes because a file was just opened. This
# will be called soon once the project has been added.
log.info(f"can't select '{path}' because there are no projects containing it")
return
project_root_path = get_path(project_id)
# Find the visible sub-item representing the file
file_id = project_id
subpath = project_root_path
for part in path.relative_to(project_root_path).parts:
subpath /= part
if self.item(file_id, "open"):
mypy_sucks = self.get_id_from_path(subpath, project_id)
assert mypy_sucks is not None
file_id = mypy_sucks
else:
# ...or a closed folder that contains the file
break
self.set_the_selection_correctly(file_id)
self.see(file_id)
def _insert_dummy(self, parent: str, *, text: str = "", clear: bool = False) -> None:
assert parent
if clear:
self.delete(*self.get_children(parent))
else:
assert not self.get_children(parent)
self.insert(parent, "end", text=text, tags="dummy")
def contains_dummy(self, parent: str) -> bool:
children = self.get_children(parent)
return len(children) == 1 and self.tag_has("dummy", children[0])
# TODO: it's not great how only the directory tree knows this
def project_has_open_filetabs(self, project_id: str) -> bool:
assert project_id.startswith("project:")
return any(
isinstance(tab, tabs.FileTab)
and tab.path is not None
and self._find_project_id_by_path(tab.path) == project_id
for tab in get_tab_manager().tabs()
)
def _hide_old_projects(self, junk: object = None) -> None:
for project_id in self.get_children(""):
if not get_path(project_id).is_dir():
self.delete(project_id)
# To avoid getting rid of existing projects when not necessary, we do
# shortening after deleting non-existent projects
for project_id in reversed(self.get_children("")):
if len(self.get_children("")) > _MAX_PROJECTS and not self.project_has_open_filetabs(
project_id
):
self.delete(project_id)
self.save_project_list()
def save_project_list(self) -> None:
# Settings is a weird place for this, but easier than e.g. using a cache file.
settings.set_("directory_tree_projects", [str(get_path(id)) for id in self.get_children()])
def refresh(self, junk: object = None) -> None:
log.debug("refreshing begins")
self._hide_old_projects()
self.event_generate("<<RefreshBegins>>")
for project_id in self.get_children():
self._update_tags_and_content(get_path(project_id), project_id)
# The following two methods call each other recursively.
def _update_tags_and_content(self, project_root: Path, child_id: str) -> None:
if child_id.startswith(("dir:", "project:")) and not self.contains_dummy(child_id):
self._open_and_refresh_directory(child_id)
def _open_and_refresh_directory(self, dir_id: str) -> None:
dir_path = get_path(dir_id)
if self.contains_dummy(dir_id):
self.delete(self.get_children(dir_id)[0])
project_ids = self.get_children("")
if dir_id not in project_ids and dir_path in map(get_path, project_ids):
self._insert_dummy(dir_id, text="(open as a separate project)", clear=True)
return
new_paths = set(dir_path.iterdir())
path2id = {get_path(id): id for id in self.get_children(dir_id)}
# TODO: handle changing directory to file
for path in list(path2id.keys() - new_paths):
self.delete(path2id.pop(path))
for path in list(new_paths - path2id.keys()):
project_num = dir_id.split(":", maxsplit=2)[1]
if path.is_dir():
item_id = f"dir:{project_num}:{path}"
else:
item_id = f"file:{project_num}:{path}"
self.insert(dir_id, "end", item_id, text=path.name, open=False)
path2id[path] = item_id
if path.is_dir():
self._insert_dummy(item_id)
project_id = self.find_project_id(dir_id)
project_root = get_path(project_id)
for child_path, child_id in path2id.items():
self._update_tags_and_content(project_root, child_id)
self.sort_folder_contents(dir_id)
if not self.get_children(dir_id):
self._insert_dummy(dir_id, text="(empty)")
# When binding, delete tags from previous call
self.event_generate(
"<<FolderRefreshed>>", data=FolderRefreshed(project_id=project_id, folder_id=dir_id)
)
def sort_folder_contents(self, dir_id: str) -> None:
# Empty string is root element and sorting inside it would mess with order of projects
assert dir_id
for index, child_id in enumerate(
sorted(
self.get_children(dir_id),
key=(lambda item_id: [f(item_id) for f in self.sorting_keys]),
)
):
self.move(child_id, dir_id, index)
def open_file_or_dir(self, event: object = None) -> None:
try:
[selected_id] = self.selection()
except ValueError:
# nothing selected, can happen when double-clicking something else than one of the items
return
if selected_id.startswith("file:"):
get_tab_manager().open_file(get_path(selected_id))
elif selected_id.startswith(("dir:", "project:")): # not dummy item
self._open_and_refresh_directory(selected_id)
tab = get_tab_manager().select()
if (
isinstance(tab, tabs.FileTab)
and tab.path is not None
and get_path(selected_id) in tab.path.parents
):
# Don't know why after_idle is needed
self.after_idle(self.select_file, tab.path)
def get_id_from_path(self, path: Path, project_id: str) -> str | None:
"""Find an item from the directory tree given its path.
Because the treeview loads items lazily as needed, this may return None
even if the path exists inside the project.
"""
project_num = project_id.split(":", maxsplit=2)[1]
if path.is_dir():
result = f"dir:{project_num}:{path}"
else:
result = f"file:{project_num}:{path}"
if self.exists(result):
return result
return None
def _cycle_through_items(self, event: tkinter.Event[DirectoryTree]) -> None:
if len(event.char) != 1:
return
try:
[item] = self.selection()
        except ValueError:
            # nothing selected, nothing to cycle through
            return
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T E X A M P L E S
#
# Copyright (c) 2016+ <NAME> + <NAME>
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# pagebotapp.py
#
from vanilla import *
from pagebot.filepaths import getResourcesPath
from pagebot.apps.baseapp import BaseApp
from pagebot.publications import PublicationCategories
from pagebot.elements import newGroup, newText, newRect
from pagebot.constants import *
from pagebot.base.composer import Composer
from pagebot.base.typesetter import Typesetter
from pagebot.themes import ThemeClasses, BaseTheme, DEFAULT_THEME_CLASS
from pagebot import getContext
from pagebot.fonttoolbox.objects.font import findFont
from pagebot.conditions import *
from pagebot.document import Document
from pagebot.toolbox.units import inch, pt
from pagebot.toolbox.color import color
from drawBot.ui.drawView import DrawView
ADD_MENU = True
context = getContext('DrawBot')
fontRegular = findFont('PageBot-Regular')
fontBold = findFont('PageBot-Bold')
redColor = color('red')
headStyle = dict(font=fontRegular, fontSize=pt(4))
MD_SAMPLE_PATH = getResourcesPath() + '/texts/SAMPLE.md'
UNTITLED_PUBLICATION = 'Untitled Publication #%d'
MENUS = (
('File', 100, 'fileMenu', (
('New publication', 'newPublication'),
('New page', 'newPage'),
('Open...', 'openPublication'),
('Close', 'closePublication'),
('Save', 'savePublication'),
('Save as...', 'saveAsPublication'),
('Print...', 'printPublication'),
('Export...', 'exportPublication'),
('Quit', 'quitApp'),
)),
('Edit', 100, 'editMenu', (
('Undo', 'undoEdit'),
('Cut', 'cutEdit'),
('Copy', 'copyEdit'),
('Paste', 'pasteEdit'),
('Delete', 'deleteEdit'),
('Select all', 'selectAllEdit'),
('Find...', 'findEdit'),
)),
('Style', 100, 'styleMenu', (
('Publication...', 'stylePublication'),
('Metrics...', 'styleMetrics'),
('Templates...', 'styleTemplates'),
('Themes...', 'styleTheme'),
)),
('Window', 100, 'windowMenu', (
('WINDOW', 'selectWindow'),
)),
)
class PageBotApp(BaseApp):
APPS = {} # Key application.eId, value is PageBotApp instance.
def __init__(self, publication, w=None, h=None,
minW=None, maxW=None, minH=None, maxH=None, **kwargs):
uiWidth = pt(200)
w, h = w or B5[0]+uiWidth, h or B5[1]
BaseApp.__init__(self)
self.APPS[0] = self
# Key is publication type, values are UI settings
self.preferenceUI = {}
self.menuCallbacks = {}
if not publication.name:
publication.name = (UNTITLED_PUBLICATION % len(self.APPS))
self.publication = publication # Store the Magazine instance.
self.window = Window((24, 24, w, h), self.publication.name,
minSize=(minW or 200, minH or 200),
maxSize=(maxW or XXXL, maxH or XXXL))
self.buildUI(uiWidth)
def buildUI(self, uiWidth):
dy = pad = pt(6)
y = pad + dy
uiWidth = pt(230)
uiH = pt(24)
uiL = uiH + 6
uiLS = pt(18)
uiLS2 = pt(23)
if ADD_MENU:
menuHeight = 18 + 2*pad
menuX = pad
self.window.menu = Group((0, 0, -0, menuHeight))
for menuTitle, menuW, menuCallbackName, menuAttributes in MENUS:
menuItems = [menuTitle]
for menuItemTitle, menuItemCallback in menuAttributes:
self.menuCallbacks[menuItemTitle] = getattr(self, menuItemCallback)
menuItems.append(menuItemTitle)
setattr(self.window.menu, menuTitle,
PopUpButton((menuX, pad, menuW, menuHeight - 2*pad),
menuItems, callback=getattr(self, menuCallbackName), sizeStyle='small'))
menuX += menuW + pad
else:
menuHeight = 0
self.window.uiGroup = Group((0, menuHeight, uiWidth, -0))
self.window.uiGroup.tabs = Tabs((0, pad, -0, -uiH-pad), ["Document", "Content", "Hints"], sizeStyle='mini')
# D E S I G N U I
tab = self.uiDesign = self.window.uiGroup.tabs[0]
tab.documentNameLabel = Text((pad, y-12, -pad, uiLS), 'Document name', sizeStyle='mini')
tab.documentName = TextEditor((pad, y, -pad, uiLS), self.publication.name)
y += uiL-2
tab.publicationLabel = Text((pad, y-8, (uiWidth-pad)/2, uiLS),
'Publication category', sizeStyle='mini')
tab.templateLabel = Text(((uiWidth-pad)/2+pad, y-8, -pad, uiLS),
'Publication type', sizeStyle='mini')
publicationCategories = sorted(PublicationCategories.keys())
tab.publication = PopUpButton((pad, y, (uiWidth-pad)/2-pad, uiH),
publicationCategories, callback=self.selectCategory, sizeStyle='small')
tab.publication.set(publicationCategories.index('Magazine'))
templateTypes = sorted(PublicationCategories[tab.publication.getItem()])
tab.templateType = PopUpButton(((uiWidth-pad)/2+pad, y, -pad, uiH), templateTypes,
callback=self.makeSample, sizeStyle='small')
tab.templateType.set(0)
y += uiL
tab.themeLabel = Text((pad, y-8, (uiWidth-pad)*2/3, uiLS), 'Theme', sizeStyle='mini')
themeNames = sorted(ThemeClasses.keys())
tab.theme = PopUpButton((pad, y, (uiWidth-pad*2)*2/3-pad, uiH), themeNames, callback=self.makeSample,
sizeStyle='small')
tab.theme.set(themeNames.index(DEFAULT_THEME_CLASS.NAME))
tab.themeMoodLabel = Text(((uiWidth-pad)*2/3, y-8, -pad, uiLS), 'Mood', sizeStyle='mini')
themeMoods = BaseTheme.MOOD_NAMES
tab.themeMood = PopUpButton(((uiWidth-pad)*2/3, y, -pad, uiH), themeMoods,
callback=self.makeSample, sizeStyle='small')
tab.themeMood.set(themeMoods.index(BaseTheme.DEFAULT_MOOD_NAME))
y += uiL
tab.pageSizeLabel = Text((pad, y-8, -pad, uiLS), 'Page size', sizeStyle='mini')
options = sorted(self.publication.PAGE_SIZES.keys())
tab.pageSize = PopUpButton((pad, y, -pad, uiH), options, callback=self.makeSample,
sizeStyle='small')
tab.pageSize.set(2)
y += uiL-4
orientation = ('Portrait', 'Landscape')
tab.orientation = RadioGroup((pad, y, -pad, 32), orientation, callback=self.makeSample,
sizeStyle='small', isVertical=True)
tab.orientation.set(0)
tab.spread = CheckBox((uiWidth/2, y-4, -pad, uiH), 'Spread', callback=self.makeSample,
sizeStyle='small')
tab.spread.set(0)
y += uiLS-6
tab.symmetric = CheckBox((uiWidth/2, y, -pad, uiH), 'Symmetry', callback=self.makeSample,
sizeStyle='small')
tab.symmetric.set(0)
y += uiL+8
tbW = 40 # Padding text box width
tw = 10
x = pad
tab.paddingLabel = Text((pad, y-14, -pad, uiLS), 'Padding', sizeStyle='mini')
tab.paddingTopLabel = Text((x, y, tw, uiLS), 'T', sizeStyle='small')
tab.paddingTop = TextEditor((x+tw, y, tbW, uiLS), '48', callback=self.makeSample,)
x += tw + tbW + 2
tab.paddingRightLabel = Text((x, y, 12, uiLS), 'R', sizeStyle='small')
tab.paddingRight = TextEditor((x+tw, y, tbW, uiLS), '48', callback=self.makeSample,)
x += tw + tbW + 2
tab.paddingBottomLabel = Text((x, y, 12, uiLS), 'B', sizeStyle='small')
tab.paddingBottom = TextEditor((x+tw, y, tbW, uiLS), '60', callback=self.makeSample,)
x += tw + tbW + 2
tab.paddingLeftLabel = Text((x, y, 12, uiLS), 'L', sizeStyle='small')
tab.paddingLeft = TextEditor((x+tw, y, tbW, uiLS), '72', callback=self.makeSample,)
y += uiL
tab.gridLabel = Text((pad, y-8, -pad, uiLS), 'Grid', sizeStyle='mini')
columnOptions = []
for columns in range(1, 17):
columnOptions.append(str(columns))
tab.columnsLabel = Text((pad, y+5, 36, uiLS), 'Cols', sizeStyle='small')
tab.columns = PopUpButton((pad+36, y, uiWidth/5, uiH), columnOptions, callback=self.makeSample,
sizeStyle='small')
tab.columns.set(3) # 4 columns
tab.hGutterLabel = Text((uiWidth/2, y+5, 60, uiLS), 'HGutter', sizeStyle='small')
tab.hGutter = PopUpButton((-pad-uiWidth/5, y, uiWidth/5, uiH), columnOptions, callback=self.makeSample,
sizeStyle='small')
tab.hGutter.set(11) # pt(12)
y += uiLS
rowsOptions = []
for rows in range(1, 25):
rowsOptions.append(str(rows))
        tab.rowsLabel = Text((pad, y+5, 36, uiLS), 'Rows', sizeStyle='small')
        tab.rows = PopUpButton((pad+36, y, uiWidth/5, uiH), rowsOptions, callback=self.makeSample,
            sizeStyle='small')
tab.rows.set(0) # 1 row
tab.vGutterLabel = Text((uiWidth/2, y+5, 60, uiLS), 'VGutter', sizeStyle='small')
tab.vGutter = PopUpButton((-pad-uiWidth/5, y, uiWidth/5, uiH), columnOptions, callback=self.makeSample,
sizeStyle='small')
tab.vGutter.set(11) # pt(12)
y += uiL
tab.showBaselineGrid = CheckBox((pad, y, uiWidth/2, uiH), 'Baselines', callback=self.makeSample,
sizeStyle='small')
tab.showBaselineGrid.set(True)
tab.showColorBars = CheckBox((uiWidth/2, y, -pad, uiH), 'Color bars', callback=self.makeSample,
sizeStyle='small')
tab.showColorBars.set(1)
y += uiLS
tab.showGrid = CheckBox((pad, y, uiWidth/2, uiH), 'Grid', callback=self.makeSample,
sizeStyle='small')
tab.showGrid.set(1)
tab.showPagePadding = CheckBox((uiWidth/2, y, -pad, uiH), 'Page padding', callback=self.makeSample,
sizeStyle='small')
tab.showPagePadding.set(True)
y += uiLS
tab.showPageFrame = CheckBox((pad, y, uiWidth/2, uiH), 'Page frame', callback=self.makeSample,
sizeStyle='small')
tab.showPageFrame.set(True)
tab.showCropMarks = CheckBox((uiWidth/2, y, -pad, uiH), 'Cropmarks', callback=self.makeSample,
sizeStyle='small')
tab.showCropMarks.set(True)
tab.errors = EditText((pad, -50, -pad, -pad))
# C O N T E N T U I
y = pad + dy
tab = self.uiContent = self.window.uiGroup.tabs[1]
tab.contentSelectionLabel = Text((pad, y-8, -pad, uiLS),
'Content selection', sizeStyle='mini')
options = sorted(('Random content', 'Open...'))
tab.contentSelection = PopUpButton((pad, y, -pad, uiH), options, callback=self.makeSample,
sizeStyle='small')
tab.contentSelection.set(0)
y += uiL
self.window.canvas = DrawView((uiWidth, menuHeight, -0, -0))
def build(self, view=None, **kwargs):
#view = self.ui.view
#for e in self.elements:
# e.build(view, nsParent=page, **kwargs)
self.window.open()
#self.makePublication('aa')
def getPadding(self):
"""Answer the document padding."""
return pt(
int(self.uiDesign.paddingTop.get()),
int(self.uiDesign.paddingRight.get()),
int(self.uiDesign.paddingBottom.get()),
int(self.uiDesign.paddingLeft.get())
)
def getDocumentName(self):
return self.uiDesign.documentName.get()
def getPaperSize(self):
w, h = self.publication.PAGE_SIZES[self.uiDesign.pageSize.getItem()]
if self.uiDesign.orientation.get():
w, h = h, w # Flip the page
return pt(w, h)
def getGrid(self, w, h, padding):
padT, padR, padB, padL = padding
columns = int(self.uiDesign.columns.getItem())
hGutter = pt(int(self.uiDesign.hGutter.getItem()))
gridX = []
cw = (w - padR - padL - (columns-1) * hGutter)/columns
for n in range(columns):
gridX.append(pt(cw, hGutter))
rows = int(self.uiDesign.rows.getItem())
vGutter = pt(int(self.uiDesign.vGutter.getItem()))
gridY = []
ch = (h - padT - padB - (rows-1) * vGutter)/rows
for n in range(rows):
gridY.append(pt(ch, vGutter))
return gridX, gridY
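    # Illustrative numbers: with w=500pt, padL=padR=50pt, 4 columns and a 12pt
    # hGutter, each column is (500 - 50 - 50 - 3*12) / 4 = 91pt wide, so gridX
    # holds four (91pt, 12pt) pairs.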
def pageSizesCallback(self, sender):
pass
def getDocument(self):
"""Answer the document that fits the current UI settings."""
w, h = self.getPaperSize()
name = self.getDocumentName()
padding = self.getPadding()
gridX, gridY = self.getGrid(w, h, padding)
# Make a new Document instance for export
doc = Document(w=w, h=h, autoPages=1, padding=padding,
gridX=gridX, gridY=gridY, context=context)
view = doc.view
view.showCropMarks = showMarks = bool(self.uiDesign.showCropMarks.get())
view.showRegistrationMarks = showMarks
view.showNameInfo = showMarks
view.showColorBars = bool(self.uiDesign.showColorBars.get())
#view.showBaselineGrid = bool(self.window.group.showBaselineGrid.get())
if bool(self.uiDesign.showGrid.get()):
view.showGrid = GRID_COL
else:
view.showGrid = False
view.showPadding = bool(self.uiDesign.showPagePadding.get())
view.showFrame = bool(self.uiDesign.showPageFrame.get())
if showMarks: # Needs padding outside the page?
view.padding = pt(48)
else:
view.padding = 0
return doc
def getTheme(self):
themeName = self.uiDesign.theme.getItem()
themeMood = self.uiDesign.themeMood.getItem()
return ThemeClasses[themeName](themeMood)
def buildSample(self, doc):
page = doc[1]
theme = self.getTheme()
if doc.view.showFrame:
c = theme.mood.body_bgcolor.lessOpaque()
newRect(parent=page, fill=c, conditions=[Fit2Sides()])
# By default, the typesetter produces a single Galley with content and code blocks.
t = Typesetter(doc.context)
t.typesetFile(MD_SAMPLE_PATH)
# Create a Composer for this document, then create pages and fill content.
composer = Composer(doc)
# The composer executes the embedded Python code blocks that indicate where content should go.
# by the HtmlContext. Feedback by the code blocks is added to verbose and errors list
targets = dict(pub=self, doc=doc, page=page)
composer.compose(t.galley, targets=targets)
"""
if doc.view.showGrid:
c = theme.mood.body_color.lessOpaque()
for n in range(len(doc.gridX)):
colWidth = doc.gridX[n][0]
if n:
conditions = [Left2Col(n), Fit2Height()]
else:
conditions = [Left2Left(), Fit2Height()]
newRect(parent=page, fill=c, w=colWidth, conditions=conditions)
"""
"""
theme = self.getTheme()
newText(str(theme.mood.name), style=headStyle, parent=grp, conditions=[Fit2Width(), Top2Top()])
for colorName in sorted(theme.mood.palette.colorNames):
color = theme.mood.palette[colorName]
| |
esptool.FatalError("Writing MAC address is not supported")
def get_stored_crc(self):
return (self.esp.read_efuse(self.data_reg_offs + 1) >> 16) & 0xFF
def calc_crc(self):
"""
This algorithm is the equivalent of esp_crc8() in ESP32 ROM code
This is CRC-8 w/ inverted polynomial value 0x8C & initial value 0x00.
"""
mac = self.get_raw()
result = 0x00
for b in struct.unpack("B" * 6, mac):
result ^= b
for _ in range(8):
lsb = result & 1
result >>= 1
if lsb != 0:
result ^= 0x8c
return result
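    # For a factory-programmed MAC, the value returned here should match
    # get_stored_crc(); an all-zero MAC trivially yields a checksum of 0x00.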
class EfuseKeyblockField(EfuseField):
def get_raw(self):
words = [self.esp.read_efuse(self.data_reg_offs + word) for word in range(8)]
# Reading EFUSE registers to a key string:
# endian swap each word, and also reverse
# the overall word order.
bitstring = struct.pack(">" + ("I" * 8), *words[::-1])
return bitstring
def get(self):
return hexify(self.get_raw(), " ")
def burn(self, new_value):
words = struct.unpack(">" + ("I" * 8), new_value) # endian-swap
words = words[::-1] # reverse from natural key order
write_reg_addr = efuse_write_reg_addr(self.block, self.word)
for word in words:
self.esp.write_reg(write_reg_addr, word)
write_reg_addr += 4
efuse_perform_write(self.esp)
return self.get()
class EfuseSpiPinField(EfuseField):
def get(self):
val = self.get_raw()
if val >= 30:
val += 2 # values 30,31 map to 32, 33
return val
def burn(self, new_value):
if new_value in [30, 31]:
raise esptool.FatalError("IO pins 30 & 31 cannot be set for SPI flash. 0-29, 32 & 33 only.")
if new_value > 33:
raise esptool.FatalError("IO pin %d cannot be set for SPI flash. 0-29, 32 & 33 only." % new_value)
if new_value > 30:
new_value -= 2 # values 32,33 map to 30, 31
return super(EfuseSpiPinField, self).burn(new_value)
class EfuseVRefField(EfuseField):
VREF_OFFSET = 1100 # ideal efuse value in mV
VREF_STEP_SIZE = 7 # 1 count in efuse == 7mV
VREF_SIGN_BIT = 0x10
VREF_MAG_BITS = 0x0F
def get(self):
val = self.get_raw()
# sign-magnitude format
if (val & self.VREF_SIGN_BIT):
val = -(val & self.VREF_MAG_BITS)
else:
val = (val & self.VREF_MAG_BITS)
val *= self.VREF_STEP_SIZE
return self.VREF_OFFSET + val
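    # Sign-magnitude example: raw 0x13 has the sign bit (0x10) set and magnitude 3,
    # so VRef = 1100 - 3*7 = 1079 mV; raw 0x03 gives 1100 + 3*7 = 1121 mV.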
def burn(self, new_value):
raise RuntimeError("Writing to VRef is not supported.")
class EfuseAdcPointCalibration(EfuseField):
TP_OFFSET = { # See TP_xxxx_OFFSET in esp_adc_cal.c in ESP-IDF
"ADC1_TP_LOW": 278,
"ADC2_TP_LOW": 421,
"ADC1_TP_HIGH": 3265,
"ADC2_TP_HIGH": 3406,
}
SIGN_BIT = (0x40, 0x100) # LOW, HIGH (2s complement format)
STEP_SIZE = 4
def get(self):
idx = 0 if self.register_name.endswith("LOW") else 1
sign_bit = self.SIGN_BIT[idx]
offset = self.TP_OFFSET[self.register_name]
raw = self.get_raw()
delta = (raw & (sign_bit - 1)) - (raw & sign_bit)
return offset + (delta * self.STEP_SIZE)
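    # Example for ADC1_TP_LOW (offset 278, sign bit 0x40): raw 0x01 gives
    # delta = 1, i.e. 278 + 1*4 = 282; raw 0x7F gives delta = -1, i.e. 278 - 4 = 274.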
def dump(esp, _efuses, args):
""" Dump raw efuse data registers """
for block in range(len(EFUSE_BLOCK_OFFS)):
print("EFUSE block %d:" % block)
offsets = [x + EFUSE_BLOCK_OFFS[block] for x in range(EFUSE_BLOCK_LEN[block])]
print(" ".join(["%08x" % esp.read_efuse(offs) for offs in offsets]))
def summary(esp, efuses, args):
""" Print a human-readable summary of efuse contents """
for category in set(e.category for e in efuses):
print("%s fuses:" % category.title())
for e in (e for e in efuses if e.category == category):
raw = e.get_raw()
try:
raw = "(0x%x)" % raw
except TypeError:
raw = ""
(readable, writeable) = (e.is_readable(), e.is_writeable())
if readable and writeable:
perms = "R/W"
elif readable:
perms = "R/-"
elif writeable:
perms = "-/W"
else:
perms = "-/-"
value = str(e.get())
print("%-22s %-50s%s= %s %s %s" % (e.register_name, e.description, "\n " if len(value) > 20 else "", value, perms, raw))
print("")
sdio_force = _get_efuse(efuses, "XPD_SDIO_FORCE")
sdio_tieh = _get_efuse(efuses, "XPD_SDIO_TIEH")
sdio_reg = _get_efuse(efuses, "XPD_SDIO_REG")
if sdio_force.get() == 0:
print("Flash voltage (VDD_SDIO) determined by GPIO12 on reset (High for 1.8V, Low/NC for 3.3V).")
elif sdio_reg.get() == 0:
print("Flash voltage (VDD_SDIO) internal regulator disabled by efuse.")
elif sdio_tieh.get() == 0:
print("Flash voltage (VDD_SDIO) set to 1.8V by efuse.")
else:
print("Flash voltage (VDD_SDIO) set to 3.3V by efuse.")
def burn_efuse(esp, efuses, args):
efuse = _get_efuse(efuses, args.efuse_name)
old_value = efuse.get()
if efuse.efuse_type == "flag":
if args.new_value not in [None, 1]:
raise esptool.FatalError("Efuse %s is type 'flag'. New value is not accepted for this efuse (will always burn 0->1)" % efuse.register_name)
args.new_value = 1
if old_value:
print("Efuse %s is already burned." % efuse.register_name)
return
elif efuse.efuse_type == "int":
if args.new_value is None:
raise esptool.FatalError("New value required for efuse %s" % efuse.register_name)
elif efuse.efuse_type == "spipin":
if args.new_value is None or args.new_value == 0:
raise esptool.FatalError("New value required for efuse %s" % efuse.register_name)
elif efuse.efuse_type == "bitcount":
if args.new_value is None: # find the first unset bit and set it
args.new_value = old_value
bit = 1
while args.new_value == old_value:
args.new_value = bit | old_value
bit <<= 1
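# Illustrative example of the auto-increment above (value assumed): for a bitcount efuse
# currently holding 0b0111 the loop tries bit = 1, 2, 4 (all already set, so new_value stays
# equal to old_value) and stops at bit = 8, yielding new_value = 0b1111 -- the next unset bit
# is burned while every previously burned bit is preserved.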
if args.new_value & (efuse.mask >> efuse.shift) != args.new_value:
raise esptool.FatalError("Value mask for efuse %s is 0x%x. Value 0x%x is too large." % (efuse.register_name, efuse.mask >> efuse.shift, args.new_value))
if args.new_value | old_value != args.new_value:
print("WARNING: New value contains some bits that cannot be cleared (value will be 0x%x)" % (old_value | args.new_value))
confirm("Burning efuse %s (%s) 0x%x -> 0x%x" % (efuse.register_name, efuse.description, old_value, args.new_value | old_value), args)
burned_value = efuse.burn(args.new_value)
if burned_value == old_value:
raise esptool.FatalError("Efuse %s failed to burn. Protected?" % efuse.register_name)
def read_protect_efuse(esp, efuses, args):
efuse = _get_efuse(efuses, args.efuse_name)
if not efuse.is_readable():
print("Efuse %s is already read protected" % efuse.register_name)
else:
# make full list of which efuses will be disabled (ie share a read disable bit)
all_disabling = [e for e in efuses if e.read_disable_bit == efuse.read_disable_bit]
names = ", ".join(e.register_name for e in all_disabling)
confirm("Permanently read-disabling efuse%s %s" % ("s" if len(all_disabling) > 1 else "",names), args)
efuse.disable_read()
def write_protect_efuse(esp, efuses, args):
efuse = _get_efuse(efuses, args.efuse_name)
if not efuse.is_writeable():
print("Efuse %s is already write protected" % efuse.register_name)
else:
# make full list of which efuses will be disabled (ie share a write disable bit)
all_disabling = [e for e in efuses if e.write_disable_bit == efuse.write_disable_bit]
names = ", ".join(e.register_name for e in all_disabling)
confirm("Permanently write-disabling efuse%s %s" % ("s" if len(all_disabling) > 1 else "",names), args)
efuse.disable_write()
def burn_key(esp, efuses, args):
# check block choice
if args.block in ["flash_encryption", "BLK1"]:
block_num = 1
elif args.block in ["secure_boot", "BLK2"]:
block_num = 2
elif args.block == "BLK3":
block_num = 3
else:
raise RuntimeError("args.block argument not in list!")
# check keyfile
keyfile = args.keyfile
keyfile.seek(0, 2) # seek to end
size = keyfile.tell()
keyfile.seek(0)
if size != 32:
raise esptool.FatalError("Incorrect key file size %d. Key file must be 32 bytes (256 bits) of raw binary key data." % size)
# check existing data
efuse = [e for e in efuses if e.register_name == "BLK%d" % block_num][0]
original = efuse.get_raw()
EMPTY_KEY = b'\x00' * 32
if original != EMPTY_KEY:
if not args.force_write_always:
raise esptool.FatalError("Key block already has value %s." % efuse.get())
else:
print("WARNING: Key appears to have a value already. Trying anyhow, due to --force-write-always (result will be bitwise OR of new and old values.)")
if not efuse.is_writeable():
if not args.force_write_always:
raise esptool.FatalError("The efuse block has already been write protected.")
else:
print("WARNING: Key appears to be write protected. Trying anyhow, due to --force-write-always")
msg = "Write key in efuse block %d. " % block_num
if args.no_protect_key:
msg += "The key block will left readable and writeable (due to --no-protect-key)"
else:
msg += "The key block will be read and write protected (no further changes or readback)"
confirm(msg, args)
new_value = keyfile.read(32)
new = efuse.burn(new_value)
print("Burned key data. New value: %s" % (new,))
if not args.no_protect_key:
print("Disabling read/write to key efuse block...")
efuse.disable_write()
efuse.disable_read()
if efuse.is_readable():
print("WARNING: Key does not appear to have been read protected. Perhaps read disable efuse is write protected?")
if efuse.is_writeable():
print("WARNING: Key does not appear to have been write protected. Perhaps write disable efuse is write protected?")
else:
print("Key is left unprotected as per --no-protect-key argument.")
def set_flash_voltage(esp, efuses, args):
sdio_force = _get_efuse(efuses, "XPD_SDIO_FORCE")
sdio_tieh = _get_efuse(efuses, "XPD_SDIO_TIEH")
sdio_reg = _get_efuse(efuses, "XPD_SDIO_REG")
# check efuses aren't burned in a way which makes this impossible
if args.voltage == 'OFF' and sdio_reg.get() != 0:
raise esptool.FatalError("Can't set flash regulator to OFF as XPD_SDIO_REG efuse is already burned")
if args.voltage == '1.8V' and sdio_tieh.get() != 0:
raise esptool.FatalError("Can't set regulator to 1.8V is XPD_SDIO_TIEH efuse | |
Its "get_free_hyperparams" returns an empty list.
if isinstance(step, pipeline_module.PlaceholderStep):
if not utils.is_sequence(hyperparams_for_step):
raise exceptions.InvalidArgumentTypeError("Hyper-parameter values for placeholder step {step_index} of pipeline '{pipeline_id}' is not a sequence.".format(
step_index=step_index,
pipeline_id=pipeline.id,
))
elif isinstance(step, pipeline_module.SubpipelineStep):
if step.pipeline is None:
raise exceptions.InvalidStateError("Pipeline has not been resolved.")
self._check_hyperparams(step.pipeline, hyperparams_for_step)
elif isinstance(step, pipeline_module.PrimitiveStep):
if not isinstance(hyperparams_for_step, (dict, frozendict.frozendict)):
raise exceptions.InvalidArgumentTypeError("Hyper-parameter values for primitive step {step_index} of pipeline '{pipeline_id}' is not a dict.".format(
step_index=step_index,
pipeline_id=pipeline.id,
))
hyperparams_for_step_keys = set(hyperparams_for_step.keys())
free_hyperparams_keys = set(step.get_free_hyperparams().keys())
all_hyperparams_keys = set(step.get_all_hyperparams().keys())
if hyperparams_for_step_keys - all_hyperparams_keys:
raise exceptions.InvalidArgumentValueError(
"Hyper-parameter values for primitive step {step_index} of pipeline '{pipeline_id}' contain values for non-existent hyper-parameters: {hyperparams}".format(
step_index=step_index,
pipeline_id=pipeline.id,
hyperparams=sorted(hyperparams_for_step_keys - all_hyperparams_keys),
),
)
elif hyperparams_for_step_keys - free_hyperparams_keys:
raise exceptions.InvalidArgumentValueError(
"Hyper-parameter values for primitive step {step_index} of pipeline '{pipeline_id}' are overriding hyper-parameters fixed in the pipeline: {hyperparams}".format(
step_index=step_index,
pipeline_id=pipeline.id,
hyperparams=sorted(hyperparams_for_step_keys - free_hyperparams_keys),
),
)
def _get_pipeline_run_class(self) -> typing.Type[pipeline_run_module.PipelineRun]:
return pipeline_run_module.PipelineRun
def _initialize_pipeline_run(self) -> None:
if self.phase is None:
self.pipeline_run = None
return
self.pipeline_run = self._get_pipeline_run_class()(
pipeline=self.pipeline,
problem_description=self.problem_description,
phase=self.phase,
context=self.context,
previous_pipeline_run=self._previous_pipeline_run,
environment=self.environment,
random_seed=self.random_seed,
is_standard_pipeline=self.is_standard_pipeline,
users=self.users
)
# We make sure we always set this ID as soon as possible, so even if the current phase run fails
# even with an internal error which never produces a pipeline run, it can at least be visible
# that some pipeline run is missing in the sequence of phase runs.
self._previous_pipeline_run = self.pipeline_run
input_values = []
for i, input_value in sorted((int(data_reference.split('.')[1]), input_value) for data_reference, input_value in self.data_values.items() if data_reference.startswith('inputs.')):
input_values.append(input_value)
all_input_values_datasets = all(isinstance(input_value, container.Dataset) for input_value in input_values)
assert all_input_values_datasets or not self.is_standard_pipeline
# Even if the pipeline is not a standard pipeline, we still record Dataset inputs (if all are Dataset inputs)
# into pipeline run to allow generation of pipeline runs for a subset of non-standard pipelines, especially
# those computing metafeatures. Because having inputs recorded is required for a pipeline run, any other
# (for other types of inputs) pipeline run is not a valid stand-alone pipeline run and you get an error if
# you want to serialize it to JSON. This is on purpose. (We could have a better error message though.)
# You can still build a pipeline run object for non-standard pipelines. This is being used for data
# preparation or scoring pipelines.
# See: https://gitlab.com/datadrivendiscovery/metalearning/issues/64
if all_input_values_datasets:
for input_value in input_values:
self.pipeline_run.add_input_dataset(input_value)
def _clear_pipeline_run(self) -> None:
self.pipeline_run = None
def _initialize_base_temporary_directory(self) -> None:
if self.phase is None:
self._base_temporary_directory = None
self._base_temporary_directory_path = None
return
self._base_temporary_directory = tempfile.TemporaryDirectory(dir=self.scratch_dir)
self._base_temporary_directory_path = os.path.abspath(self._base_temporary_directory.name)
def _clear_base_temporary_directory(self) -> None:
if self._base_temporary_directory is not None:
self._base_temporary_directory.cleanup()
self._base_temporary_directory = None
self._base_temporary_directory_path = None
def _check_pipeline(self, inputs: typing.Sequence[typing.Any], outputs_to_expose: typing.Iterable[str]) -> typing.Iterable[str]:
"""
Check with known inputs and outputs to expose.
"""
input_types = {}
for i, input_value in enumerate(inputs):
input_types['inputs.{i}'.format(i=i)] = type(input_value)
self.pipeline.check(allow_placeholders=False, standard_pipeline=self.is_standard_pipeline, input_types=input_types)
exposable_outputs = self.pipeline.get_exposable_outputs()
outputs_to_expose_set = set(outputs_to_expose)
not_exposable_outputs = outputs_to_expose_set - exposable_outputs
if not_exposable_outputs:
raise exceptions.InvalidArgumentValueError('{not_exposable_outputs} are not exposable outputs.'.format(
not_exposable_outputs=sorted(not_exposable_outputs),
))
for i, step in enumerate(self.pipeline.steps):
if not isinstance(step, pipeline_module.PrimitiveStep):
continue
if step.primitive is None:
raise exceptions.InvalidStateError("Primitive has not been resolved.")
arguments_set = set(step.arguments.keys())
primitive_arguments_without_defaults = step._get_primitive_arguments_without_defaults()
instance_methods = step.primitive.metadata.query()['primitive_code'].get('instance_methods', {})
step_reference_prefix = 'steps.{i}.'.format(i=i)
# We iterate over "outputs_to_expose" but we modify "outputs_to_expose_set".
for output_to_expose in outputs_to_expose:
if output_to_expose.startswith(step_reference_prefix):
produce_method = output_to_expose[len(step_reference_prefix):]
# Produce method should not contain a dot.
assert '.' not in produce_method, produce_method
produce_methods = step.outputs
if produce_method not in produce_methods:
produce_method_arguments = set(instance_methods.get(produce_method, {}).get('arguments', [])) & primitive_arguments_without_defaults
missing_arguments = produce_method_arguments - arguments_set
if missing_arguments:
logger.warning(
"Additional output to expose '%(produce_method)s' does not have all necessary arguments available. Skipping exposing. Missing arguments: %(missing_arguments)s",
{
'produce_method': produce_method,
'missing_arguments': sorted(missing_arguments),
},
)
outputs_to_expose_set.remove(output_to_expose)
# We sort to have deterministic order.
return sorted(outputs_to_expose_set)
def _run_placeholder(self, step: pipeline_module.PlaceholderStep) -> None:
raise exceptions.InvalidPipelineError("Step {step_index} of pipeline '{pipeline_id}' is a placeholder but there should be no placeholders.".format(
step_index=self.current_step,
pipeline_id=self.pipeline.id,
))
# TODO: Make return type be equal to the current's class type, so that it adapts if this class is subclassed.
def _create_subpipeline(self, pipeline: pipeline_module.Pipeline, hyperparams: typing.Optional[typing.Sequence]) -> 'Runtime':
"""
Creates an instance of the subpipeline's runtime.
"""
# We change the random seed in a deterministic way so that it does not matter in which order we run steps.
# Subpipelines are generally not a standard pipeline.
return type(self)(
pipeline,
hyperparams,
# TODO: Should we pass "problem_description" as well, but make it so that it does not try to mark columns again?
problem_description=None,
context=self.context,
random_seed=self.random_seed + self.current_step,
volumes_dir=self.volumes_dir,
scratch_dir=self.scratch_dir,
is_standard_pipeline=False,
environment=self.environment,
users=self.users,
)
def _run_subpipeline(self, step: pipeline_module.SubpipelineStep) -> None:
assert self.pipeline_run is not None
if step.pipeline is None:
raise exceptions.InvalidPipelineError("Pipeline has not been resolved.")
subpipeline_inputs: typing.List[typing.Any] = []
for i, data_reference in enumerate(step.inputs):
subpipeline_inputs.append(self.data_values[data_reference])
if self.hyperparams is not None:
hyperparams = self.hyperparams[self.current_step]
# We checked this already in "_check_hyperparams".
assert utils.is_sequence(hyperparams), hyperparams
else:
hyperparams = None
subpipeline = self._create_subpipeline(step.pipeline, hyperparams)
if self.phase == metadata_base.PipelineRunPhase.FIT:
assert self.steps_state[self.current_step] is None
else:
subpipeline.set_params(typing.cast(typing.List, self.steps_state[self.current_step]))
outputs_to_expose_map = {}
outputs_to_expose = set()
for i, output_id in enumerate(step.outputs):
# "output_id" can be "None" if this output is not used and should be skipped.
if output_id is not None:
data_reference = 'outputs.{i}'.format(i=i)
outputs_to_expose.add(data_reference)
outputs_to_expose_map['steps.{i}.{output_id}'.format(i=step.index, output_id=output_id)] = data_reference
step_reference_prefix = 'steps.{i}.'.format(i=step.index)
for output_to_expose in self.outputs_to_expose:
# We process recursive data references for this subpipeline.
# We check that "output_to_expose" is not in "outputs_to_expose_map" because data
# references of the format "steps.{i}.{output_id}" have "step_reference_prefix"
# as a prefix but are not really a recursive data reference.
# But all references of that format are already in "outputs_to_expose_map".
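# Illustrative example (step indices and output id are hypothetical): if this subpipeline
# sits at step index 3 and declares an output id "pipeline_output", an outer request for
# "steps.3.pipeline_output" is already handled via "outputs_to_expose_map" above, while a
# recursive request such as "steps.3.steps.1.produce" is translated below into the inner
# data reference "steps.1.produce" and exposed from the subpipeline run.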
if output_to_expose.startswith(step_reference_prefix) and output_to_expose not in outputs_to_expose_map:
data_reference = output_to_expose[len(step_reference_prefix):]
# Data reference at this point should contain at least one dot, because all with the prefix
# which do not contain a dot we filtered out by checking them against "outputs_to_expose_map".
assert '.' in data_reference, data_reference
outputs_to_expose.add(data_reference)
outputs_to_expose_map[output_to_expose] = data_reference
# We sort "outputs_to_expose" to have deterministic order.
result = subpipeline._run(subpipeline_inputs, self.phase, outputs_to_expose=sorted(outputs_to_expose))
self.pipeline_run.add_subpipeline_step(result.pipeline_run)
result.check_success()
if self.phase == metadata_base.PipelineRunPhase.FIT:
assert self.steps_state[self.current_step] is None
self.steps_state[self.current_step] = subpipeline.get_params()
for step_data_reference, subpipeline_data_reference in outputs_to_expose_map.items():
self.data_values[step_data_reference] = result.values[subpipeline_data_reference]
def _get_singleton_value(self, value: typing.Any, is_argument: bool, name: str) -> typing.Any:
"""
A helper to extract a value from a singleton value (extracting a sole element of a
container of length 1).
"""
if len(value) != 1:
if is_argument:
raise exceptions.InvalidPipelineError(
"Argument '{argument_name}' of step {step_index} of pipeline '{pipeline_id}' is singleton data, but available data is not.".format(
argument_name=name,
step_index=self.current_step,
pipeline_id=self.pipeline.id,
),
)
else:
raise exceptions.InvalidPipelineError(
"Hyper-parameter '{hyperparameter_name}' of step {step_index} of pipeline '{pipeline_id}' is singleton data, but available data is not.".format(
hyperparameter_name=name,
step_index=self.current_step,
pipeline_id=self.pipeline.id,
),
)
return get_singleton_value(value)
def _prepare_primitive_arguments(self, step: pipeline_module.PrimitiveStep) -> typing.Dict[str, typing.Any]:
arguments = {}
for argument_name, argument_description in step.arguments.items():
if argument_description['type'] == metadata_base.ArgumentType.DATA:
argument_value = self.data_values[argument_description['data']]
# We have to extract a singleton value out.
argument_value = self._get_singleton_value(argument_value, True, argument_name)
elif argument_description['type'] == metadata_base.ArgumentType.CONTAINER:
if utils.is_sequence(argument_description['data']):
values = [self.data_values[data_reference] for data_reference in argument_description['data']]
# We have to create a container List.
argument_value = self._get_list_value(values)
else:
argument_value = self.data_values[argument_description['data']]
elif argument_description['type'] == metadata_base.ArgumentType.VALUE:
argument_value = argument_description['data']
else:
raise exceptions.UnexpectedValueError("Unknown argument type: {argument_type}".format(argument_type=argument_description['type']))
arguments[argument_name] = argument_value
return arguments
def _get_list_value(self, values: typing.Sequence) -> container.List:
"""
Creates a container List from ``values``. It reuses existing metadata in ``values``
to create metadata of the container List.
"""
container_list = container.List(values, {
'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
'structural_type': container.List,
'dimension': {
'length': len(values),
},
})
for value_index, value in enumerate(values):
container_list.metadata = value.metadata.copy_to(container_list.metadata, (), (value_index,))
return container_list
def _get_default_hyperparams(self, step_index: int, step: pipeline_module.PrimitiveStep) -> hyperparams_module.Hyperparams:
return step.get_primitive_hyperparams().defaults()
def _get_runtime_hyperparams(self, step_index: int, step: pipeline_module.PrimitiveStep) -> typing.Dict:
if self.hyperparams is not None:
runtime_hyperparams = self.hyperparams[step_index]
# We checked this already in "_check_hyperparams".
assert isinstance(runtime_hyperparams, (dict, frozendict.frozendict)), runtime_hyperparams
else:
runtime_hyperparams = {}
return runtime_hyperparams
def _get_pipeline_hyperparams(self, step_index: int, step: pipeline_module.PrimitiveStep) -> typing.Dict:
pipeline_hyperparams = {}
for hyperparameter_name, hyperparameter_description in step.hyperparams.items():
if hyperparameter_description['type'] == metadata_base.ArgumentType.DATA:
if utils.is_sequence(hyperparameter_description['data']):
pipeline_hyperparams[hyperparameter_name] = [
self._get_singleton_value(self.data_values[data_reference], False, hyperparameter_name)
for data_reference in hyperparameter_description['data']
]
else:
pipeline_hyperparams[hyperparameter_name] = self._get_singleton_value(self.data_values[hyperparameter_description['data']], False, hyperparameter_name)
# import datetime
from flask import Flask,render_template,request,redirect,session,jsonify
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
from flask_mail import Mail, Message
from flask_uploads import UploadSet, configure_uploads
from random import randint
import hashlib, os, json # json is used below for (de)serializing timetable documents
from openpyxl import *
import requests, time
from bson.objectid import ObjectId
import math
from datetime import timedelta
import base64, os
import urllib2
from datetime import datetime
from datetime import date
from operator import itemgetter
from OpenSSL import SSL
from time import gmtime, strftime
app = Flask(__name__)
CORS(app)
app.config["MONGO_DBNAME"] = "mt"
mongo = PyMongo(app)
cardList=[]
context = SSL.Context(SSL.TLSv1_2_METHOD)
context.use_privatekey_file('server.key')
context.use_certificate_file('cert.pem')
# context = SSL.Context(SSL.TLSv1_2_METHOD)
# context.load_cert_chain('cert.crt','server.key')
# @app.route("/uploadtimetable", methods=["POST"])
# def uploadtimetable():
# formdata = request.get_json()['fd']
# filename =""
# target="/upload/timetables"
# #branch_selected = request.form['branch']
# for upload in request.files.getlist("file"):
# filename = upload.filename
# destination = "/".join([target, filename])
# upload.save(destination)
# os.remove(destination)
# return jsonify({"success":"true","message":"Successfully Uploaded"})
def sortlist(ar):
# print "befor"
# print ar
for i in range(0,len(ar)):
for j in range(0,len(ar)-i-1):
a=datetime.strptime(ar[j+1]["date"], "%d-%m-%Y")
b=datetime.strptime(ar[j]["date"], "%d-%m-%Y")
#print a,b
if(b>a):
temp=ar[j+1]
ar[j+1]=ar[j]
ar[j]=temp
#print 'swap is ',ar[i],ar[j]
#ar=list(reversed(ar))
# print "after"
# print ar
return ar
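# A minimal equivalent sketch (assuming the same "%d-%m-%Y" date strings and the
# "datetime" import above; the helper name is illustrative): Python's built-in sort
# with a parsed-date key does the same job as the bubble sort above in O(n log n).
def sortlist_builtin(ar):
    # Sort record dicts in ascending order of their "date" field.
    return sorted(ar, key=lambda item: datetime.strptime(item["date"], "%d-%m-%Y"))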
@app.route('/addApplyleave',methods=['POST'])
def addApplyleave():
parent = {}
parent['staffid'] = request.get_json()['staffid']
parent['staffname'] = request.get_json()['staffname']
parent['staffmobile'] = request.get_json()['staffmobile']
parent['leaveDate'] = datetime.now().strftime ("%d-%m-%Y")
parent['fromDate'] = request.get_json()['fromDate']
parent['toDate'] = request.get_json()['toDate']
parent['totalDays'] = request.get_json()['totalDays']
parent['reason'] = request.get_json()['reason']
parent['action_taken'] = ""
mongo.db.leaves.insert(parent)
return jsonify({"success":"true","message":"Successfully Applied For Leave! Now Wait till admin replies!"})
@app.route('/leaveaction',methods=['POST'])
def leaveaction():
print request.get_json()
#mongo.db.leaves.update_one({"staffid":request.get_json()['staffid'],"leaveDate":request.get_json()['leavedate']},{"$set":{"action_taken":request.get_json()['action']}},upsert=False)
mongo.db.leaves.update_one({"_id":ObjectId(request.get_json()["_id"])},{"$set":{"action_taken":request.get_json()['action']}},upsert=False)
if request.get_json()['action'] == 0:
return jsonify({"success":"true","message":"Approved Successfully!!"})
else:
return jsonify({"success":"true","message":"Disapproved Successfully!!"})
@app.route('/getLeavelist',methods=['POST'])
def getLeavelist():
leaves=mongo.db.leaves.find({'action_taken':""})
res = list(leaves)
for l in res:
l["_id"]=str(l["_id"])
#print res
if len(res):
return jsonify({"success":"true","leaves":res})
else:
return jsonify({"success":"false","message":"No Leaves Pending for Now Found!"})
@app.route('/uploadquestionpaper',methods=['POST'])
def uploadquestionpaper():
filename =""
target="upload/questionpapers"
destination = ''
for upload in request.files.getlist("file"):
if not upload.filename.endswith(".pdf"):
return jsonify({"success":"false","message":"Only PDF Files"})
filename = str(time.time()).split(".")[0]+"."+upload.filename.split(".")[-1]
destination = "/".join([target, filename])
upload.save(destination)
#print destination
return jsonify({"success":"true","qplink":"http://espl.in.net/"+destination})
@app.route('/uploadTimetableportion',methods=['POST'])
def uploadTimetableportion():
filename =""
target="upload/timetables"
destination = ''
for upload in request.files.getlist("file"):
if not upload.filename.endswith(".pdf"):
return jsonify({"success":"false","message":"Only PDF Files"})
filename = str(time.time()).split(".")[0]+"."+upload.filename.split(".")[-1]
destination = "/".join([target, filename])
print destination
upload.save(destination)
#print destination
return jsonify({"success":"true","ttportion":"http://espl.in.net/"+destination})
@app.route('/uploadTimetable',methods=['POST'])
def uploadTimetable():
filename =""
target="upload/timetables"
destination = ''
for upload in request.files.getlist("file"):
if not upload.filename.endswith(".pdf"):
return jsonify({"success":"false","message":"Only PDF Files"})
filename = str(time.time()).split(".")[0]+"."+upload.filename.split(".")[-1]
destination = "/".join([target, filename])
print destination
upload.save(destination)
#print destination
return jsonify({"success":"true","ttlink":"http://espl.in.net/"+destination})
@app.route("/uploadTest", methods=["POST"])
def uploadTest():
qp=request.get_json()
qp["isDeleted"]="false"
mongo.db.questionpapers.insert(qp)
return jsonify({"success":"true","message":"Successfully inserted Question Paper!"})
@app.route("/uploadTestTimetable", methods=["POST"])
def uploadTestTimetable():
qp=request.get_json()
qp["isDeleted"]="false"
mongo.db.testtimetable.insert(qp)
return jsonify({"success":"true","message":"Successfully inserted Test Timetable!"})
# @app.route('/getteachertimetable',methods=['POST'])
def getttimetable(day,teacherid,cityid):
teachertimetable=[]
temp={}
# timetableid="5a18177946465d507dcb61f2"
# teacherid="<PASSWORD>"
timetable=mongo.db.newtimetables.find_one({"city":cityid,"date":day,"isDeleted":"false"},{"_id":False});
if(timetable):
for batch in timetable["timetable"]:
for lec in batch["batchtimetable"]:
temp={}
if(lec["teacherid"]==teacherid):
temp=lec
temp["batch"]=batch["batchName"]
temp["timeinsecond"]=getsecond(lec["stime"])
teachertimetable.append(temp)
teachertimetable = sorted(teachertimetable, key=itemgetter('timeinsecond'))
return teachertimetable
else:
return []
@app.route('/getteachertimetable',methods=['POST'])
def getteachertimetable():
teacherid=request.get_json()["teacherid"]
cityid=request.get_json()["cityid"]
tymtable=[]
temp={}
temp["date"]=datetime.now().strftime ("%d-%m-%Y")
temp["day"]="today"
# temp["timetable"]=getttimetable(datetime.now().strftime ("%d-%b-%y"),"59d09c2e46465d5db8e5dfff","59803742b5f1e7d11cccc592")
temp["timetable"]=getttimetable(datetime.now().strftime ("%d-%b-%y"),teacherid,cityid)
tymtable.append(temp)
temp={}
to=(datetime.now() + timedelta(days=1))
#to=to.strftime ("%d-%b-%y")
temp["date"]=to.strftime ("%d-%b-%y")
temp["day"]="tommorow"
temp["timetable"]=getttimetable(to.strftime ("%d-%b-%y"),teacherid,cityid)
tymtable.append(temp)
return jsonify({"success":"true","timetable":tymtable})
@app.route('/gettodaystimetable',methods=['POST'])
def gettodaystimetable():
#print "timetable function starts"
parent=[]
cityid=request.get_json()["cityid"]
#print cityid
timetable=mongo.db.newtimetables.find({"date":datetime.now().strftime ("%d-%b-%y"),"isDeleted":"false","city":cityid},{"_id":False});
#print "before error"
if timetable:
pass
timetable=list(timetable)
temp={}
temp["day"]="today"
if len(timetable):
for t in timetable[0]['timetable']:
#print t
if len(t['batchtimetable']):
t['status']="true"
else:
t['status']="false"
temp["timetable"]=timetable
parent.append(temp)
to=(datetime.now() + timedelta(days=1))
#to=to.strftime ("%d-%b-%y")
tommorow=to.strftime ("%d-%b-%y")
tommorowtimetable=mongo.db.newtimetables.find({"date":tommorow,"city":cityid,"isDeleted":"false"},{"_id":False});
if tommorowtimetable:
temp={}
temp["day"]="tommorow"
tymtable=list(tommorowtimetable)
if(tymtable):
for t in tymtable[0]['timetable']:
#print t
if len(t['batchtimetable']):
t['status']="true"
else:
t['status']="false"
temp["timetable"]=tymtable
parent.append(temp)
to=(datetime.now() + timedelta(days=2))
#to=to.strftime ("%d-%b-%y")
tommorow2=to.strftime ("%d-%b-%y")
dayatommorowtimetable=mongo.db.newtimetables.find({"date":tommorow2,"city":cityid,"isDeleted":"false"},{"_id":False});
if dayatommorowtimetable:
temp={}
temp["day"]="dayatommorow"
dayatymtable=list(dayatommorowtimetable)
if(dayatymtable):
for t in dayatymtable[0]['timetable']:
#print t
if len(t['batchtimetable']):
t['status']="true"
else:
t['status']="false"
temp["timetable"]=dayatymtable
parent.append(temp)
if len(temp):
return jsonify({"success":"true","timetable":parent})
else:
return jsonify({"success":"false"})
def getsecond(t):
timeinsecond = time.strptime(t, "%I:%M %p")
timeinsecond=timedelta(hours=timeinsecond.tm_hour, minutes=timeinsecond.tm_min, seconds=timeinsecond.tm_sec).seconds
return timeinsecond
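# Illustrative example (value assumed): getsecond("1:30 PM") parses to 13:30 and returns
# 13 * 3600 + 30 * 60 = 48600, i.e. seconds past midnight, which is what the timetable
# code sorts and compares lecture slots on.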
@app.route('/rpitest',methods=['POST'])
def rpitest():
print request.get_json()["time"]
tym1=request.get_json()["time"]
t=mongo.db.tym.find_one({},{"_id":False})
tt=t["time"]
tt.append(tym1)
result = mongo.db.tym.update_one({},{"$set":{"time":tt}},upsert=False)
return jsonify({"success":"true","time":t["time"]})
# @app.route('/checkovellaping',methods=['GET'])
# @app.route("/getstaffattendence",methods=["POST"])
# def getstaffattendence():
# staffid="59d09c2e46465d5db8e5dfff"
# lectures=mongo.db.find({"staffid":staffid}).sort({date:1},{"_id":False})
# lectures=list(lectures)
# # pass
# # formdata = request.get_json()
# # city = formdata['city']
# # branchName = formdata['branchName']
# # course = formdata['course']
# # standard = formdata['standard']
# # batchName = formdata['batchName']
# # time = formdata['time']
# # for batch in batchName:
# # print batch['value']
# # mongo.db.batches.insert({'city':city,'branchName':branchName,'course':course,'standard':standard,'time':time,'batchName':batch['value']})
# # #print batchName[0]['value']
# if len(lectures):
# return jsonify({"success":"true","lectures":lectures})
# else:
# return jsonify({"success":"false"})
@app.route('/newgetTimetable',methods=['POST'])
def newgetTimetable():
if(request.get_json()):
timetables = mongo.db.newtimetables.find({"_id":ObjectId(request.get_json()['id']),"isDeleted":"false"})
else:
#timetables = mongo.db.newtimetables.find({"isDeleted":"false"}).sort('_id',-1).limit(50)
timetables = mongo.db.newtimetables.find({"isDeleted":"false"}).sort('_id',-1)
timetables = list(timetables)
for timetable in timetables:
timetable["_id"]=str(timetable["_id"])
for t in timetable['timetable']:
if len(t['batchtimetable']):
t['status']="true"
else:
t['status']="false"
cityname=mongo.db.cities.find({"_id":ObjectId(timetable["city"])})
cityname=list(cityname)
timetable["cityname"]=cityname[0]["cityname"]
if len(timetables):
return jsonify({"success":"true","timetables":timetables})
else:
return jsonify({"success":"false","message":"No schedule Found. Please add it!"})
@app.route('/newcreatetimetable', methods=['POST'])
def newcreatetimetable():
if request.get_json():
cityid = request.get_json()['ttCity']
timetable_date = request.get_json()['ttDate']
copyDate = request.get_json()['ttCopy']
if copyDate == "noCopy":
pass
batchresult = mongo.db.coursedetails.find({"cityid":cityid},{"_id":False,"batchname":True})
batchresult = list(batchresult)
parent={}
parent["date"]=timetable_date
parent["city"]=cityid
timetable=[]
for batch in batchresult:
temp={}
temp["batchName"]=batch['batchname']
temp["batchtimetable"]=[]
timetable.append(temp)
parent["timetable"]=timetable
parent["isDeleted"]="false"
result = mongo.db.newtimetables.insert(json.loads(json.dumps(parent)))
else:
pass
ttresult = mongo.db.newtimetables.find({"city":cityid,"date":copyDate},{"_id":False,"timetable":True})
ttresult = list(ttresult)
print ttresult[0]['timetable']
parent={}
parent["isDeleted"]="false"
parent["date"] = timetable_date
parent['city'] = cityid
parent['timetable'] = ttresult[0]['timetable']
#print parent
result = mongo.db.newtimetables.insert(json.loads(json.dumps(parent)))
if len(parent):
return jsonify({"success":"true","timetable":parent})
else:
return jsonify({"success":"false","message":"No terms Found. Please add it!"})
@app.route('/createtimetable', methods=['POST'])
def createtimetable():
if request.get_json():
cityid = request.get_json()['ttCity']
timetable_date = request.get_json()['ttDate']
batchresult = mongo.db.coursedetails.find({"cityid":cityid},{"_id":False,"batchname":True})
batchresult = list(batchresult)
parent = {}
parent['date'] = timetable_date
parent['city'] = cityid
timetable = list()
for batches in batchresult:
batch = {}
batch['batchname'] = batches['batchname']
batch['hours'] = list()
print batch['batchname']
for i in range(6):
batchdata={}
batchdata["stime"]=""
batchdata["etime"]=""
batchdata["teacher"]="N/A"
batchdata['subject']="N/A"
batchdata["teacherid"]="N/A"
batchdata["teachersubject"]="N/A"
batch['hours'].append(batchdata)
timetable.append(batch)
parent['timetable'] = timetable
parent["isDeleted"]="false"
result = mongo.db.timetables.insert(json.loads(json.dumps(parent)))
if len(parent):
return jsonify({"success":"true","timetable":parent})
else:
return jsonify({"success":"false","message":"No terms Found. Please add it!"})
@app.route('/shiftBatch',methods=['POST'])
def shiftBatch():
fromBatchName=request.get_json()["fromBatchName"]
toBranchName=request.get_json()["toBranchName"]
toBranchId=request.get_json()["toBranchId"]
result = mongo.db.coursedetails.update_one({"batchname":fromBatchName} ,{"$set":{"branchname":toBranchName,"branchid":toBranchId}},upsert=False)
return jsonify({"success":"true","message":"Batch Shifted successfully!"})
@app.route('/countDashboard', methods=['POST'])
def countDashboard():
dashCount = {}
studentCount = mongo.db.student_info.find({"isDeleted":'false'}).count()
batch = mongo.db.coursedetails.find({}).distinct("batchname")
batchCount = len(batch)
branchCount = mongo.db.branches.find({}).count()
productCount = mongo.db.feestructure.find({}).count()
dashCount['studentCount'] = studentCount
dashCount['batchCount'] = batchCount
dashCount['branchCount'] = branchCount
dashCount['productCount'] = productCount
return jsonify({"success":"true","message":"count generated!","dashCount":dashCount})
@app.route('/deletestudent',methods=['POST'])
def deletestudent():
studid=request.get_json()["studId"]
result = mongo.db.student_info.update_one({"_id":ObjectId(studid)} ,{"$set":{"isDeleted":"True"}},upsert=False)
return jsonify({"success":"true","message":"Student Deleted successfully!"})
@app.route('/shiftBatchStudent',methods=['POST'])
def shiftBatchStudent():
fromBatchName=request.get_json()["fromBatchName"]
toBranchName=request.get_json()["toBranchName"]
print fromBatchName
print toBranchName
result = mongo.db.student_info.update({"Batch":fromBatchName} ,{"$set":{"Center":toBranchName}},multi=True,upsert=False)
return jsonify({"success":"true","message":"Students Shifted successfully!"})
@app.route('/chktimetableexist',methods=['POST'])
def chktimetableexist():
cityid=request.get_json()["ttCity"]
tdate=request.get_json()["ttDate"]
timetables = mongo.db.newtimetables.find({"city":cityid,"date":tdate,"isDeleted":"false"},{"_id":False})
timetables=list(timetables)
if len(timetables):
return jsonify({"success":"false","message":"allready exists"})
else:
return jsonify({"success":"true"})
def checkovellaping(s1,e1,s2,e2):
# slot1=["11:05 AM","12:20 PM"]
# slot2=["1:19 PM","02:00 PM"]
# #time=datetime.strptime(slot1[0], '%H:%M %p')
# time1=datetime.strptime(slot1[0], '%I:%M %p').strftime('%I:%M %p')
# time2=datetime.strptime(slot1[1], '%I:%M %p').strftime('%I:%M %p')
delta=min(getsecond(e1),getsecond(e2))-max(getsecond(s1),getsecond(s2))
if delta <=0:
overlap="false"
else:
overlap="true"
return overlap
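# Illustrative usage (times assumed): for lectures 11:05 AM-12:20 PM and 1:19 PM-2:00 PM,
# delta = min(44400, 50400) - max(39900, 47940) = -3540 <= 0, so checkovellaping() returns
# "false" (no overlap); moving the second lecture's start to 12:00 PM gives
# delta = 44400 - 43200 = 1200 > 0 and the function returns "true".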
@app.route('/editDailyTimetable',methods=['POST'])
def editDailyTimetable():
timetables=request.get_json()["timetables"]
for timetable in timetables[0]["timetable"]:
for lec in timetable["batchtimetable"]:
lec["overlap"]="false"
res="false"
for teacher in request.get_json()["teacherData2"]:
for i in range(0,len(teacher)):
if(i<len(teacher)-1):
for j in range(i+1,len(teacher)):
res=checkovellaping(teacher[i]["stime"],teacher[i]["etime"],teacher[j]["stime"],teacher[j]["etime"])
if res =="true":
for timetable in timetables[0]["timetable"]:
for lec in timetable["batchtimetable"]:
if(lec["teacherid"]==teacher[i]["teacherid"]):
lec["overlap"]="true"
if(lec["teacherid"]==teacher[j]["teacherid"]):
lec["overlap"]="true"
return jsonify({"success":"true","overlap":"true","timetables":timetables,"message":"Teacher Lecture Timing overlap"})
if(res=="false"):
for timetable in timetables[0]["timetable"]:
for i in range(0,len(timetable["batchtimetable"])):
for j in range(i+1,len(timetable["batchtimetable"])):
res=checkovellaping(timetable["batchtimetable"][i]["stime"],timetable["batchtimetable"][i]["etime"],timetable["batchtimetable"][j]["stime"],timetable["batchtimetable"][j]["etime"])
if(res=="true"):
timetable["batchtimetable"][i]["overlap"]="true"
timetable["batchtimetable"][j]["overlap"]="true"
return jsonify({"success":"true","overlap":"true","timetables":timetables,"message":"Batch Lecture Timing overlap"})
if(res=="false"):
return jsonify({"success":"true","overlap":"false","timetables":timetables})
@app.route('/updatetimetable',methods=['POST'])
def updatetimetable():
tt=request.get_json()['timetables'][0]
print request.get_json()['timetables'][0]['timetable']
mongo.db.newtimetables.update_one({"_id":ObjectId(tt['_id'])} ,{"$set":{"timetable":tt['timetable']}},upsert=False)
return jsonify({"success":"true","message":"Update successfully!"})
@app.route('/getTodaysDate',methods=['POST'])
def getTodaysDate():
todate = datetime.now().date().strftime("%d-%m-%Y")
return jsonify({"success":"true","todate":todate})
@app.route('/testUserExistence',methods=['POST'])
def testUserExistence():
staffname=request.get_json()["staffname"]
usertype=request.get_json()["usertype"]
exists = mongo.db.userlogins.find({"staffname":staffname,"usertype":usertype,"statusActive":"1","isDeleted":"false"},{"_id":False})
exist = list(exists)
if len(exist):
return jsonify({"success":"true"})
else:
return jsonify({"success":"false"})
@app.route('/getCurrentCityName',methods=['POST'])
def getCurrentCityName():
cityid=request.get_json()["cityid"]
print cityid
currentCity = mongo.db.cities.find({"_id":ObjectId(cityid)},{"_id":False})
currentCity=list(currentCity)
return jsonify({"success":"true","currentCity":currentCity})
@app.route('/getTimetable',methods=['POST'])
def getTimetable():
if(request.get_json()):
timetables = mongo.db.timetables.find({"_id":ObjectId(request.get_json()['id'])})
else:
timetables = mongo.db.timetables.find()
timetables = list(timetables)
for timetable in timetables:
timetable["_id"]=str(timetable["_id"])
cityname=mongo.db.cities.find({"_id":ObjectId(timetable["city"])})
cityname=list(cityname)
timetable["cityname"]=cityname[0]["cityname"]
if len(timetables):
return jsonify({"success":"true","timetables":timetables})
else:
return jsonify({"success":"false","message":"No schedule Found. Please add it!"})
@app.route('/registeredMobile',methods=['POST'])
def registeredmobile():
print "in registeredmobile"
print request.get_json()["mobile"]
if(request.get_json()["type"]=="Teacher / Staff"):
mobile = mongo.db.staffdetails.find({"mobile":int(request.get_json()["mobile"]),"isDeleted":"false"})
#mobile = mongo.db.staffdetails.find({"mobile":9960136918})
mobile=list(mobile)
print mobile
for mo in mobile:
mo["_id"]=str(mo["_id"]);
if len(mobile):
return jsonify({"success":"true","staff":mobile})
else:
return jsonify({"success":"false","message":"No mobile Found!"})
else:
print "hello"
studentlist=[]
studmobile = mongo.db.student_info.find({"studentMobile":request.get_json()["mobile"],"isDeleted":"false"})
studmobile=list(studmobile)
for mo in studmobile:
mo["_id"]=str(mo["_id"]);
if mo not in studentlist:
studentlist.append(mo)
mommobile = mongo.db.student_info.find({"motherMobile":request.get_json()["mobile"],"isDeleted":"false"})
mommobile=list(mommobile)
for mo in mommobile:
mo["_id"]=str(mo["_id"]);
if mo not in studentlist:
studentlist.append(mo)
dadmobile = mongo.db.student_info.find({"fatherMobile":request.get_json()["mobile"],"isDeleted":"false"})
dadmobile=list(dadmobile)
for mo in dadmobile:
mo["_id"]=str(mo["_id"]);
if mo not in studentlist:
studentlist.append(mo)
if len(studentlist):
return jsonify({"success":"true","staff":studentlist})
else:
return jsonify({"success":"false","message":"No mobile Found!"})
#ObjectId("59fc59db46465d01ed9c30fb")
@app.route('/summarypayment',methods=['POST'])
def summarypayment():
#staffid="59d09c2e46465d5db8e5dfff"
staffid=request.get_json()["id"]
print "staffid"
print staffid
mode=""
lectures=mongo.db.staffattendancedetails.find({"staffid":staffid,"isDeleted":"false"},{"_id":False})
lectures=list(lectures)
staffdetails=mongo.db.staffdetails.find({"_id":ObjectId(staffid),"isDeleted":"false"},{"_id":False})
staffdetails=list(staffdetails)
monthwise=[]
if(len(staffdetails)):
if(len(staffdetails[0]["salarydetails"])):
if(staffdetails[0]["salarydetails"]["mode"]=="Contract"):
mode="Contract"
for lec in lectures:
templist={}
currentmonth=lec["date"][3:]
f=0;
for month in monthwise:
if(month["month"]==currentmonth):
month["totalduration"]+=lec["duration"]
print "present"
f=1;
batch=lec["batch"]
batchdetail=mongo.db.coursedetails.find({"batchname":batch},{"_id":False})
batchdetail=list(batchdetail)
for item in staffdetails[0]["salarydetails"]["contractlist"]:
if(item["std"]==batchdetail[0]["std"] and item["course"]==batchdetail[0]["coursename"]):
lec["rate"]=item["rate"]
month["totalpayment"]=month["totalpayment"]+(float(lec["duration"]/60.0)*float(lec["rate"]))
month["detail"].append(lec)
break
#templist["detail"].append(lec)
if(f==0):
templist["month"]=currentmonth
templist["totalduration"]=lec["duration"]
templist["detail"]=[]
batch=lec["batch"]
batchdetail=mongo.db.coursedetails.find({"batchname":batch},{"_id":False})
batchdetail=list(batchdetail)
for item in staffdetails[0]["salarydetails"]["contractlist"]:
if(item["std"]==batchdetail[0]["std"] and item["course"]==batchdetail[0]["coursename"]):
lec["rate"]=item["rate"]
print lec
templist["totalpayment"]=float(lec["rate"])*float(lec["duration"]/60.0)
templist["detail"].append(lec)
monthwise.append(templist)
if(staffdetails[0]["salarydetails"]["mode"]=="Salary"):
mode="Salary"
for lec in lectures:
templist={}
currentmonth=lec["date"][3:]
f=0;
for month in monthwise:
if(month["month"]==currentmonth):
f=1;
if(f==0):
templist["month"]=currentmonth
templist["salary"]= staffdetails[0]["salarydetails"]["permonthsalary"]
monthwise.append(templist)
if len(monthwise):
return jsonify({"success":"true","payment":monthwise,"mode":mode})
else:
return jsonify({"success":"false","message":"No mobile Found!"})
# @app.route('/paymentcalculation',methods=['POST'])
# def paymentcalculation():
# staffid=request.get_json()["id"]
# print "staffid"
# print staffid
# lectures=mongo.db.staffattendancedetails.find({"staffid":staffid},{"_id":False})
# lectures=list(lectures)
# staffdetails=mongo.db.staffdetails.find({"_id":ObjectId(staffid)},{"_id":False})
# staffdetails=list(staffdetails)
# if(len(staffdetails)):
# if(len(staffdetails[0]["salarydetails"])):
# if(staffdetails[0]["salarydetails"]["mode"]=="Contract"):
# for lec in lectures:
# batch=lec["batch"]
# batchdetail=mongo.db.coursedetails.find({"batchname":batch},{"_id":False})
# batchdetail=list(batchdetail)
# for item in staffdetails[0]["salarydetails"]["contractlist"]:
# if(item["std"]==batchdetail[0]["std"] and item["course"]==batchdetail[0]["coursename"]):
# lec["rate"]=item["rate"]
# print lec
# if len(lectures):
# return jsonify({"success":"true","payment":lectures})
# else:
# return jsonify({"success":"false","message":"No mobile Found!"})
@app.route("/getstaffattendance",methods=["POST"])
def getstaffattendance():
#staffid="59d09c2e46465d5db8e5dfff"
staffid=request.get_json()["id"]
lectures=mongo.db.staffattendancedetails.find({"staffid":staffid,"isDeleted":"false"})
lectures=list(lectures)
for l in lectures:
l["_id"]=str(l["_id"])
lectures= sortlist(lectures)
if len(lectures):
return jsonify({"success":"true","lectures":lectures,"message":"successfully"})
else:
return jsonify({"success":"false","message":"Something went wrong"})
@app.route("/getemployeeattendance",methods=["POST"])
def getemployeeattendance():
staffid=request.get_json()["id"]
lectures=mongo.db.employeeattendancedetails.find({"staffid":staffid})
lectures=list(lectures)
for l in lectures:
l["_id"]=str(l["_id"])
if len(lectures):
return jsonify({"success":"true","lectures":lectures})
else:
return jsonify({"success":"false"})
@app.route("/setData",methods=["POST"])
def setData():
print "hdj"
data = request.get_json()["cardno"]
print data
return jsonify({"success":"true"})
# if data=='32114725':
# mongo.db.rfiddetails.insert({'name':'Shubham','time':str(datetime.now())})
# cardList.append('Shubham')
# if data=='229111155187':
# mongo.db.rfiddetails.insert({'name':'Ankita','time':str(datetime.now())})
# cardList.append('Ankita')
# return data
@app.route("/getCards",methods=["POST"])
def getcardsNo():
cardlist = mongo.db.rfiddetails.find({})
cardlist = list(cardlist)
for card in cardlist:
card["_id"] = str(card["_id"])
card = list(cardlist)
return jsonify({"success":"true","list":card})
@app.route("/transferStudent",methods=["POST"])
def transferStudent():
print "inside transferStudent"
print request.get_json()['RollNo']
mongo.db.student_info.update_one({"_id":ObjectId(request.get_json()['id'])} ,{"$set":{"Center":request.get_json()['Center'],"Batch":request.get_json()['Batch'],"RollNo":int(request.get_json()['RollNo'])}},upsert=False)
return jsonify({"success":"true","message":"Student Transfer successfully!"})
@app.route("/getTotalRollNumbers",methods=['POST'])
def getTotalRollNumbers():
count = mongo.db.student_info.find({"Batch": request.get_json()['Batch'],"isDeleted":"false"}).count()
#count = list(count)
return jsonify({"success":"true","totalcount":count})
@app.route('/createpdf',methods=['POST'])
def createpdf():
data = request.get_json()["pdfdata"]
name=request.get_json()["pdfname"]
folder=request.get_json()["foldername"]
file_path = "pdf/directory"
directory = os.path.dirname(file_path)
try:
os.mkdir("pdf/"+str(folder))
except:
print "allready present"
print ('pdf/'+str(folder)+"/"+str(name)+'.pdf')
with open(os.path.expanduser('pdf/'+str(folder)+"/"+str(name)+'.pdf'), 'wb') as fout:
fout.write(base64.decodestring(data))
if 1:
return jsonify({"success":"true"})
else:
return jsonify({"success":"false","message":"No Batches Found. Please Add Batch!"})
@app.route("/addBatch",methods=["POST"])
def addBatch():
formdata = request.get_json()
city = formdata['city']
branchName = formdata['branchName']
course = formdata['course']
standard = formdata['standard']
batchName = formdata['batchName']
time = formdata['time']
for batch in batchName:
print batch['value']
mongo.db.batches.insert({'city':city,'branchName':branchName,'course':course,'standard':standard,'time':time,'batchName':batch['value']})
#print batchName[0]['value']
return jsonify({"success":"true","message":"Added Successfully"})
@app.route('/getBatches',methods=['POST'])
def getBatches():
batches = mongo.db.coursedetails.find({})
batches = list(batches)
for batch in batches:
batch["_id"] = str(batch["_id"])
batch = list(batches)
if len(batch):
return jsonify({"success":"true","batches":batch})
else:
return jsonify({"success":"false","message":"No Batches Found. Please Add Batch!"})
#=================
@app.route('/createSyllabus',methods=['POST'])
def createSyllabus():
syllabus=request.get_json()
syllabus["syllabus"]={"subject":[]}
syllabus["isDeleted"]="false"
mongo.db.syllabus.insert(syllabus)
return jsonify({"success":"true","message":"syllabus created successfully"})
@app.route('/getSyllabus',methods=['POST'])
def getSyllabus():
if request.get_json():
syllabus=mongo.db.syllabus.find({"_id":ObjectId(request.get_json()["id"]),"isDeleted":"false"})
else:
syllabus=mongo.db.syllabus.find({"isDeleted":"false"})
syllabus=list(syllabus)
for s in syllabus:
s["_id"] = str(s["_id"])
if len(syllabus):
return jsonify({"success":"true","syllabus":syllabus})
else:
return jsonify({"success":"false","message":"no syllabus found"})
@app.route('/updatesyllabus',methods=['POST'])
def updatesyllabus():
schedule=request.get_json()['syllabus'][0]
mongo.db.syllabus.update_one({"_id":ObjectId(schedule['_id'])} ,{"$set":{"syllName":schedule['syllName'],"syllabus":schedule['syllabus'],"syllyear":schedule['syllyear']}},upsert=False)
return jsonify({"success":"true","message":"Update successfully!"})
@app.route('/getsyllabusname',methods=['POST'])
def getsyllabusname():
syllabusname= mongo.db.syllabus.find({"isDeleted":"false"},{"syllName":True,"syllyear":True,"_id":False})
syllabusname=list(syllabusname)
if len(syllabusname):
return jsonify({"success":"true","syllabusname":syllabusname})
else:
return jsonify({"success":"false","message":"No syllabus Found. Please Add syllabus!"})
# @app.route('/getsubjectforbatches',methods=['POST'])
# def getsubjectforbatches():
# #noty4aws
# subjects = mongo.db.dailyreport.find({"batch":request.get_json()["batch"]})
# subjects=list(subjects)
# subject=[]
# if len(subjects):
# for s in subjects[0]["syllabusdetails"]["syllabus"]["subject"]:
# subject.append(s["subject"])
# if len(subjects):
# return jsonify({"success":"true","subjects": subject})
# else:
# return jsonify({"success":"false","message":"No subjects Found. Please Add subjects!"})
@app.route('/getsubjectforbatches',methods=['POST'])
def getsubjectforbatches():
syllid = mongo.db.newdailyreport.find_one({"batch":request.get_json()["batch"],"isDeleted":"false"},{"syllid":True})
subjects = mongo.db.syllabus.find({"_id":ObjectId(syllid["syllid"])},{"_id":False})
subjects=list(subjects)
subject=[]
if len(subjects):
for s in subjects[0]["syllabus"]["subject"]:
subject.append(s["subject"])
if len(subjects):
return jsonify({"success":"true","subjects": subject})
else:
return jsonify({"success":"false","message":"No subjects Found. Please Add subjects!"})
@app.route('/gettopicsforsubject',methods=['POST'])
def gettopicsforsubject():
#noty4aws
syllid = mongo.db.newdailyreport.find_one({"batch":request.get_json()["batch"],"isDeleted":"false"},{"syllid":True})
subjects = mongo.db.syllabus.find({"_id":ObjectId(syllid["syllid"])},{"_id":False})
subjects=list(subjects)
topics=[]
if len(subjects):
for s in subjects[0]["syllabus"]["subject"]:
if (s["subject"]==request.get_json()["subject"]):
#print s["subject"]
topics=s["topic"]
if len(subjects):
return jsonify({"success":"true","topics": topics})
else:
return jsonify({"success":"false","message":"No subjects Found. Please Add subjects!"})
# @app.route('/gettopicsforsubject',methods=['POST'])
# def gettopicsforsubject():
# #noty4aws
# subjects = mongo.db.dailyreport.find({"batch":request.get_json()["batch"]})
# subjects=list(subjects)
# topics=[]
# if len(subjects):
# for s in subjects[0]["syllabusdetails"]["syllabus"]["subject"]:
# if (s["subject"]==request.get_json()["subject"]):
# #print s["subject"]
# topics=s["topic"]
# if len(subjects):
# return jsonify({"success":"true","topics": topics})
# else:
# return jsonify({"success":"false","message":"No subjects Found. Please Add subjects!"})
@app.route('/createdailyreport',methods=['POST'])
def createdailyreport():
dr={}
dr["batch"]=request.get_json()["syllBatch"]
dr["syllabusname"]=request.get_json()["syllName"]
dr["syllyear"]=request.get_json()["syllyear"]
syllabus= mongo.db.syllabus.find_one({"syllName":request.get_json()["syllName"],"syllyear":request.get_json()["syllyear"],"isDeleted":"false"})
syllabus["_id"]=str(syllabus["_id"])
dr["syllid"]=syllabus["_id"]
dr["isDeleted"]="false"
mongo.db.newdailyreport.insert(dr)
if len(syllabus):
return jsonify({"success":"true"})
else:
return jsonify({"success":"false","message":"No syllabus Found. Please Add syllabus!"})
# @app.route('/createdailyreport',methods=['POST'])
# def createdailyreport():
# # syllabusname="SSC SEMI 10th"
# # syllyear="2017-2018"
# # batch="Volcano"
# syllabusname=request.get_json()["syllName"]
# syllyear=request.get_json()["syllyear"]
# batch=request.get_json()["syllBatch"]
# syllabus= mongo.db.syllabus.find({"syllName":syllabusname,"syllyear":syllyear,})
# syllabus=list(syllabus)
# for s in syllabus:
# s["syllid"] = str(s["_id"])
# del s["_id"]
# dailyreport={};
# dailyreport["batch"]=batch
# dailyreport["syllabusdetails"]=syllabus[0]
# one_topic_max_lecture=[]
# temp={"hours":[],"teacherlist":[],"lecrecord":[]};
# for i in xrange(1,36):
# pass
# one_topic_max_lecture.append("")
# for sub in syllabus[0]["syllabus"]['subject']:
# for topic in sub["topic"]:
# temp["hours"].append("")
# temp["teacherlist"].append("")
# temp["lecrecord"].append(one_topic_max_lecture)
# dailyreport["lecturedetails"]=temp
# mongo.db.dailyreport.insert(dailyreport)
# if len(syllabus):
# return jsonify({"success":"true"})
# else:
# return jsonify({"success":"false","message":"No syllabus Found. Please Add syllabus!"})
@app.route('/editdailyreport',methods=['POST'])
def editdailyreport():
dailyreport=request.get_json()['dailyreport'][0]
mongo.db.schedules.update_one({"_id":ObjectId(dailyreport['_id'])} ,{"$set":{"lecturedetails":dailyreport['lecturedetails']}},upsert=False)
return jsonify({"success":"true","message":"Update successfully!"})
@app.route('/getdailyreport',methods=['POST'])
def getdailyreport():
if(request.get_json()):
dailyreport=mongo.db.dailyreport.find({"_id":ObjectId(request.get_json()['id'])})
else:
dailyreport=mongo.db.dailyreport.find({})
dailyreport = list(dailyreport)
for daily in dailyreport:
print "hello"
daily["_id"]=str(daily["_id"])
if len(dailyreport):
return jsonify({"success":"true","dailyreport":dailyreport})
else:
return jsonify({"success":"false","message":"No dailyreport Found. Please add it!"})
@app.route('/getnewdailyreport',methods=['POST'])
def getnewdailyreport():
fd=[]
if(request.get_json()):
dr={}
dailyreport=mongo.db.newdailyreport.find_one({"_id":ObjectId(request.get_json()['id'])},{"_id":False})
dr["batch"]=dailyreport["batch"]
dr["syllabusdetails"]=mongo.db.syllabus.find_one({"_id":ObjectId(dailyreport["syllid"])},{"_id":False})
one_topic_max_lecture=[]
lectures=mongo.db.staffattendancedetails.find({"batch":dr["batch"],"isDeleted":"false"},{"_id":False,"date":True,"staffid":True,"topic":True,"duration":True,"subject":True})
lectures=list(lectures)
temp={"teacherlist":[],"lecrecord":[]};
idx=0
for sub in dr["syllabusdetails"]["syllabus"]['subject']:
subname=sub["subject"]
tp=[]
t=[]
for topic in sub["topic"]:
temp["lecrecord"].append([])
temp["teacherlist"].append([])
topicname=topic["topicname"]
#lec =[]
lec =[]
for ll in lectures:
if(ll["topic"]==topicname and ll["subject"]==subname):
lec.append(ll)
# lec=mongo.db.staffattendancedetails.find({"topic":topicname,"subject":subname,"batch":dr["batch"],"isDeleted":"false"},{"_id":False,"date":True,"staffid":True,"topic":True,"duration":True})
# lec=list(lec)
templec={}
for l in lec:
if(l['duration']-60>20):
templec=l
templec["duration"]=l["duration"]-60
lec.append(l)
lec=sortlist(lec)
staff=[]
for l in lec:
if l["staffid"] not in staff:
staff.append(l["staffid"])
newlec=lec
for l in range(len(lec),20):
newlec.append({"date":"","topic":topicname})
tp.append(newlec)
tname=""
for i in range(0,len(staff)):
sd=mongo.db.staffdetails.find_one({"_id":ObjectId(staff[i])},{"_id":False,"fname":True,"lname":True})
#sd=list(sd)
if(i==0):
tname=tname+sd["fname"]+" " +sd["lname"]+""
else:
tname=tname+","+sd["fname"]+" " +sd["lname"]+""
t.append(tname)
temp["teacherlist"][idx]=t
temp["lecrecord"][idx]=tp
idx=idx+1
dr["lecturedetails"]=temp
fd.append(dr)
else:
temp={}
dailyreport=mongo.db.newdailyreport.find({"isDeleted":"false"})
dailyreport = list(dailyreport)
for daily in dailyreport:
print "hello"
temp={}
daily["_id"]=str(daily["_id"])
temp["_id"]=daily["_id"]
temp["batch"]=daily["batch"]
temp["syllabusdetails"]=mongo.db.syllabus.find_one({"isDeleted":"false","_id":ObjectId(daily['syllid'])},{"syllName":True,"syllyear":True,"_id":False})
fd.append(temp)
if len(fd):
return jsonify({"success":"true","dailyreport":fd})
else:
return jsonify({"success":"false","message":"No dailyreport Found. Please add it!"})
@app.route("/addcity",methods=["POST"])
def addcity():
cityname = request.get_json()["cityname"]
district = request.get_json()["district"]
state = request.get_json()["state"]
mongo.db.cities.insert({"cityname":cityname,"district":district,"state":state})
return jsonify({"success":"true","message":"Added Successfully"})
@app.route('/getcities',methods=['GET'])
def getcities():
cities = mongo.db.cities.find({})
cities = list(cities)
for city in cities:
city["_id"] = str(city["_id"])
city = list(cities)
if len(city):
return jsonify({"success":"true","cities":city})
else:
return jsonify({"success":"false","message":"No City Found. Please add City!"})
@app.route('/getstaffnames',methods=['POST'])
def getstaffnames():
staff = mongo.db.staffdetails.find({"isDeleted":"false"},{"fname":True,"lname":True,"_id":False})
staffs = list(staff)
names=[]
for staff in staffs:
names.append(staff['fname']+" "+staff['lname']);
if len(staffs):
return jsonify({"success":"true","staffs":names})
else:
return jsonify({"success":"false","message":"No Staff Found!"})
@app.route('/getsubjects',methods=['POST'])
def getsubjects():
subjId = request.get_json()["id"]
subject = mongo.db.staffdetails.find({"_id":ObjectId(subjId),"isDeleted":"false"},{"subject":True,"_id":False})
subjects = list(subject)
subjectlist = list(subjects)
if len(subjectlist):
return jsonify({"success":"true","subjectlist":subjectlist})
else:
return jsonify({"success":"false","message":"No Subject Found!"})
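# Register a new city admin: expects JSON with email, mobile, username and cityid.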
@app.route('/addcityadmin',methods=['POST'])
def addcityadmin():
email = request.get_json()["email"]
mobile = request.get_json()["mobile"]
username = request.get_json()["username"]
cityid=request.get_json()["cityid"]
mongo.db.cityadmin.insert({"email":email,"mobile":mobile,"username":username,"cityid":cityid})
return jsonify({"success":"true","message":"Added Successfully"})
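# Return all city admin records.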
@app.route('/getcityadmin',methods=['POST'])
def getcityadmin():
cityadmins = mongo.db.cityadmin.find({})
admins = list(cityadmins)
for | |
need polynomially many parameters and circuit evaluations.
# This is much cheaper than the :math:`3^m` we would need if we naively tried to construct the cost landscape exactly, without chopping after second order.
#
# Now this should be enough theory, so let's visualize the model that results from our trigonometric expansion.
# We'll use the coefficients and the ``model_cost`` function from above and sample a new random parameter position.
from mpl_toolkits.mplot3d import Axes3D
from itertools import product
# We actually make the plotting a function because we will reuse it below.
def plot_cost_and_model(f, model, params, shift_radius=5 * np.pi / 8, num_points=20):
"""Plot a function and a model of the function as well as its deviation."""
coords = np.linspace(-shift_radius, shift_radius, num_points)
X, Y = np.meshgrid(coords + params[0], coords + params[1])
# Compute the original cost function and the model on the grid.
Z_original = np.array([[f(params + np.array([t1, t2])) for t2 in coords] for t1 in coords])
Z_model = np.array([[model(np.array([t1, t2])) for t2 in coords] for t1 in coords])
# Prepare sampled points for plotting rods.
shifts = [-np.pi / 2, 0, np.pi / 2]
samples = []
for s1, s2 in product(shifts, repeat=2):
shifted_params = params + np.array([s1, s2])
samples.append([*(params+np.array([s2, s1])), f(shifted_params)])
# Display landscapes incl. sampled points and deviation.
alpha = 0.6
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, subplot_kw={"projection": "3d"}, figsize=(10, 4))
green = "#209494"
orange = "#ED7D31"
red = "xkcd:brick red"
surf = ax0.plot_surface(X, Y, Z_original, color=green, alpha=alpha)
ax0.set_title("Original energy and samples")
ax1.plot_surface(X, Y, Z_model, color=orange, alpha=alpha)
ax1.set_title("Model energy")
ax2.plot_surface(X, Y, Z_original - Z_model, color=red, alpha=alpha)
ax2.set_title("Deviation")
for s in samples:
ax0.plot([s[0]] * 2, [s[1]] * 2, [np.min(Z_original) - 0.2, s[2]], color="k")
for ax, z in zip((ax0, ax1), (f(params), model(0 * params))):
ax.plot([params[0]] * 2, [params[1]] * 2, [np.min(Z_original) - 0.2, z], color="k")
ax.scatter([params[0]], [params[1]], [z], color="k", marker="o")
plt.tight_layout(pad=2, w_pad=2.5)
# Get some fresh random parameters and the model coefficients
parameters = np.random.random(2) * 2 * np.pi
coeffs = get_model_data(circuit, parameters)
# Define a mapped model that has the model coefficients fixed.
mapped_model = lambda params: model_cost(params, *coeffs)
plot_cost_and_model(circuit, mapped_model, parameters)
###############################################################################
# In the first two plots, we see the true landscape, and the approximate model.
# The vertical rods indicate the points at which the original cost function
# was evaluated in order to obtain the model coefficients (we skip the additional
# evaluations for :math:`E^{(C)}`, though, for clarity of the plot).
# The rod with the bead on top indicates the reference point around which the model
# is built and at which it coincides with the original cost function up to second
# order. This is underlined in the third plot, where we see the difference between
# the model and true landscapes.
# Around the reference point the difference is very small and changes very slowly,
# only growing significantly for large simultaneous perturbations in both
# parameters. This already hints at the value of the model for local optimization.
#
# Quantum Analytic Descent
# ------------------------
#
# The underlying idea we will now try to exploit for optimization in VQEs is the following:
# if we can model the cost around the reference point well enough, we will be able to find a rough
# estimate of where the minimum of the landscape is.
# Granted, our model represents the true landscape less accurately the further we go away from the
# reference point :math:`\boldsymbol{\theta}_0`, but nonetheless the minimum *of the model*
# will bring us much closer to the minimum *of the true cost* than a random choice.
# Recall the complete strategy from above:
#
# #. Set an initial reference point :math:`\boldsymbol{\theta}_0`.
# #. Build the model :math:`\hat{E}(\boldsymbol{\theta})\approx E(\boldsymbol{\theta}_0+\boldsymbol{\theta})` at this point.
# #. Find the minimum :math:`\boldsymbol{\theta}_\text{min}` of the model.
# #. Set :math:`\boldsymbol{\theta}_0+\boldsymbol{\theta}_\text{min}` as the new reference point :math:`\boldsymbol{\theta}_0`, go back to Step 2.
# #. After convergence or a fixed number of models built, output the last minimum :math:`\boldsymbol{\theta}_\text{opt}=\boldsymbol{\theta}_0+\boldsymbol{\theta}_\text{min}`.
#
# This provides an iterative strategy which will take us to a good enough solution
# in fewer iterations than, for example, regular stochastic gradient descent (SGD).
# The procedure of Quantum Analytic Descent is also shown in the following flowchart. Note that the minimization
# of the model in Step 3 is carried out via an inner optimization loop.
#
# .. figure:: ../demonstrations/quantum_analytic_descent/flowchart.png
# :align: center
# :width: 80%
# :target: javascript:void(0)
#
# Using the functions from above, we can now implement the loop between Steps 2 and 4.
# Indeed, for a relatively small number of iterations we should already find a low enough value.
# If we look back at the circuit we defined, we notice that we are measuring the observable
#
# .. math ::
#
# Z\otimes Z=\begin{pmatrix}
# 1 & 0 & 0 & 0 \\
# 0 & -1 & 0 & 0 \\
# 0 & 0 & -1 & 0 \\
# 0 & 0 & 0 & 1 \end{pmatrix},
#
# which has the eigenvalues :math:`1` and :math:`-1`.
# This means our function is bounded and takes values in the range :math:`[-1,1]`, so that the global minimum should be around :math:`-1` if our circuit is expressive enough.
# Let's try it and apply the full optimization strategy:
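# Before running it, here is a quick, purely illustrative sanity check of the eigenvalue
# claim above: Z⊗Z is diagonal, so its eigenvalues are simply its diagonal entries.
ZZ = np.kron(np.diag([1.0, -1.0]), np.diag([1.0, -1.0]))
print("Eigenvalues of Z⊗Z:", np.diag(ZZ))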
import copy
# Set the number of iterations of Steps 2, 3, and 4
N_iter_outer = 3
N_iter_inner = 50
past_coeffs = []
past_parameters = []
circuit_log = [circuit(parameters)]
model_logs = []
for iter_outer in range(N_iter_outer):
# Model building phase of outer iteration - step 2.
coeffs = get_model_data(circuit, parameters)
past_coeffs.append(copy.deepcopy(coeffs))
past_parameters.append(parameters.copy())
# Map the model to be only depending on the parameters, not the coefficients.
mapped_model = lambda params: model_cost(params, *coeffs)
if iter_outer == 0:
print(f"True energy at initial parameters: {np.round(coeffs[0], decimals=4)}\n")
opt = qml.AdamOptimizer(0.05)
# Recall that the parameters of the model are relative coordinates.
# Correspondingly, we initialize at 0, not at parameters.
relative_parameters = np.zeros_like(parameters)
model_log = [mapped_model(relative_parameters)]
print(f"-Iteration {iter_outer+1}-")
# Run the optimizer for N_iter_inner epochs - Step 3.
for iter_inner in range(N_iter_inner):
relative_parameters = opt.step(mapped_model, relative_parameters)
circuit_log.append(circuit(parameters + relative_parameters))
model_log.append(mapped_model(relative_parameters))
if (iter_inner + 1) % 50 == 0:
E_model = mapped_model(relative_parameters)
print(
f"Epoch {iter_inner+1:4d}: Model cost = {np.round(E_model, 4)}",
f" at relative parameters {np.round(relative_parameters, 4)}",
)
# Store the relative parameters that minimize the model by adding the shift - Step 4.
parameters += relative_parameters
E_original = circuit(parameters)
print(f"True energy at the minimum of the model: {E_original}")
print(f"New reference parameters: {np.round(parameters, 4)}\n")
model_logs.append(model_log)
###############################################################################
# This looks great! Quantum Analytic Descent found the minimum.
#
# Inspecting the models
# ^^^^^^^^^^^^^^^^^^^^^
#
# Let us take a look at the intermediate models QAD built:
mapped_model = lambda params: model_cost(params, *past_coeffs[0])
plot_cost_and_model(circuit, mapped_model, past_parameters[0])
###############################################################################
# **Iteration 1:** We see the cost function and the model around our starting point. This is the same as the plot before.
mapped_model = lambda params: model_cost(params, *past_coeffs[1])
plot_cost_and_model(circuit, mapped_model, past_parameters[1])
###############################################################################
# **Iteration 2:** Now we observe that the model resembles the original landscape more closely. In addition, the minimum of the model is within the displayed range -- we're getting closer.
mapped_model = lambda params: model_cost(params, *past_coeffs[2])
plot_cost_and_model(circuit, mapped_model, past_parameters[2])
###############################################################################
# **Iteration 3:** Both the model and the original cost function now show a minimum close to our parameter position, so Quantum Analytic Descent has converged.
# Note how the larger deviations of the model close to the boundaries are not a problem at all because we only use the model in the central area
# in which both the original energy and the model form a convex bowl and the deviation plateaus at zero.
#
# Optimization behaviour
# ^^^^^^^^^^^^^^^^^^^^^^
#
# If we pay close attention to the values printed during the optimization, we can identify a curious phenomenon.
# At the last epochs within some iterations, the *model cost* goes beyond :math:`-1`.
# Let's visualize this behavior more clearly.
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
ax.plot(circuit_log, color="#209494", label="True")
for i in range(N_iter_outer):
x = range(i * N_iter_inner, (i + 1) * N_iter_inner + 1)
(line,) = ax.plot(x, model_logs[i], ls="--", color="#ED7D31")
if i == 0:
line.set_label("Model")
ax.plot([0, N_iter_outer * N_iter_inner], [-1.0, -1.0], lw=0.6, color="0.6", label="Solution")
ax.set_xlabel("epochs")
ax.set_ylabel("cost")
leg = ax.legend()
###############################################################################
# Each of the orange lines corresponds to minimizing the model constructed at a
# different reference point.
# We can now more easily appreciate the phenomenon we just described:
# towards the end of each | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['CustomPluginArgs', 'CustomPlugin']
@pulumi.input_type
class CustomPluginArgs:
def __init__(__self__, *,
content_type: pulumi.Input[str],
location: pulumi.Input['CustomPluginLocationArgs'],
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a CustomPlugin resource.
:param pulumi.Input[str] content_type: The type of the plugin file. Allowed values are `ZIP` and `JAR`.
:param pulumi.Input['CustomPluginLocationArgs'] location: Information about the location of a custom plugin. See below.
:param pulumi.Input[str] description: A summary description of the custom plugin.
:param pulumi.Input[str] name: The name of the custom plugin.
"""
pulumi.set(__self__, "content_type", content_type)
pulumi.set(__self__, "location", location)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> pulumi.Input[str]:
"""
The type of the plugin file. Allowed values are `ZIP` and `JAR`.
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: pulumi.Input[str]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter
def location(self) -> pulumi.Input['CustomPluginLocationArgs']:
"""
Information about the location of a custom plugin. See below.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: pulumi.Input['CustomPluginLocationArgs']):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A summary description of the custom plugin.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the custom plugin.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _CustomPluginState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
latest_revision: Optional[pulumi.Input[int]] = None,
location: Optional[pulumi.Input['CustomPluginLocationArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering CustomPlugin resources.
:param pulumi.Input[str] arn: the Amazon Resource Name (ARN) of the custom plugin.
:param pulumi.Input[str] content_type: The type of the plugin file. Allowed values are `ZIP` and `JAR`.
:param pulumi.Input[str] description: A summary description of the custom plugin.
:param pulumi.Input[int] latest_revision: an ID of the latest successfully created revision of the custom plugin.
:param pulumi.Input['CustomPluginLocationArgs'] location: Information about the location of a custom plugin. See below.
:param pulumi.Input[str] name: The name of the custom plugin.
:param pulumi.Input[str] state: the state of the custom plugin.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if description is not None:
pulumi.set(__self__, "description", description)
if latest_revision is not None:
pulumi.set(__self__, "latest_revision", latest_revision)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if state is not None:
pulumi.set(__self__, "state", state)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
the Amazon Resource Name (ARN) of the custom plugin.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the plugin file. Allowed values are `ZIP` and `JAR`.
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A summary description of the custom plugin.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="latestRevision")
def latest_revision(self) -> Optional[pulumi.Input[int]]:
"""
an ID of the latest successfully created revision of the custom plugin.
"""
return pulumi.get(self, "latest_revision")
@latest_revision.setter
def latest_revision(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "latest_revision", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input['CustomPluginLocationArgs']]:
"""
Information about the location of a custom plugin. See below.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input['CustomPluginLocationArgs']]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the custom plugin.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
the state of the custom plugin.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
class CustomPlugin(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
content_type: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[pulumi.InputType['CustomPluginLocationArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides an Amazon MSK Connect Custom Plugin Resource.
## Example Usage
### Basic configuration
```python
import pulumi
import pulumi_aws as aws
example_bucket_v2 = aws.s3.BucketV2("exampleBucketV2")
example_bucket_objectv2 = aws.s3.BucketObjectv2("exampleBucketObjectv2",
bucket=example_bucket_v2.id,
key="<KEY>",
source=pulumi.FileAsset("debezium.zip"))
example_custom_plugin = aws.mskconnect.CustomPlugin("exampleCustomPlugin",
content_type="ZIP",
location=aws.mskconnect.CustomPluginLocationArgs(
s3=aws.mskconnect.CustomPluginLocationS3Args(
bucket_arn=example_bucket_v2.arn,
file_key=example_bucket_objectv2.key,
),
))
```
## Import
MSK Connect Custom Plugin can be imported using the plugin's `arn`, e.g.,
```sh
$ pulumi import aws:mskconnect/customPlugin:CustomPlugin example 'arn:aws:kafkaconnect:eu-central-1:123456789012:custom-plugin/debezium-example/abcdefgh-1234-5678-9abc-defghijklmno-4'
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] content_type: The type of the plugin file. Allowed values are `ZIP` and `JAR`.
:param pulumi.Input[str] description: A summary description of the custom plugin.
:param pulumi.Input[pulumi.InputType['CustomPluginLocationArgs']] location: Information about the location of a custom plugin. See below.
:param pulumi.Input[str] name: The name of the custom plugin.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: CustomPluginArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an Amazon MSK Connect Custom Plugin Resource.
## Example Usage
### Basic configuration
```python
import pulumi
import pulumi_aws as aws
example_bucket_v2 = aws.s3.BucketV2("exampleBucketV2")
example_bucket_objectv2 = aws.s3.BucketObjectv2("exampleBucketObjectv2",
bucket=example_bucket_v2.id,
key="<KEY>",
source=pulumi.FileAsset("debezium.zip"))
example_custom_plugin = aws.mskconnect.CustomPlugin("exampleCustomPlugin",
content_type="ZIP",
location=aws.mskconnect.CustomPluginLocationArgs(
s3=aws.mskconnect.CustomPluginLocationS3Args(
bucket_arn=example_bucket_v2.arn,
file_key=example_bucket_objectv2.key,
),
))
```
## Import
MSK Connect Custom Plugin can be imported using the plugin's `arn`, e.g.,
```sh
$ pulumi import aws:mskconnect/customPlugin:CustomPlugin example 'arn:aws:kafkaconnect:eu-central-1:123456789012:custom-plugin/debezium-example/abcdefgh-1234-5678-9abc-defghijklmno-4'
```
:param str resource_name: The name of the resource.
:param CustomPluginArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CustomPluginArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
content_type: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[pulumi.InputType['CustomPluginLocationArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = CustomPluginArgs.__new__(CustomPluginArgs)
if content_type is None and not opts.urn:
raise TypeError("Missing required property 'content_type'")
__props__.__dict__["content_type"] = content_type
__props__.__dict__["description"] = description
if location is None and not opts.urn:
raise TypeError("Missing required property 'location'")
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["arn"] = None
__props__.__dict__["latest_revision"] = None
__props__.__dict__["state"] = None
super(CustomPlugin, __self__).__init__(
'aws:mskconnect/customPlugin:CustomPlugin',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
latest_revision: Optional[pulumi.Input[int]] = None,
location: Optional[pulumi.Input[pulumi.InputType['CustomPluginLocationArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None) -> 'CustomPlugin':
"""
Get an existing CustomPlugin resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: the Amazon Resource Name (ARN) of the custom plugin.
:param pulumi.Input[str] content_type: The type of the plugin file. Allowed values are `ZIP` and `JAR`.
:param pulumi.Input[str] description: A summary description of the custom plugin.
:param pulumi.Input[int] latest_revision: an ID of the latest successfully created revision of the custom plugin.
:param pulumi.Input[pulumi.InputType['CustomPluginLocationArgs']] location: Information about the location of a custom plugin. See below.
:param pulumi.Input[str] name: The name of the custom plugin.
:param pulumi.Input[str] state: the state of the custom plugin.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _CustomPluginState.__new__(_CustomPluginState)
__props__.__dict__["arn"] = arn
__props__.__dict__["content_type"] = content_type
__props__.__dict__["description"] = description
__props__.__dict__["latest_revision"] = latest_revision
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["state"] = state
return CustomPlugin(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
the Amazon Resource Name (ARN) of the custom plugin.
"""
| |
production quantities')
resid = Float(2.0, desc='Residual value at end of lifetime')
roi = Float(10.0, desc='Return on investment (Triggers calculation of required fare)')
sfc = Float(0.6, units='lb/h/lb', desc='Engine specific fuel consumption')
taxrat = Float(0.33, desc='Corporate tax rate for ROI calculations')
temp = Float(1800.0, units='degF', desc='Maximum turbine inlet temperature')
class FlopsWrapper_input_costin(VariableTree):
"""Container for input.costin"""
# VariableTrees
Basic = VarTree(FlopsWrapper_input_costin_Basic())
Cost_Technology = VarTree(FlopsWrapper_input_costin_Cost_Technology())
Mission_Performance = VarTree(FlopsWrapper_input_costin_Mission_Performance())
class FlopsWrapper_input_confin_Objective(VariableTree):
"""Container for input.confin.Objective"""
# OpenMDAO Public Variables
ofg = Float(0.0, desc='Objective function weighting factor for gross weight \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)')
off = Float(1.0, desc='Objective function weighting factor for mission fuel \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)')
ofm = Float(0.0, desc='Objective function weighting factor for Mach*(L/D), should be negative to maximize \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)')
ofr = Float(0.0, desc='Objective function weighting factor for Range, should be negative to maximize. \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)')
ofc = Float(0.0, desc='Objective function weighting factor for Cost \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)')
osfc = Float(0.0, desc='Objective function weighting factor for Specific Fuel Consumption at the engine design point. Generally used only for engine design cases (IANAL = 4). \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)')
ofnox = Float(0.0, desc='Objective function weighting factor for NOx emissions \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)')
ofnf = Float(0.0, desc='Objective function weighting factor for flyover noise (used primarily for contour plots) \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)')
ofns = Float(0.0, desc='Objective function weighting factor for sideline noise (used primarily for contour plots) \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)')
ofnfom = Float(0.0, desc='Objective function weighting factor for noise figure of merit \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)')
oarea = Float(0.0, desc='Objective function weighting factor for area of noise footprint (not implemented) \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)')
ofh = Float(0.0, desc='Objective function weighting factor for hold time for segment NHOLD (See Namelist &MISSIN) \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)')
class FlopsWrapper_input_confin_Design_Variables(VariableTree):
"""Container for input.confin.Design_Variables"""
# OpenMDAO Public Variables
gw = Array(dtype=numpy_float64, units='lb', desc='GW(0)=Ramp weight (Required. If IRW = 1, a good initial guess must be input.)\nGW(1)=Activity status, active if > 0\nGW(2)=Lower bound\nGW(3)=Upper bound\nGW(4)=Optimization scale factor')
ar = Array(dtype=numpy_float64, desc='AR(0)=Wing aspect ratio\nAR(1)=Activity status, active if > 0\nAR(2)=Lower bound\nAR(3)=Upper bound\nAR(4)=Optimization scale factor')
thrust = Array(dtype=numpy_float64, units='lb', desc='THRUST(0)=Maximum rated thrust per engine, or thrust-weight ratio if TWR = -1.\nTHRUST(1)=Activity status, active if > 0\nTHRUST(2)=Lower bound\nTHRUST(3)=Upper bound\nTHRUST(4)=Optimization scale factor')
sw = Array(dtype=numpy_float64, units='ft*ft', desc='SW(0)=Reference wing area, or wing loading if WSR = -1.\nSW(1)=Activity status, active if > 0\nSW(2)=Lower bound\nSW(3)=Upper bound\nSW(4)=Optimization scale factor')
tr = Array(dtype=numpy_float64, desc='TR(0)=Taper ratio of the wing (Required)\nTR(1)=Activity status, active if > 0\nTR(2)=Lower bound\nTR(3)=Upper bound\nTR(4)=Optimization scale factor')
sweep = Array(dtype=numpy_float64, units='deg', desc='SWEEP(0)=Quarter-chord sweep angle of the wing (Required)\nSWEEP(1)=Activity status, active if > 0\nSWEEP(2)=Lower bound\nSWEEP(3)=Upper bound\nSWEEP(4)=Optimization scale factor')
tca = Array(dtype=numpy_float64, desc='TCA(0)=Wing thickness-chord ratio (weighted average) (Required)\nTCA(1)=Activity status, active if > 0\nTCA(2)=Lower bound\nTCA(3)=Upper bound\nTCA(4)=Optimization scale factor')
vcmn = Array(dtype=numpy_float64, desc='VCMN(0)=Cruise Mach number (Required)\nVCMN(1)=Activity status, active if > 0\nVCMN(2)=Lower bound\nVCMN(3)=Upper bound\nVCMN(4)=Optimization scale factor')
ch = Array(dtype=numpy_float64, units='ft', desc='CH(0)=Maximum cruise altitude (Required)\nCH(1)=Activity status, active if > 0\nCH(2)=Lower bound\nCH(3)=Upper bound\nCH(4)=Optimization scale factor')
varth = Array(dtype=numpy_float64, desc='VARTH(0)=Thrust derating factor for takeoff noise Fraction of full thrust used in takeoff\nVARTH(1)=Activity status, active if > 0\nVARTH(2)=Lower bound\nVARTH(3)=Upper bound\nVARTH(4)=Optimization scale factor')
rotvel = Array(dtype=numpy_float64, desc='ROTVEL(0)=Rotation velocity for takeoff noise abatement (default is minimum required to meet takeoff performance constraints)\nROTVEL(1)=Activity status, active if > 0\nROTVEL(2)=Lower bound\nROTVEL(3)=Upper bound\nROTVEL(4)=Optimization scale factor')
plr = Array(dtype=numpy_float64, desc='PLR(0)=Thrust fraction after programmed lapse rate (default thrust is specified in each segment)\nPLR(1)=Activity status, active if > 0\nPLR(2)=Lower bound\nPLR(3)=Upper bound\nPLR(4)=Optimization scale factor')
etit = Array(dtype=numpy_float64, units='degR', desc='ETIT(0)=Engine design point turbine entry temperature\nETIT(1)=Activity status, active if > 0\nETIT(2)=Lower bound\nETIT(3)=Upper bound\nETIT(4)=Optimization scale factor')
eopr = Array(dtype=numpy_float64, desc='EOPR(0)=Overall pressure ratio\nEOPR(1)=Activity status, active if > 0\nEOPR(2)=Lower bound\nEOPR(3)=Upper bound\nEOPR(4)=Optimization scale factor')
efpr = Array(dtype=numpy_float64, desc='EFPR(0)=Fan pressure ratio (turbofans only)\nEFPR(1)=Activity status, active if > 0\nEFPR(2)=Lower bound\nEFPR(3)=Upper bound\nEFPR(4)=Optimization scale factor')
ebpr = Array(dtype=numpy_float64, desc='EBPR(0)=Bypass ratio (turbofans only)\nEBPR(1)=Activity status, active if > 0\nEBPR(2)=Lower bound\nEBPR(3)=Upper bound\nEBPR(4)=Optimization scale factor')
ettr = Array(dtype=numpy_float64, desc='ETTR(0)=Engine throttle ratio defined as the ratio of the maximum allowable turbine inlet temperature divided by the design point turbine inlet temperature.\nIf ETTR is greater than ETIT, it is assumed to be the maximum allowable turbine inlet temperature.\nETTR(1)=Activity status, active if > 0\nETTR(2)=Lower bound\nETTR(3)=Upper bound\nETTR(4)=Optimization scale factor')
ebla = Array(dtype=numpy_float64, units='deg', desc='EBLA(0)=Blade angle for fixed pitch propeller\nEBLA(1)=Activity status, active if > 0\nEBLA(2)=Lower bound\nEBLA(3)=Upper bound\nEBLA(4)=Optimization scale factor')
class FlopsWrapper_input_confin_Basic(VariableTree):
"""Container for input.confin.Basic"""
# OpenMDAO Public Variables
desrng = Float(0.0, desc='Design range (or endurance; see INDR in Namelist &MISSIN)\nRequired - if IRW = 2 in Namelist &MISSIN, the range is computed, but a reasonable guess must still be input')
wsr = Float(0.0, desc='Required wing loading if > 0.\nDo not set WSR > 0 during optimization or if wing area is being varied.\nInterpret SW as wing loading for parametric | |
0] - origin) # NB. IJK ordering
if det == 0.0:
log.warning('indeterminate handedness in cell ijk0 [{}, {}, {}]'.format(cell_kji[2], cell_kji[1], cell_kji[0]))
return None
if det > 0.0:
ijk_is_left_handed = xyz_is_left_handed
else:
ijk_is_left_handed = not xyz_is_left_handed
if ijk_is_left_handed:
return 'left'
return 'right'
# end of def determine_corp_ijk_handedness()
##########################################################################################
##########################################################################################
# def determine_corp_extent():
def determine_corp_extent(corner_points, tolerance = 0.003):
"""Returns the [nk, nj, ni] extent of a grid derived from 7D corner points in which all cells have temporarily been arranged along the I axis."""
def neighbours(corner_points, sextuple_cell_a_p1, sextuple_cell_a_p2, sextuple_cell_b_p1, sextuple_cell_b_p2,
tolerance):
# allows for reversal of points (or not) in neighbouring cell
if ((vec.manhatten_distance(corner_points[sextuple_cell_a_p1], corner_points[sextuple_cell_b_p1]) <= tolerance)
and (vec.manhatten_distance(corner_points[sextuple_cell_a_p2], corner_points[sextuple_cell_b_p2]) <=
tolerance)):
return True
if ((vec.manhatten_distance(corner_points[sextuple_cell_a_p1], corner_points[sextuple_cell_b_p2]) <= tolerance)
and (vec.manhatten_distance(corner_points[sextuple_cell_a_p2], corner_points[sextuple_cell_b_p1]) <=
tolerance)):
return True
return False
assert (corner_points.ndim == 7 and corner_points.shape[:2] == (1, 1))
confirmation = 3 # number of identical results needed for each of NI and NJ
max_failures = 100 # maximum number of failed random cells for each of NI and NJ
min_cell_length = 10.0 * tolerance
cell_count = corner_points.shape[2]
prime_factorization = factors.factorize(cell_count)
log.debug('cell count is ' + str(cell_count) + '; prime factorization: ' + str(prime_factorization))
possible_extents = factors.all_factors_from_primes(prime_factorization)
log.debug('possible extents are: ' + str(possible_extents))
ni = None
redundancy = confirmation
remaining_attempts = max_failures
while redundancy:
kji_cell = random_cell(corner_points, tolerance = min_cell_length)
found = False
for e in possible_extents:
candidate = kji_cell[2] + e
if candidate >= cell_count:
continue
if neighbours(corner_points, (0, 0, kji_cell[2], 0, 1, 0), (0, 0, kji_cell[2], 0, 1, 1),
(0, 0, candidate, 0, 0, 0), (0, 0, candidate, 0, 0, 1), tolerance):
if ni is not None and ni != e:
log.error('inconsistent NI values of {} and {} determined from corner points'.format(ni, e))
return None
found = True
ni = e
redundancy -= 1
break
if not found:
remaining_attempts -= 1
if remaining_attempts <= 0:
log.error('failed to determine NI from corner points (out of tries)') # could assume NJ = 1 here
return None
log.info('NI determined from corner points to be ' + str(ni))
if ni > 1:
ni_prime_factors = factors.factorize(ni)
factors.remove_subset(prime_factorization, ni_prime_factors)
log.debug('remaining prime factors after accounting for NI are: ' + str(prime_factorization))
possible_extents = factors.all_factors_from_primes(prime_factorization)
log.debug('possible extents for NJ & NK are: ' + str(possible_extents))
nj = None
redundancy = confirmation
remaining_attempts = max_failures
while redundancy:
kji_cell = random_cell(corner_points)
found = False
for e in possible_extents:
candidate = kji_cell[2] + (e * ni)
if candidate >= cell_count:
continue
if vec.manhatten_distance(corner_points[0, 0, kji_cell[2], 1, 0, 0], corner_points[0, 0, candidate, 0, 0,
0]) <= tolerance:
if nj is not None and nj != e:
log.error('inconsistent NJ values of {} and {} determined from corner points'.format(nj, e))
return None
found = True
nj = e
redundancy -= 1
break
if not found:
remaining_attempts -= 1
if remaining_attempts <= 0:
log.error(
'failed to determine NJ from corner points (out of tries)') # could assume or check if NK = 1 here
return None
log.info('NJ determined from corner points to be ' + str(nj))
nk, remainder = divmod(cell_count, ni * nj)
assert (remainder == 0)
log.info('NK determined from corner points to be ' + str(nk))
assert (nk in possible_extents)
return [nk, nj, ni]
# end def determine_corp_extent():
##########################################################################################
##########################################################################################
# def translate_corp():
def translate_corp(corner_points, x_shift = None, y_shift = None, min_xy = None, preserve_digits = None):
"""Adjusts x and y values of corner points by a constant offset."""
assert (corner_points.ndim == 7)
if min_xy is None:
minimum_xy = 0.0
else:
minimum_xy = min_xy
if x_shift is None:
x_sub = np.min(corner_points[:, :, :, :, :, :, 0]) - minimum_xy
else:
x_sub = -x_shift
if y_shift is None:
y_sub = np.min(corner_points[:, :, :, :, :, :, 1]) - minimum_xy
else:
y_sub = -y_shift
if preserve_digits is not None:
divisor = maths.pow(10.0, preserve_digits)
x_sub = divisor * maths.floor(x_sub / divisor)
y_sub = divisor * maths.floor(y_sub / divisor)
log.info('translating corner points by %3.1f in x and %3.1f in y', -x_sub, -y_sub)
corner_points[:, :, :, :, :, :, 0] -= x_sub
corner_points[:, :, :, :, :, :, 1] -= y_sub
# end of def translate_corp()
##########################################################################################
def triangles_for_cell_faces(cp):
"""Returns numpy array of shape (3, 2, 4, 3, 3) with axes being kji, -+, triangle within face, triangle corner, xyz.
args:
cp (numpy float array of shape (2, 2, 2, 3)): single cell corner point array in pagoda protocol
returns:
numpy float array of shape (3, 2, 4, 3, 3) holding triangle corner coordinates for cell faces represented with
quad triangles
note:
resqpy.surface also contains methods for working with cell faces as triangulated sets
"""
tri = np.empty((3, 2, 4, 3, 3))
# create face centre points and assign as one vertex in each of the 4 triangles for the face
tri[0, :, :, 0] = np.mean(cp, axis = (1, 2)).reshape((2, 1, 3)).repeat(4, axis = 1).reshape(
(2, 4, 3)) # k face centres
tri[1, :, :, 0] = np.mean(cp, axis = (0, 2)).reshape((2, 1, 3)).repeat(4, axis = 1).reshape(
(2, 4, 3)) # j face centres
tri[2, :, :, 0] = np.mean(cp, axis = (0, 1)).reshape((2, 1, 3)).repeat(4, axis = 1).reshape(
(2, 4, 3)) # i face centres
# k faces
tri[0, :, 0, 1] = cp[:, 0, 0]
tri[0, :, 0, 2] = cp[:, 0, 1]
tri[0, :, 1, 1] = cp[:, 0, 1]
tri[0, :, 1, 2] = cp[:, 1, 1]
tri[0, :, 2, 1] = cp[:, 1, 1]
tri[0, :, 2, 2] = cp[:, 1, 0]
tri[0, :, 3, 1] = cp[:, 1, 0]
tri[0, :, 3, 2] = cp[:, 0, 0]
# j faces
tri[1, :, 0, 1] = cp[0, :, 0]
tri[1, :, 0, 2] = cp[0, :, 1]
tri[1, :, 1, 1] = cp[0, :, 1]
tri[1, :, 1, 2] = cp[1, :, 1]
tri[1, :, 2, 1] = cp[1, :, 1]
tri[1, :, 2, 2] = cp[1, :, 0]
tri[1, :, 3, 1] = cp[1, :, 0]
tri[1, :, 3, 2] = cp[0, :, 0]
# i faces
tri[2, :, 0, 1] = cp[0, 0, :]
tri[2, :, 0, 2] = cp[0, 1, :]
tri[2, :, 1, 1] = cp[0, 1, :]
tri[2, :, 1, 2] = cp[1, 1, :]
tri[2, :, 2, 1] = cp[1, 1, :]
tri[2, :, 2, 2] = cp[1, 0, :]
tri[2, :, 3, 1] = cp[1, 0, :]
tri[2, :, 3, 2] = cp[0, 0, :]
return tri
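# Illustrative usage sketch (not executed here): for a single unit-cube cell in the
# pagoda protocol described in the docstring, the returned array has one fan of
# 4 triangles per cell face:
# cp = np.zeros((2, 2, 2, 3))
# cp[..., 0] = np.arange(2).reshape((1, 1, 2))  # x varies with the i index
# cp[..., 1] = np.arange(2).reshape((1, 2, 1))  # y varies with the j index
# cp[..., 2] = np.arange(2).reshape((2, 1, 1))  # z varies with the k index
# tri = triangles_for_cell_faces(cp)
# assert tri.shape == (3, 2, 4, 3, 3)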
# end of grid_functions module
##########################################################################################
def actual_pillar_shape(pillar_points, tolerance = 0.001):
"""Returns 'curved', 'straight' or 'vertical' for shape of fully defined points array of shape (nk + k_gaps + 1,
..., 3)."""
assert pillar_points.ndim >= 3 and pillar_points.shape[-1] == 3
pp = pillar_points.reshape((pillar_points.shape[0], -1, 3))
from_top = pp - pp[0]
xy_drift = np.abs(from_top[:, :, 0]) + np.abs(
from_top[:, :, 1]) # use Manhattan distance as cheap proxy for true distance
if np.max(xy_drift) <= tolerance:
return 'vertical'
if np.max(xy_drift[-1]) <= tolerance:
return 'curved' # top & bottom are vertically aligned, so pillar must be curved
# where z variation is tiny (null pillar), don't interpolate, just treat these pillars as straight
# elsewhere find drift from vector defined by from_top[-1]
null_pillar_mask = (abs(from_top[-1, :, 2]) <= tolerance)
from_top[-1, :, 2] = np.where(null_pillar_mask, tolerance, from_top[-1, :, 2]) # avoid divide by zero issues
z_fraction = from_top[:, :, 2] / from_top[-1, :, 2]
xy_drift = from_top[:, :, :2] - z_fraction.reshape((pp.shape[0], pp.shape[1], 1)) * from_top[-1, :, :2].reshape(
(1, pp.shape[1], 2))
straight = (np.max(np.sum(np.abs(xy_drift), axis = -1), axis = 0) <= tolerance)
masked_straight = np.where(null_pillar_mask, True, straight)
if np.all(masked_straight):
return 'straight'
return 'curved'
##########################################################################################
def columns_to_nearest_split_face(grid):
"""Returns a numpy integer array of shape (NJ, NI) being number of cells to nearest split edge (Manhattan
distance)."""
if not grid.has_split_coordinate_lines:
return None
j_col_faces_split, i_col_faces_split = grid.split_column_faces()
abutting = np.zeros((grid.nj, grid.ni), dtype = bool)
abutting[:-1, :] = j_col_faces_split
abutting[1:, :] = np.logical_or(abutting[1:, :], j_col_faces_split)
abutting[:, :-1] = np.logical_or(abutting[:, :-1], i_col_faces_split)
abutting[:, 1:] = np.logical_or(abutting[:, 1:], i_col_faces_split)
framed = np.full((grid.nj + 2, grid.ni + 2), grid.nj + grid.ni, dtype = int)
framed[1:-1, 1:-1] = np.where(abutting, 0, grid.nj + grid.ni)
while True:
| |
None: ['Image'],
},
# IVUSPullbackRate
0x00183101L: {
'US MULTI-FRAME IMAGE IOD': ['Image'],
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'US IMAGE IOD': ['Image'],
None: ['Image'],
},
# TypeOfFilters
0x00181161L: {
'XRF IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
None: ['Image'],
},
# PlanningLandmarkLineSequence
0x00686510L: {
'GENERIC IMPLANT TEMPLATE IOD': ['Implant Template'],
None: ['Implant Template'],
},
# VisualFieldVerticalExtent
0x00240011L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# XRayImageReceptorAngle
0x3002000EL: {
'RT IMAGE IOD': ['Image'],
None: ['Image'],
},
# DetectorConditionsNominalFlag
0x00187000L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# StudyID
0x00200010L: {
'BASIC STRUCTURED DISPLAY IOD': ['Study'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Study'],
'RT BRACHY TREATMENT RECORD IOD': ['Study'],
'RT STRUCTURE SET IOD': ['Study'],
'RT PLAN IOD': ['Study'],
'CR IMAGE IOD': ['Study'],
'RAW DATA IOD': ['Study'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Study'],
'ENHANCED MR IMAGE IOD': ['Study'],
'BASIC CARDIAC EP IOD': ['Study'],
'RT TREATMENT SUMMARY RECORD IOD': ['Study'],
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
'12-LEAD ECG IOD': ['Study'],
'RESPIRATORY WAVEFORM IOD': ['Study'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Study'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Study'],
'BASIC VOICE AUDIO IOD': ['Study'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Study'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Study'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Study'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Study'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Study'],
'BASIC TEXT SR IOD': ['Study'],
'NM IMAGE IOD': ['Study'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'LENSOMETRY MEASUREMENTS IOD': ['Study'],
'MR SPECTROSCOPY IOD': ['Study'],
'ENCAPSULATED PDF IOD': ['Study'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Study'],
'CHEST CAD SR IOD': ['Study'],
'HEMODYNAMIC IOD': ['Study'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Study'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Study'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Study'],
'ENHANCED MR COLOR IMAGE IOD': ['Study'],
'ENHANCED CT IMAGE IOD': ['Study'],
'X-RAY RADIATION DOSE SR IOD': ['Study'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Study'],
'PROCEDURE LOG IOD': ['Study'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Study'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Study'],
'STEREOMETRIC RELATIONSHIP IOD': ['Study'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Study'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Study'],
'VL ENDOSCOPIC IMAGE IOD': ['Study'],
'KERATOMETRY MEASUREMENTS IOD': ['Study'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Study'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Study'],
'COMPREHENSIVE SR IOD': ['Study'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Study'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Study'],
'SPATIAL FIDUCIALS IOD': ['Study'],
'RT ION PLAN IOD': ['Study'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Study'],
'CT IMAGE IOD': ['Study'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Study'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Study'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Study'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'RT DOSE IOD': ['Study'],
'AMBULATORY ECG IOD': ['Study'],
'SURFACE SEGMENTATION IOD': ['Study'],
'MAMMOGRAPHY CAD SR IOD': ['Study'],
'VL MICROSCOPIC IMAGE IOD': ['Study'],
'RT BEAMS TREATMENT RECORD IOD': ['Study'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Study'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Study'],
'RT IMAGE IOD': ['Study'],
'SC IMAGE IOD': ['Study'],
None: ['Study', 'Modality Performed Procedure Step'],
'SEGMENTATION IOD': ['Study'],
'PET IMAGE IOD': ['Study'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'DIGITAL X-RAY IMAGE IOD': ['Study'],
'REAL WORLD VALUE MAPPING IOD': ['Study'],
'SPATIAL REGISTRATION IOD': ['Study'],
'COLON CAD SR IOD': ['Study'],
'INTRAVASCULAR OCT IMAGE IOD': ['Study'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'ENHANCED PET IMAGE IOD': ['Study'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Study'],
'US MULTI-FRAME IMAGE IOD': ['Study'],
'ENHANCED X-RAY RF IMAGE IOD': ['Study'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Study'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Study'],
'US IMAGE IOD': ['Study'],
'GENERAL ECG IOD': ['Study'],
'XRF IMAGE IOD': ['Study'],
'ENCAPSULATED CDA IOD': ['Study'],
'ENHANCED SR IOD': ['Study'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Study'],
'GENERAL AUDIO WAVEFORM IOD': ['Study'],
'MR IMAGE IOD': ['Study'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Study'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Study'],
'ARTERIAL PULSE WAVEFORM IOD': ['Study'],
},
# GantryPitchAngle
0x300A014AL: {
'RT IMAGE IOD': ['Image'],
None: ['Image'],
},
# FovealSensitivityMeasured
0x00240086L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# DetectorActiveShape
0x00187024L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# CatheterRotationalRate
0x00520013L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
None: ['Image'],
},
# ReferencedRealWorldValueMappingInstanceSequence
0x0008114BL: {
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Document'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'BASIC TEXT SR IOD': ['Document'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'PROCEDURE LOG IOD': ['Document'],
'ENHANCED SR IOD': ['Document'],
'CHEST CAD SR IOD': ['Document'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
None: ['Document'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Document'],
'COMPREHENSIVE SR IOD': ['Document'],
'COLON CAD SR IOD': ['Document'],
},
# ImageRotation
0x00700042L: {
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
None: ['Presentation State'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
},
# RequestedSubsequentWorkitemCodeSequence
0x00404031L: {
'GENERAL PURPOSE PERFORMED PROCEDURE STEP IOD': ['General Purpose Performed Procedure Step'],
None: ['General Purpose Performed Procedure Step'],
},
# ExcludedIntervalsSequence
0x00189803L: {
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
None: ['Image'],
},
# DecoupledNucleus
0x00189060L: {
'MR SPECTROSCOPY IOD': ['Equipment'],
None: ['Equipment'],
},
# IVUSGatedRate
0x00183102L: {
'US MULTI-FRAME IMAGE IOD': ['Image'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'US IMAGE IOD': ['Image'],
None: ['Image'],
},
# CountsAccumulated
0x00180070L: {
'NM IMAGE IOD': ['Image'],
None: ['Image'],
},
# StartCumulativeMetersetWeight
0x300C0008L: {
'RT IMAGE IOD': ['Image'],
None: ['Image'],
},
# ImageOrientationSlide
0x00480102L: {
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
None: ['Image'],
},
# SubjectiveRefractionLeftEyeSequence
0x00460098L: {
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Equipment'],
None: ['Equipment'],
},
# OphthalmicAxialMeasurementsLeftEyeSequence
0x00221008L: {
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Series'],
None: ['Series'],
},
# StimuliRetesting
0x00240042L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# ViewCodeSequence
0x00540220L: {
None: ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
},
# FovealSensitivity
0x00240087L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# RadiationMachineSAD
0x30020022L: {
'RT IMAGE IOD': ['Image'],
None: ['Image'],
},
# PixelDataProviderURL
0x00287FE0L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image', 'Dose', 'Segmentation'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'NM IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# ExposureTime
0x00181150L: {
'CT IMAGE IOD': ['Image'],
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# CassetteID
0x00181007L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
},
# NonDICOMOutputCodeSequence
0x00404032L: {
'GENERAL PURPOSE PERFORMED PROCEDURE STEP IOD': ['General Purpose Performed Procedure Step'],
None: ['General Purpose | |
# elf/transformation/converter.py
import numpy as np
from . import elastix_parser
from .affine import affine_matrix_2d, affine_matrix_3d
# Converter functions to translate in between different representations of affine transformations.
# Currently supports the following representations:
# - native: the representation expected by elf transformation functions (same as scipy);
# transformation is represented by 4x4 affine matrix
# transformation is always given in voxel space
# transformation is given in backward direction
# - bdv: the representation in big dataviewer
# transformation is represented by parameter vector of length 12
# transformation is given in (variable) physical unit
# transformation is given in forward direction
# - elastix: the representation in elastix;
# affine transformations and metadata are stored in a text file
# physical unit is millimeter
# transformation is given in backward direction
# TODO
# - support converting transformations to elastix
#
# General
#
def pretty_print_trafo(trafo):
if isinstance(trafo, np.ndarray) and trafo.ndim == 2:
trafo = matrix_to_parameters(trafo)
trafo = " ".join([f"{param:.4f}" for param in trafo])
print(trafo)
def matrix_to_parameters(matrix):
""" Affine matrix to parameter vector.
Returns the parameter vector laid out as
2d:
[a00, a01, a02, a10, a11, a12]
3d:
[a00, a01, a02, a03, a10, a11, a12, a13, a20, a21, a22, a23]
"""
if matrix.shape[0] == 4:
assert matrix.shape == (4, 4)
trafo = matrix[0].tolist() + matrix[1].tolist() + matrix[2].tolist()
else:
assert matrix.shape == (3, 3)
trafo = matrix[0].tolist() + matrix[1].tolist()
return trafo
def parameters_to_matrix(trafo):
""" Parameter vector to affine matrix.
Assumes the parameter vector is laid out as
2d:
[a00, a01, a02, a10, a11, a12]
3d:
[a00, a01, a02, a03, a10, a11, a12, a13, a20, a21, a22, a23]
"""
if len(trafo) == 12:
sub_matrix = np.zeros((3, 3), dtype='float64')
sub_matrix[0, 0] = trafo[0]
sub_matrix[0, 1] = trafo[1]
sub_matrix[0, 2] = trafo[2]
sub_matrix[1, 0] = trafo[4]
sub_matrix[1, 1] = trafo[5]
sub_matrix[1, 2] = trafo[6]
sub_matrix[2, 0] = trafo[8]
sub_matrix[2, 1] = trafo[9]
sub_matrix[2, 2] = trafo[10]
shift = [trafo[3], trafo[7], trafo[11]]
matrix = np.zeros((4, 4))
matrix[:3, :3] = sub_matrix
matrix[:3, 3] = shift
matrix[3, 3] = 1
elif len(trafo) == 6:
sub_matrix = np.zeros((2, 2), dtype='float64')
sub_matrix[0, 0] = trafo[0]
sub_matrix[0, 1] = trafo[1]
sub_matrix[1, 0] = trafo[3]
sub_matrix[1, 1] = trafo[4]
shift = [trafo[2], trafo[5]]
matrix = np.zeros((3, 3))
matrix[:2, :2] = sub_matrix
matrix[:2, 2] = shift
matrix[2, 2] = 1
else:
raise ValueError(f"Invalid number of parameters {len(trafo)}")
return matrix
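# Illustrative round trip between the flat parameter vector and the native
# matrix representation (the numbers are made up for demonstration):
#
#   params = [1.0, 0.0, 0.0, 5.0,
#             0.0, 1.0, 0.0, 3.0,
#             0.0, 0.0, 1.0, 2.0]          # identity rotation, shift (5, 3, 2)
#   mat = parameters_to_matrix(params)     # 4x4 affine matrix
#   assert matrix_to_parameters(mat) == params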
#
# Elastix
#
def _elastix_affine_to_bdv(trafo):
if len(trafo) == 12: # 3d transformation
sub_matrix = np.zeros((3, 3), dtype='float64')
sub_matrix[0, 0] = trafo[0]
sub_matrix[0, 1] = trafo[1]
sub_matrix[0, 2] = trafo[2]
sub_matrix[1, 0] = trafo[3]
sub_matrix[1, 1] = trafo[4]
sub_matrix[1, 2] = trafo[5]
sub_matrix[2, 0] = trafo[6]
sub_matrix[2, 1] = trafo[7]
sub_matrix[2, 2] = trafo[8]
shift = [trafo[9], trafo[10], trafo[11]]
matrix = np.zeros((4, 4))
matrix[:3, :3] = sub_matrix
matrix[:3, 3] = shift
matrix[3, 3] = 1
elif len(trafo) == 6: # 2d transformation
sub_matrix = np.zeros((2, 2), dtype='float64')
sub_matrix[0, 0] = trafo[0]
sub_matrix[0, 1] = trafo[1]
sub_matrix[1, 0] = trafo[2]
sub_matrix[1, 1] = trafo[3]
shift = [trafo[4], trafo[5]]
matrix = np.zeros((3, 3))
matrix[:2, :2] = sub_matrix
matrix[:2, 2] = shift
matrix[2, 2] = 1
else:
raise ValueError(f"Invalid number of parameters for affine transformation: {len(trafo)}")
return matrix
def _elastix_euler_to_bdv(trafo):
nparam = len(trafo)
if nparam == 6:
matrix = affine_matrix_3d(rotation=trafo[:3],
translation=trafo[3:],
angles_in_degree=False)
elif nparam == 3:
matrix = affine_matrix_2d(rotation=trafo[0],
translation=trafo[1:],
angles_in_degree=False)
else:
raise ValueError(f"Invalid number of parameters for euler transform: {nparam}")
return matrix
def _elastix_similarity_to_bdv(trafo):
nparam = len(trafo)
if nparam == 7:
scale = 3 * [trafo[-1]]
matrix = affine_matrix_3d(scale=scale,
rotation=trafo[:3],
translation=trafo[3:6],
angles_in_degree=False)
elif nparam == 4:
scale = 2 * [trafo[0]]
matrix = affine_matrix_2d(scale=scale,
rotation=trafo[1],
translation=trafo[2:],
angles_in_degree=False)
else:
raise ValueError(f"Invalid number of parameters for similarity transform: {nparam}")
return matrix
def _elastix_translation_to_bdv(trafo):
nparam = len(trafo)
if nparam == 3:
matrix = affine_matrix_3d(translation=trafo)
elif nparam == 2:
matrix = affine_matrix_2d(translation=trafo)
else:
raise ValueError(f"Invalid number of parameters for similarity transform: {nparam}")
return matrix
def elastix_parameter_to_bdv_matrix(trafo, trafo_type):
""" Convert elastix parameters to affine matrix in bdv convention.
Note that the elastix parameter use a different convention than
what is used natively and by bdv.
"""
if trafo_type == 'AffineTransform':
matrix = _elastix_affine_to_bdv(trafo)
elif trafo_type == 'EulerTransform':
matrix = _elastix_euler_to_bdv(trafo)
elif trafo_type == 'SimilarityTransform':
matrix = _elastix_similarity_to_bdv(trafo)
elif trafo_type == 'TranslationTransform':
matrix = _elastix_translation_to_bdv(trafo)
else:
raise ValueError(f"Invalid transformation type {trafo_type}")
return matrix
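# Illustrative example: a 3d elastix EulerTransform stores three rotation angles
# (in radians, since angles_in_degree=False is used above) followed by three
# translation values, so a pure translation by (1, 2, 3) converts like this:
#
#   matrix = elastix_parameter_to_bdv_matrix([0.0, 0.0, 0.0, 1.0, 2.0, 3.0],
#                                            "EulerTransform")
#   # matrix is a 4x4 affine with identity rotation and translation (1, 2, 3)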
def _convert_elastix_trafo(trafo_file):
"""Based on:
https://github.com/image-transform-converters/image-transform-converters/blob/c405a8c820a3a3e0e35a40183384da7372687d4a/src/main/java/itc/converters/ElastixAffine3DToAffineTransform3D.java
"""
trafo = elastix_parser.get_transformation(trafo_file)
trafo_type = elastix_parser.get_transformation_type(trafo_file)
def convert2d(trafo):
# initialize the resulting affine matrix with the identity
matrix = affine_matrix_2d()
# load the rotation center from the elastix transformation definition
rot_center = elastix_parser.get_rotation_center(trafo_file)
if rot_center is not None:
rot_center_neg = [-ce for ce in rot_center]
# translate to the rotation center
translate_to_rot = affine_matrix_2d(translation=rot_center_neg)
matrix = translate_to_rot @ matrix
# apply rotation and scale
rot_and_scale = trafo.copy()
rot_and_scale[2, :2] = 0
matrix = rot_and_scale @ matrix
# translate back from the rotation center
if rot_center is not None:
translate_from_rot = affine_matrix_2d(translation=rot_center)
matrix = translate_from_rot @ matrix
return matrix
def convert3d(trafo):
# initialize the resulting affine matrix with the identity
matrix = affine_matrix_3d()
# load the rotation center from the elastix transformation definition
rot_center = elastix_parser.get_rotation_center(trafo_file)
if rot_center is not None:
rot_center_neg = [-ce for ce in rot_center]
# translate to the rotation center
translate_to_rot = affine_matrix_3d(translation=rot_center_neg)
matrix = translate_to_rot @ matrix
# apply rotation and scale
rot_and_scale = trafo.copy()
rot_and_scale[3, :3] = 0
matrix = rot_and_scale @ matrix
# translate back from the rotation center
if rot_center is not None:
translate_from_rot = affine_matrix_3d(translation=rot_center)
matrix = translate_from_rot @ matrix
return matrix
    # go from the transformation vector to the affine matrix. we can use the
# bdv functionality, because both bdv and elastix have the same axis convention
trafo = elastix_parameter_to_bdv_matrix(trafo, trafo_type)
# convert in 2d or 3d
if trafo.shape[0] == 3:
return convert2d(trafo)
else:
return convert3d(trafo)
def _combine_elastix_trafos_bdv(trafos, resolution, scale_factor):
is_2d = trafos[0].shape[0] == 3
# transformation to scale from voxel space to millimeter
# (which is the fixed physical unit in elastix)
if is_2d: # 2d case
vox_to_mm = affine_matrix_2d(scale=2 * [scale_factor])
else: # 3d case
vox_to_mm = affine_matrix_3d(scale=3 * [scale_factor])
    # transformation to scale from millimeter to the physical unit we use
# usually we use micrometer and then scale_factor = 10^3
# for nanometer it would be 10^6 etc.
if is_2d: # 2d case
mm_to_unit = affine_matrix_2d(scale=[res / scale_factor for res in resolution])
else: # 3d case
mm_to_unit = affine_matrix_3d(scale=[res / scale_factor for res in resolution])
    # combine the scaling transformations and the actual elastix transformations
matrix = vox_to_mm
for trafo in trafos:
# elastix uses the opposite transformation direction as bdv,
# so we need to invert the elastix transformation here
matrix = matrix @ np.linalg.inv(trafo)
matrix = matrix @ mm_to_unit
return matrix
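# Illustrative unit bookkeeping for the helper above (numbers made up): with
# resolution=[0.5, 0.5, 0.5] and scale_factor=1e3, coordinates are first scaled
# by scale_factor (1e3) for the elastix trafos and then by
# resolution / scale_factor (0.5 / 1e3), so without any elastix trafo the net
# voxel-to-unit scale is just the resolution, i.e. 0.5.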
def _get_elastix_trafo_files(trafo_file, load_initial_trafos):
trafo_type = elastix_parser.get_transformation_type(trafo_file)
if trafo_type is None or trafo_type not in elastix_parser.AFFINE_COMPATIBLE:
        msg = f"Transformation type in {trafo_file}: {trafo_type} is not compatible with affine transformation"
raise ValueError(msg)
trafo_files = [trafo_file]
if load_initial_trafos:
initial_trafo_file = elastix_parser.get_initial_transform_file(trafo_file)
else:
initial_trafo_file = None
# load all transformations that need to be concatenated from the elastix transformation file
while initial_trafo_file is not None:
trafo_files.append(initial_trafo_file)
initial_trafo_file = elastix_parser.get_initial_transform_file(initial_trafo_file)
if initial_trafo_file is not None:
            trafo_type = elastix_parser.get_transformation_type(initial_trafo_file)
if trafo_type is None or trafo_type not in elastix_parser.AFFINE_COMPATIBLE:
                msg = (f"Transformation type in {initial_trafo_file}: {trafo_type}"
                       " is not compatible with affine transformation")
raise ValueError(msg)
# reverse the order of transformations
return trafo_files[::-1]
def elastix_to_bdv(trafo_file, resolution, scale_factor=1e3, load_initial_trafos=True):
""" Convert elastix transformation in text file to bdv transformation.
Arguments:
trafo_file [str] - the file defining the elastix transformation
resolution [list[float]] - resolution of the data in physical units
scale_factor [float] - scale factor of physical units compared to millimeter, which is
            the default unit for elastix transformations. By default, assume that the physical
            unit is micrometer, which corresponds to a scale of 10^3 (default: 1e3)
load_initial_trafos [bool] - whether to load the initial transformations (default: True)
Returns:
list - parameter vector for bdv transformation
"""
# get elastix trafos in bdv matrix format
trafo_files = _get_elastix_trafo_files(trafo_file, load_initial_trafos)
trafos = [_convert_elastix_trafo(trafo) for trafo in trafo_files]
# combine the transformations and apply the scaling transformations to
    # go from voxel space to the bdv physical unit
    matrix = _combine_elastix_trafos_bdv(trafos, resolution, scale_factor)
    # return the parameter vector expected by bdv
    return matrix_to_parameters(matrix)
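# Illustrative use (the file name is made up; any elastix TransformParameters
# output would do):
#
#   # 3d data with 0.5 micrometer isotropic voxels
#   bdv_params = elastix_to_bdv("TransformParameters.0.txt",
#                               resolution=[0.5, 0.5, 0.5])
#   pretty_print_trafo(bdv_params)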
# From qq2016/kubeflow_learning: github_issue_summarization/notebooks/seq2seq_utils.py
import logging
import dill as dpickle
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
from IPython.display import SVG, display
from keras import backend as K
from keras.layers import Input
from keras.models import Model
from keras.utils.vis_utils import model_to_dot
from annoy import AnnoyIndex
from tqdm import tqdm, tqdm_notebook
from nltk.translate.bleu_score import corpus_bleu
def load_text_processor(fname='title_pp.dpkl'):
"""
Load preprocessors from disk.
Parameters
----------
fname: str
        file name of ktext.processor object
Returns
-------
num_tokens : int
size of vocabulary loaded into ktext.processor
pp : ktext.processor
the processor you are trying to load
Typical Usage:
-------------
num_decoder_tokens, title_pp = load_text_processor(fname='title_pp.dpkl')
num_encoder_tokens, body_pp = load_text_processor(fname='body_pp.dpkl')
"""
# Load files from disk
with open(fname, 'rb') as f:
pp = dpickle.load(f)
num_tokens = max(pp.id2token.keys()) + 1
print('Size of vocabulary for {}: {}'.format(fname, num_tokens))
return num_tokens, pp
def load_decoder_inputs(decoder_np_vecs='train_title_vecs.npy'):
"""
Load decoder inputs.
Parameters
----------
decoder_np_vecs : str
filename of serialized numpy.array of decoder input (issue title)
Returns
-------
decoder_input_data : numpy.array
The data fed to the decoder as input during training for teacher forcing.
        This is the same as `decoder_np_vecs` except that the last position is dropped.
decoder_target_data : numpy.array
The data that the decoder data is trained to generate (issue title).
Calculated by sliding `decoder_np_vecs` one position forward.
"""
vectorized_title = np.load(decoder_np_vecs)
# For Decoder Input, you don't need the last word as that is only for prediction
# when we are training using Teacher Forcing.
decoder_input_data = vectorized_title[:, :-1]
# Decoder Target Data Is Ahead By 1 Time Step From Decoder Input Data (Teacher Forcing)
decoder_target_data = vectorized_title[:, 1:]
print('Shape of decoder input: {}'.format(decoder_input_data.shape))
print('Shape of decoder target: {}'.format(decoder_target_data.shape))
return decoder_input_data, decoder_target_data
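# Illustrative teacher-forcing shift on a tiny, made-up sequence:
#
#   seq = np.array([[2, 14, 57, 3]])     # e.g. _start_, w1, w2, _end_
#   decoder_input  = seq[:, :-1]         # [[2, 14, 57]]
#   decoder_target = seq[:, 1:]          # [[14, 57, 3]]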
def load_encoder_inputs(encoder_np_vecs='train_body_vecs.npy'):
"""
Load variables & data that are inputs to encoder.
Parameters
----------
encoder_np_vecs : str
        filename of serialized numpy.array of encoder input (issue body)
Returns
-------
encoder_input_data : numpy.array
The issue body
doc_length : int
The standard document length of the input for the encoder after padding
the shape of this array will be (num_examples, doc_length)
"""
vectorized_body = np.load(encoder_np_vecs)
# Encoder input is simply the body of the issue text
encoder_input_data = vectorized_body
doc_length = encoder_input_data.shape[1]
print('Shape of encoder input: {}'.format(encoder_input_data.shape))
return encoder_input_data, doc_length
def viz_model_architecture(model):
"""Visualize model architecture in Jupyter notebook."""
display(SVG(model_to_dot(model).create(prog='dot', format='svg')))
def free_gpu_mem():
"""Attempt to free gpu memory."""
K.get_session().close()
cfg = K.tf.ConfigProto()
cfg.gpu_options.allow_growth = True
K.set_session(K.tf.Session(config=cfg))
def test_gpu():
"""Run a toy computation task in tensorflow to test GPU."""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
hello = tf.constant('Hello, TensorFlow!')
print(session.run(hello))
def plot_model_training_history(history_object):
"""Plots model train vs. validation loss."""
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.plot(history_object.history['loss'])
    plt.plot(history_object.history['val_loss'])
    plt.legend(['train', 'validation'], loc='upper left')
plt.show()
def extract_encoder_model(model):
"""
Extract the encoder from the original Sequence to Sequence Model.
Returns a keras model object that has one input (body of issue) and one
output (encoding of issue, which is the last hidden state).
Input:
-----
model: keras model object
Returns:
-----
keras model object
"""
encoder_model = model.get_layer('Encoder-Model')
return encoder_model
def extract_decoder_model(model):
"""
Extract the decoder from the original model.
Inputs:
------
model: keras model object
Returns:
-------
A Keras model object with the following inputs and outputs:
Inputs of Keras Model That Is Returned:
1: the embedding index for the last predicted word or the <Start> indicator
2: the last hidden state, or in the case of the first word the hidden state from the encoder
Outputs of Keras Model That Is Returned:
1. Prediction (class probabilities) for the next word
2. The hidden state of the decoder, to be fed back into the decoder at the next time step
Implementation Notes:
----------------------
Must extract relevant layers and reconstruct part of the computation graph
to allow for different inputs as we are not going to use teacher forcing at
inference time.
"""
# the latent dimension is the same throughout the architecture so we are going to
# cheat and grab the latent dimension of the embedding because that is the same as what is
# output from the decoder
latent_dim = model.get_layer('Decoder-Word-Embedding').output_shape[-1]
# Reconstruct the input into the decoder
decoder_inputs = model.get_layer('Decoder-Input').input
dec_emb = model.get_layer('Decoder-Word-Embedding')(decoder_inputs)
dec_bn = model.get_layer('Decoder-Batchnorm-1')(dec_emb)
    # Instead of setting the initial state from the encoder and forgetting about it, during inference
# we are not doing teacher forcing, so we will have to have a feedback loop from predictions back
# into the GRU, thus we define this input layer for the state so we can add this capability
gru_inference_state_input = Input(shape=(latent_dim,), name='hidden_state_input')
    # we need to reuse the trained weights, which is why we fetch the existing GRU layer
# If you inspect the decoder GRU that we created for training, it will take as input
# 2 tensors -> (1) is the embedding layer output for the teacher forcing
# (which will now be the last step's prediction, and will be _start_ on the
# first time step)
# (2) is the state, which we will initialize with the encoder on the first time step
# but then grab the state after the first prediction and feed that back in again.
gru_out, gru_state_out = model.get_layer('Decoder-GRU')([dec_bn, gru_inference_state_input])
# Reconstruct dense layers
dec_bn2 = model.get_layer('Decoder-Batchnorm-2')(gru_out)
dense_out = model.get_layer('Final-Output-Dense')(dec_bn2)
decoder_model = Model([decoder_inputs, gru_inference_state_input],
[dense_out, gru_state_out])
return decoder_model
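# Illustrative single decoding step with the two extracted models (the variable
# names below, e.g. tokenized_body and start_id, are made up for the sketch):
#
#   state = encoder_model.predict(tokenized_body)        # initial hidden state
#   word = np.array([[start_id]])                        # last predicted token
#   probs, state = decoder_model.predict([word, state])  # next-word distribution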
class Seq2Seq_Inference(object):
# pylint: disable=too-many-instance-attributes
def __init__(self,
encoder_preprocessor,
decoder_preprocessor,
seq2seq_model):
self.pp_body = encoder_preprocessor
self.pp_title = decoder_preprocessor
self.seq2seq_model = seq2seq_model
self.seq2seq_model._make_predict_function() # pylint: disable=protected-access
self.encoder_model = extract_encoder_model(seq2seq_model)
self.encoder_model._make_predict_function() # pylint: disable=protected-access
self.decoder_model = extract_decoder_model(seq2seq_model)
self.decoder_model._make_predict_function() # pylint: disable=protected-access
self.default_max_len_title = self.pp_title.padding_maxlen
self.nn = None
self.rec_df = None
def generate_issue_title(self,
raw_input_text,
max_len_title=None):
"""
Use the seq2seq model to generate a title given the body of an issue.
Inputs
------
raw_input: str
The body of the issue text as an input string
max_len_title: int (optional)
The maximum length of the title the model will generate
"""
if max_len_title is None:
max_len_title = self.default_max_len_title
# get the encoder's features for the decoder
raw_tokenized = self.pp_body.transform([raw_input_text])
body_encoding = self.encoder_model.predict(raw_tokenized)
# we want to save the encoder's embedding before its updated by decoder
# because we can use that as an embedding for other tasks.
original_body_encoding = body_encoding
state_value = np.array(self.pp_title.token2id['_start_']).reshape(1, 1)
decoded_sentence = []
stop_condition = False
while not stop_condition:
preds, st = self.decoder_model.predict([state_value, body_encoding])
# We are going to ignore indices 0 (padding) and indices 1 (unknown)
# Argmax will return the integer index corresponding to the
# prediction + 2 b/c we chopped off first two
pred_idx = np.argmax(preds[:, :, 2:]) + 2
# retrieve word from index prediction
pred_word_str = self.pp_title.id2token[pred_idx]
if pred_word_str == '_end_' or len(decoded_sentence) >= max_len_title:
stop_condition = True
break
decoded_sentence.append(pred_word_str)
# update the decoder for the next word
body_encoding = st
state_value = np.array(pred_idx).reshape(1, 1)
return original_body_encoding, ' '.join(decoded_sentence)
def print_example(self,
i,
body_text,
title_text,
url,
threshold):
"""
Prints an example of the model's prediction for manual inspection.
"""
if i:
print('\n\n==============================================')
print('============== Example # {} =================\n'.format(i))
if url:
print(url)
print("Issue Body:\n {} \n".format(body_text))
if title_text:
print("Original Title:\n {}".format(title_text))
emb, gen_title = self.generate_issue_title(body_text)
print("\n****** Machine Generated Title (Prediction) ******:\n {}".format(gen_title))
if self.nn:
# return neighbors and distances
n, d = self.nn.get_nns_by_vector(emb.flatten(), n=4,
include_distances=True)
neighbors = n[1:]
dist = d[1:]
if min(dist) <= threshold:
cols = ['issue_url', 'issue_title', 'body']
dfcopy = self.rec_df.iloc[neighbors][cols].copy(deep=True)
dfcopy['dist'] = dist
similar_issues_df = dfcopy.query('dist <= {}'.format(threshold))
print("\n**** Similar Issues (using encoder embedding) ****:\n")
display(similar_issues_df)
def demo_model_predictions(self,
n,
issue_df,
threshold=1):
"""
Pick n random Issues and display predictions.
Input:
------
n : int
Number of issues to display from issue_df
issue_df : pandas DataFrame
DataFrame that contains two columns: `body` and `issue_title`.
threshold : float
distance threshold for recommendation of similar issues.
Returns:
--------
None
Prints the original issue body and the model's prediction.
"""
# Extract body and title from DF
body_text = issue_df.body.tolist()
title_text = issue_df.issue_title.tolist()
url = issue_df.issue_url.tolist()
demo_list = np.random.randint(low=1, high=len(body_text), size=n)
for i in demo_list:
self.print_example(i,
body_text=body_text[i],
title_text=title_text[i],
url=url[i],
threshold=threshold)
def prepare_recommender(self, vectorized_array, original_df):
"""
Use the annoy library to build recommender
Parameters
----------
vectorized_array : List[List[int]]
            This is the list of list of integers that represents
not (self.data.openingLog or self.blockdropdown):
# instance.dropdown.open(instance)
# except:
# pass
#
# return True
# @staticmethod
# def staticdropdowncallbacktext(self, instance, value):
# """
# Called from a dropdown - just selects the value entered
# """
# Logger.info("CRV:staticdropdowncallbacktext")
#
# if self.data.openingLog:
# #if not self.data.haveopenedlog:
# Logger.info("CRV:staticdropdowncallbacktext: refusing during initialisation")
# return True
#
# l = len(value)
# instance.dismissdropdown()
# dopts = instance.dropoptions
# if l > 0:
# showdrop = False
# else:
# showdrop = True
# instance.setdropoptions(dopts)
#
# if showdrop:
# try:
# instance.dropdown.open(instance)
# except:
# pass
#
# return True
#
# def logcallbacktext(self, instance, value):
# Logger.info("CRV:logcallbacktext")
#
# if self.data.openingLog:
# #if not self.data.haveopenedlog:
# Logger.info("CRV:logcallbacktext: refusing during initialisation")
# return True
#
# self.dynamicdropdowncallbacktext(instance, value)
# self.changelogbytype(value)
def changelogbytype(self, value):
try:
if len(value) == 0:
value = 'Launch'
sexc = 'len ' + value
if value == 'Incident':
if modglobal.crvincidentmain is None: # no incident - create it
# modglobal.crvincidentmain = CrvIncidentMain(self, self.data)
# crvincident = modglobal.crvincidentmain.setup_main_screen()
# self.data.sm.add_widget(crvincident)
sexc = 'initialising incident'
self.crv_new_incident(None)
return
sexc = 'getting loggroup of ' + value
index = self.data.logrecord.loggroup['logtypesdisp'][value]
# get the top level object for 'logentrybox'
# It should have 2 children - 'logcommonlog' and something else.
# Delete the something else and replace it with the object pointed to by
# index.
sexc = 'getting widgets'
toplevel = self.data.logrecord.getobject('logentrybox')
#currenttype = self.data.logrecord.getobject('logcurrenttype')
commonlog = self.data.logrecord.getobject('logcommonlog')
sexc = 'getting req ' + index
reqtype = self.data.logrecord.getobject(index)
# if currenttype is not None:
# toplevel.clear_widgets(currenttype)
sexc = 'clear'
toplevel.clear_widgets()
toplevel.add_widget(commonlog)
sexc = 'add'
toplevel.add_widget(reqtype)
except:
Logger.info('CRV:LogType exception at ' + sexc)
return True
def changeactionbytype(self, invalue):
try:
sexc = 'len ' + invalue
if len(invalue) > 0:
value = invalue[:1].upper() + invalue[1:]
sexc = 'getting index of ' + invalue
index = self.data.logrecord.loggroup['logtypesincdisp'][value]
# get the top level object for 'logentrybox'
# It should have 2 children - 'logcommonlog' and something else.
# Delete the something else and replace it with the object pointed to by
# index.
sexc = 'getting widgets'
toplevel = self.data.logrecord.getobject('logentrybox')
commonlog = self.data.logrecord.getobject('logcommonlog')
sexc = 'getting req from ' + index
reqtype = self.data.logrecord.getobject(index)
# if currenttype is not None:
# toplevel.clear_widgets(currenttype)
sexc = 'clear widget'
toplevel.clear_widgets()
sexc = 'add widget'
toplevel.add_widget(commonlog)
toplevel.add_widget(reqtype)
except:
Logger.info('CRV:INC:ActionType exception at ' + sexc)
return True
# def eactionbytype(self, value):
# if len(value) > 0:
# index = self.data.logrecord.loggroup['logtypesincdisp'][value]
# # get the top level object for 'logentrybox'
# # It should have 2 children - 'logcommonlog' and something else.
# # Delete the something else and replace it with the object pointed to by
# # index.
#
# toplevel = self.data.logrecord.getobject('logentrybox')
# commonlog = self.data.logrecord.getobject('logcommonlog')
# reqtype = self.data.logrecord.getobject(index)
#
# # if currenttype is not None:
# # toplevel.clear_widgets(currenttype)
# toplevel.clear_widgets()
# toplevel.add_widget(commonlog)
# toplevel.add_widget(reqtype)
# return True
# @staticmethod
# def dticallbackfocus(self, instance, value):
# #Logger.debug('POS: focus ' + str(value))
# if instance.collide_point(*value.pos):
# Logger.debug('CRV: dti collide')
# instance.dropdown.open(instance)
# self.data.shelf_save_current()
#
# return True
#
# #@staticmethod
# @staticmethod
# def dticallbackunfocus(instance):
# instance.dismissdropdown()
# return True
#
def screenupdateheader(self, inindex):
# index comes in as screen name (screen_...)
# the timestamp label is index by time_...
# i.e. change ^screen_ to time_
if inindex is None:
index = self.data.sm.current
else:
index = inindex
#tact = self.data.datarecord.getobject('activitylabel')
tmstamp = self.data.datarecord.getobjectvariable('time', 'text', '')
timeindex = index.replace('screen_', 'time_')
if timeindex in self.data.datarecord.record:
try:
#
                # If an Incident or Incidents are active, then this takes precedence.
# In this case - instead of the Log time being displayed, display:
# Incident (n of n) - TimeStarted Vesselname
#
                # Otherwise - if no incident, display either the log time or inactive.
#
[tlab, tdat, tinfo] = self.data.datarecord.getobject(timeindex)
if modglobal.crvincidentmain is not None and modglobal.crvincidentmain.numincidents > 0:
tlab.text = 'Incident Active (' + str(modglobal.crvincidentmain.currentincident+1) + ' of ' +\
str(modglobal.crvincidentmain.numincidents) + ')'
tdat.text = modglobal.crvincidentmain.currentincidents[modglobal.crvincidentmain.currentincident]['record'].getobjecttext('incalerttime')
ete, dist = modglobal.crvincidentmain.etedistance()
tmp = ''
if len(ete) > 0:
tmp = 'ete ' + ete + ' (mins) '
if len(dist) > 0:
tmp += 'distance ' + dist + ' (nm)'
if len(tmp) > 0: tinfo.text = tmp
else:
if len(tmstamp) > 0:
tlab.text = '[b]Log Start:[/b]'
tdat.text = tmstamp
else:
tlab.text = '[b]Boatlog not open[/b]'
tdat.text = ''
except:
pass
return
def crv_image_click(self, instance):
"""
Clicked on the image to open incident screen.
        If it's not created then create it.
If number of incidents is 0 then create new incident
If number of incidents is 1 then view incident
"""
if instance.last_touch.is_double_tap:
if not self.data.getlogactive():
self.data.dopopup('Log must be opened before an incident can be started', bindto=instance)
else:
instance.opacity = 1.0
self.crv_new_incident(instance)
return True
def crv_new_incident(self, instance):
"""
Clicked on the image to open incident screen.
        If it's not created then create it.
If number of incidents is 0 then create new incident
If number of incidents is 1 then view incident
"""
if not self.data.sm.has_screen('screen_crvincident_main'):
modglobal.crvincidentmain = CrvIncidentMain(self, self.data, imageinstance=instance)
crvincident = modglobal.crvincidentmain.setup_main_screen()
self.data.sm.add_widget(crvincident)
try:
if instance is not None: instance.clickup()
except:
pass
self.data.lastscreen = self.data.sm.current
if modglobal.crvincidentmain.numincidents == 0:
# create new incident - note this will create the screen for each incident
# This could be quite heavy - but most of the time it will be 1
modglobal.crvincidentmain.newincident(imageinstance=instance)
# let the screenmanager_callback_onenter handle where the incident screen goes
self.data.sm.current = 'screen_crvincident_main'
return True
def screencreateheader(self, index, forceheader=None, alttext=None, notime=False):
# Creates a standard screen header.
# if alttext is set, dont put image at top left - put text instead
#szh = [4, 1]
szy = .1
b = BoxLayout(pos_hint_x=0, size_hint_y=szy)
if 'screen_crvincident' in index:
            # don't display the image for any incidents
if index == 'screen_crvincident_main':
ilab = MyLabel(text='[b]CURRENT INCIDENTS (the top one is always the active incident)[/b]', color=self.crvcolor.getfgcolor(), size_hint=[1, 1], halign='left', markup=True,
font_size=modglobal.default_large_font_size)
else:
if forceheader is not None:
s = '[b]Incident ' + forceheader + '[/b]'
else:
s = '[b]Current Incident[/b]'
ilab = MyLabel(text=s, color=self.crvcolor.getfgcolor(), size_hint=[1, 1], halign='left', markup=True,
font_size=modglobal.default_large_font_size)
b.add_widget(ilab)
else:
if alttext is not None:
b.add_widget(MyLabel(text='[b]'+alttext+'[/b]', color=self.crvcolor.getfgcolor(), size_hint=[.75, 1], halign='left', markup=True,
font_size=modglobal.default_large_font_size))
else:
image1 = ImageButton(source='images/cg.png', allow_stretch='true', opacity=10)
image1.bind(on_release=self.crv_image_click)
b.add_widget(image1)
b1 = MyBoundBox(orientation='horizontal')
timeindex = index.replace('screen_', 'time_')
if timeindex not in self.data.datarecord.record:
Logger.debug("CRV: logcreateheader: TIME INDEX DOES NOT EXIST: " + timeindex)
else:
if notime:
                pass  # we don't want a time field at top right
else:
b2 = MyBoundBox(orientation='vertical')
b3 = MyBoundBox(orientation='horizontal')
tlab = MyLabel(color=self.crvcolor.getfgcolor(), size_hint=[4, 1], halign='left', markup=True)
b3.add_widget(tlab)
tdate = MyLabel(color=self.crvcolor.getfgcolor(), size_hint=[4, 1], halign='left', markup=True)
b3.add_widget(tdate)
b2.add_widget(b3)
tinfo = MyLabel(color=self.crvcolor.getfgcolor())
b2.add_widget(tinfo)
self.data.datarecord.setobject(timeindex, (tlab, tdate, tinfo), 'list')
b1.add_widget(b2)
b.add_widget(b1)
return b
# Checkbox with a label in a bordered boxlayout
# returns layout, checkbox, label
def boxcheckbox(self, cstr, szh=None, boxhint=None):
if not szh:
szh = [26, 1]
if boxhint is None:
b = MyBoundBox(orientation='horizontal')
else:
b = MyBoundBox(orientation='horizontal', size_hint=boxhint)
l = MyLabel(text=cstr, size_hint=szh, halign='left')
b.add_widget(l)
c = CCheckBox()
b.add_widget(c)
return b, c, l
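    # Illustrative use of the helper above (the label text and parent layout
    # are made up; the widget classes come from this app's modules):
    #
    #   box, chk, lbl = self.boxcheckbox('Engine checked', szh=[10, 1])
    #   some_layout.add_widget(box)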
# Checkbox with a label and a textbox in a bordered boxlayout
# returns layout, checkbox, label, textbox
def boxchecktextbox(self, cstr, szh=None, boxhint=None, infilter=''):
if not szh:
szh = [26, 1]
if boxhint is None:
b = MyBoundBox(orientation='horizontal')
else:
b = MyBoundBox(orientation='horizontal', size_hint=boxhint)
bl = BoxLayout(orientation='horizontal', size_hint_x=.8)
br = BoxLayout(orientation='horizontal', size_hint_x=.2)
b.add_widget(bl)
b.add_widget(br)
halign='left'
l = MyLabel(text=cstr, size_hint=szh, halign=halign)
bl.add_widget(l)
c = CCheckBox()
bl.add_widget(c)
t = MyTextInput()
br.add_widget(t)
return b, c, l, t
# Textinput with a label in a bordered boxlayout
# If no text in label, then make input readonly and color it.
# Returns: layout, widget, label
def boxtextbox(self, tstr, szh=None, boxhint=None, nolabel=False, nobox=False, infilter='', multiline=False, orientation='horizontal'):
if not szh:
szh = [6, 1]
if nobox:
if boxhint is None:
b = BoxLayout(orientation=orientation)
else:
b = BoxLayout(orientation=orientation, size_hint=boxhint)
else:
if boxhint is None:
b = MyBoundBox(orientation=orientation)
else:
b = MyBoundBox(orientation=orientation, size_hint=boxhint)
if nolabel:
l = None
| |
import gzip
import json
import re
from collections import defaultdict
from fuzzywuzzy.process import extractOne
from fuzzywuzzy.fuzz import QRatio
from fuzzywuzzy.utils import full_process
from mycroft.skills.core import intent_file_handler
from mycroft.util.log import LOG
from mycroft.skills.common_play_skill import CommonPlaySkill, CPSMatchLevel
from mycroft.util import play_wav
from os.path import dirname, join, abspath, isfile
from os import stat
from .lms_client import LMSClient
__author__ = "johanpalmqvist"
class SqueezeBoxMediaSkill(CommonPlaySkill):
def __init__(self):
super(SqueezeBoxMediaSkill, self).__init__("SqueezeBox Media Skill")
def initialize(self):
LOG.info("Initializing SqueezeBox Media skill")
super().initialize()
# Setup handlers for playback control messages
self.add_event("mycroft.audio.service.next", self.handle_nexttrack)
self.add_event("mycroft.audio.service.prev", self.handle_previoustrack)
self.add_event("mycroft.audio.service.pause", self.handle_pause)
self.add_event("mycroft.audio.service.resume", self.handle_resume)
if not self.settings:
raise ValueError("Could not load settings")
LOG.debug("Settings: {}".format(self.settings))
try:
self.lms = LMSClient(
self.settings["server"],
self.settings["port"],
self.settings["username"],
self.settings["password"],
)
except Exception as e:
LOG.error(
"Could not load server configuration. Exception: {}".format(e)
)
raise ValueError("Could not load server configuration.")
try:
self.default_player_name = self.settings["default_player_name"]
except Exception as e:
LOG.error("Default player name not set. Exception: {}".format(e))
raise ValueError("Default player name not set.")
self.speak_dialog_enabled = self.settings.get(
"speak_dialog_enabled", False
)
self.media_library_source_enabled = self.settings.get(
"media_library_source_enabled", True
)
self.favorite_source_enabled = self.settings.get(
"favorite_source_enabled", True
)
self.playlist_source_enabled = self.settings.get(
"playlist_source_enabled", True
)
self.podcast_source_enabled = self.settings.get(
"podcast_source_enabled", True
)
self.sources_cache_filename = join(
abspath(dirname(__file__)), "sources_cache.json.gz"
)
self.library_cache_filename = join(
abspath(dirname(__file__)), "library_cache.json.gz"
)
self.library_total_duration_state_filename = join(
abspath(dirname(__file__)), "library_total_duration_state.json.gz"
)
self.scorer = QRatio
self.processor = full_process
self.regexes = {}
self.get_sources("connecting...")
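    # Illustrative settings structure read by initialize() above (all values
    # are made up; the keys match the lookups in this file):
    #
    #   {
    #       "server": "192.168.1.10",
    #       "port": 9000,
    #       "username": "",
    #       "password": "",
    #       "default_player_name": "Living Room",
    #       "speak_dialog_enabled": "False"
    #   }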
# Regex handler
def translate_regex(self, regex):
if regex not in self.regexes:
path = self.find_resource(regex + ".regex")
if path:
with open(path) as f:
string = f.read().strip()
self.regexes[regex] = string
return self.regexes[regex]
# Get sources
def get_sources(self, message):
LOG.info("Loading content")
self.sources = defaultdict(dict)
LOG.debug("Selecting default backend")
default_backend, default_playerid = self.get_playerid(None)
# Album, Artist, Genre, Title sources (cache server response)
if self.media_library_source_enabled:
self.update_sources_cache()
self.load_sources_cache()
else:
LOG.info("Media Library source disabled. Skipped.")
# Favorite sources (query server)
if self.favorite_source_enabled:
self.sources["favorite"] = defaultdict(dict)
favorites = self.lms.get_favorites()
for favorite in favorites:
try:
if not self.sources["favorite"][favorite["name"]]:
if (
"audio" in favorite["type"]
and favorite["isaudio"] == 1
):
self.sources["favorite"][favorite["name"]][
"favorite_id"
] = favorite["id"]
LOG.debug(
"Loaded favorite: {}".format(favorite["name"])
)
except Exception as e:
LOG.warning(
"Failed to load favorite. Exception: {}".format(e)
)
LOG.info("Loaded favorites")
else:
LOG.info("Favorite source disabled. Skipped.")
# Playlist sources (query server)
if self.playlist_source_enabled:
self.sources["playlist"] = defaultdict(dict)
playlists = self.lms.get_playlists()
for playlist in playlists:
try:
if not self.sources["playlist"][playlist["playlist"]]:
self.sources["playlist"][playlist["playlist"]][
"playlist_id"
] = playlist["id"]
LOG.debug(
"Loaded playlist: {}".format(playlist["playlist"])
)
except Exception as e:
LOG.warning(
"Failed to load playlist. Exception: {}".format(e)
)
LOG.info("Loaded playlists")
else:
LOG.info("Playlist source disabled. Skipped.")
# Podcast sources (query server)
if self.podcast_source_enabled:
self.sources["podcast"] = defaultdict(dict)
podcasts = self.lms.get_podcasts(default_playerid)
for podcast in podcasts:
try:
if not self.sources["podcast"][podcast["name"]]:
if (
not podcast["hasitems"] == 0
and podcast["isaudio"] == 0
):
self.sources["podcast"][podcast["name"]][
"podcast_id"
] = podcast["id"]
LOG.debug(
"Loaded podcast: {}".format(podcast["name"])
)
except Exception as e:
LOG.warning(
"Failed to load podcast. Exception: {}".format(e)
)
LOG.info("Loaded podcasts")
else:
LOG.info("Podcast source disabled. Skipped.")
LOG.info("Loaded content")
# Get playerid matching input (fallback to default_player_name setting)
def get_playerid(self, backend):
if backend is None:
backend = self.default_player_name.title()
LOG.debug("Requested backend: {}".format(backend))
players = self.lms.get_players()
player_names = []
for player in players:
LOG.debug(
"Playerid={}, Name={}".format(
player["playerid"], player["name"]
)
)
player_names.append(player["name"])
key, confidence = extractOne(
backend,
player_names,
processor=self.processor,
scorer=self.scorer,
score_cutoff=0,
)
confidence = confidence / 100.0
LOG.debug("Player confidence: {}".format(confidence))
if confidence > 0.5:
extracted_player_name = key
LOG.debug("Extracted backend: {}".format(extracted_player_name))
else:
LOG.error("Couldn't find player matching: {}".format(backend))
data = {"backend": backend}
self.play_dialog("playernotfound.wav", "playernotfound", data)
return None, None
for player in players:
if extracted_player_name == player["name"]:
backend = player["name"]
playerid = player["playerid"]
return backend, playerid
# Get backend name from phrase
def get_backend(self, phrase):
LOG.debug("Backend match phrase: {}".format(phrase))
match = re.search(self.translate_regex("backend"), phrase)
LOG.debug("Backend match regex: {}".format(match))
if match:
backend = match.group("backend")
LOG.debug("Backend match found: {}".format(backend))
else:
backend = None
LOG.debug("Backend match not found: {}".format(backend))
return backend
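    # Illustrative: the "backend" regex resource must define a named group
    # called "backend". A pattern such as r"on (?P<backend>.+)$" (example only,
    # the real pattern ships with the skill's locale files) would turn
    # "play abba on kitchen" into backend="kitchen".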
# Load library cache file
def load_library_cache(self):
LOG.info("Loading library cache")
try:
with gzip.GzipFile(self.library_cache_filename) as f:
self.results = json.loads(f.read().decode("utf-8"))
LOG.info("Loaded library cache")
except Exception as e:
LOG.error("Library cache not found. Exception: {}".format(e))
# Get library total duration from state file
def load_library_total_duration(self):
LOG.info("Loading library total duration state")
try:
with gzip.GzipFile(
self.library_total_duration_state_filename
) as f:
library_total_duration = json.loads(f.read().decode("utf-8"))
LOG.info("Loaded library total duration state")
return library_total_duration
except Exception as e:
LOG.warning(
"Creating missing duration file. Exception: {}".format(e)
)
self.save_library_total_duration()
return self.lms.get_library_total_duration()
# Load sources cache file
def load_sources_cache(self):
LOG.info("Loading sources cache")
try:
with gzip.GzipFile(self.sources_cache_filename) as f:
self.sources = json.loads(f.read().decode("utf-8"))
LOG.info("Loaded sources cache")
except Exception as e:
LOG.error("Sources cache does not exist. Exception: {}.".format(e))
# Save library cache file
def save_library_cache(self):
LOG.info("Saving library cache")
payload = {
"id": 1,
"method": "slim.request",
"params": ["query", ["titles", "0", "-1", "tags:aegilpstu"]],
}
with gzip.GzipFile(self.library_cache_filename, "w") as f:
f.write(
json.dumps(
self.lms.lms_request(payload)["result"]["titles_loop"],
sort_keys=True,
indent=4,
ensure_ascii=False,
).encode("utf-8")
)
LOG.info("Saved library cache")
# Save library total duration to state file
def save_library_total_duration(self):
LOG.info("Saving library total duration state")
with gzip.GzipFile(
self.library_total_duration_state_filename, "w"
) as f:
f.write(
json.dumps(
self.lms.get_library_total_duration(),
sort_keys=True,
indent=4,
ensure_ascii=False,
).encode("utf-8")
)
LOG.info("Saved library total duration state")
# Save sources cache file
def save_sources_cache(self):
self.update_library_cache()
self.load_library_cache()
# Artist sources
self.sources["artist"] = defaultdict(dict)
for result in self.results:
try:
if not self.sources["artist"][result["artist"]]:
self.sources["artist"][result["artist"]][
"artist_id"
] = result["artist_id"]
self.sources["artist"][result["artist"]]["album"] = []
except Exception as e:
LOG.warning("Failed to load artist. Exception: {}".format(e))
LOG.info("Loaded artists")
# Album sources
self.sources["album"] = defaultdict(dict)
self.sources["title"] = defaultdict(dict)
for result_albums in self.results:
try:
if not self.sources["album"][result_albums["album"]]:
self.sources["album"][result_albums["album"]][
"album_id"
] = result_albums["album_id"]
self.sources["album"][result_albums["album"]]["title"] = []
self.sources["album"][
"{} by {}".format(
result_albums["album"], result_albums["artist"]
)
]["album_id"] = result_albums["album_id"]
self.sources["album"][
"{} by {}".format(
result_albums["album"], result_albums["artist"]
)
]["title"] = []
self.sources["artist"][result_albums["artist"]][
"album"
].append(result_albums["album_id"])
# Title sources
for result_title in self.results:
try:
if (
result_title["album_id"]
== result_albums["album_id"]
):
self.sources["title"][
result_title["title"]
] = {
"title_id": result_title["id"],
"url": result_title["url"],
}
artist_title = "{} by {}".format(
result_title["title"],
result_title["artist"],
)
self.sources["title"][artist_title] = {
"title_id": result_title["id"],
"url": result_title["url"],
}
self.sources["album"][result_albums["album"]][
"title"
].append(result_title["id"])
self.sources["album"][
"{} by {}".format(
result_albums["album"],
result_albums["artist"],
)
]["title"].append(result_title["id"])
except Exception as e:
LOG.warning(
"Failed to load album. Exception: {}".format(e)
)
LOG.debug(
"Loaded titles for album: {}".format(
result_albums["album"]
)
)
except Exception as e:
LOG.warning("Failed to load album. Exception: {}".format(e))
LOG.info("Loaded albums")
# Genre sources
self.sources["genre"] = defaultdict(dict)
for result_genres in self.results:
try:
if not self.sources["genre"][result_genres["genre"]]:
self.sources["genre"][result_genres["genre"]][
"genre_id"
] = result_genres["genre_id"]
LOG.debug(
"Loaded genre: {}".format(result_genres["genre"])
)
except Exception as e:
LOG.warning("Failed to load genre. Exception: {}".format(e))
LOG.info("Loaded genres")
LOG.info("Saving sources cache")
with gzip.GzipFile(self.sources_cache_filename, "w") as f:
f.write(
json.dumps(
self.sources, sort_keys=True, indent=4, ensure_ascii=False
).encode("utf-8")
)
LOG.info("Saved sources cache")
# Update library cache file if LMS library seems to differ depending on
# library total duration
def update_library_cache(self):
library_cache = False
if isfile(self.library_cache_filename):
if stat(self.library_cache_filename).st_size > 26:
library_cache = True
if (
self.lms.get_library_total_duration()
== self.load_library_total_duration()
and library_cache
):
LOG.info("Library total duration unchanged. Not updating cache.")
return False
else:
LOG.info("Library total duration changed. Updating cache.")
self.save_library_cache()
self.save_library_total_duration()
return True
# Update sources cache file if LMS library seems to differ depending on
# library total duration
def update_sources_cache(self):
sources_cache = False
if isfile(self.sources_cache_filename):
if stat(self.sources_cache_filename).st_size > 26:
sources_cache = True
if (
self.lms.get_library_total_duration()
== self.load_library_total_duration()
and sources_cache
):
LOG.info("Library total duration unchanged. Not updating cache.")
return False
else:
LOG.info("Library total duration changed. Updating cache.")
self.save_sources_cache()
self.save_library_total_duration()
return True
# Play speech dialogue or sound feedback
# (fallback to speech if sound is None)
def play_dialog(self, sound_dialog, speak_dialog_name, data):
if self.speak_dialog_enabled == "True" or not sound_dialog:
self.speak_dialog(speak_dialog_name, data=data)
else:
path = join(abspath(dirname(__file__)), "sounds", sound_dialog)
if isfile(path):
play_wav(path)
else:
self.speak_dialog(speak_dialog_name, data=data)
# Get best playlist match and confidence
def get_best_playlist(self, playlist):
LOG.debug("get_best_playlist: playlist={}".format(playlist))
key, confidence = extractOne(
playlist.lower(),
self.sources["playlist"].keys(),
processor=self.processor,
scorer=self.scorer,
score_cutoff=0,
)
confidence = confidence / 100.0
LOG.debug(
"get_best_playlist: Chose key={}, confidence={}".format(
key, confidence
)
)
return key, confidence
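    # Illustrative fuzzy lookup (made-up data): with playlists
    # {"Morning Jazz": {...}, "Workout": {...}}, a query like
    # get_best_playlist("morning jaz") returns ("Morning Jazz", score) with the
    # 0-100 extractOne score normalised here to roughly 0.9.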
# Get best album match and confidence
def get_best_album(self, album):
LOG.debug("get_best_album: album={}".format(album))
key, confidence = extractOne(
album.lower(),
self.sources["album"].keys(),
processor=self.processor,
scorer=self.scorer,
score_cutoff=0,
)
confidence = confidence / 100.0
LOG.debug(
"get_best_album: Chose key={}, confidence={}".format(
key, confidence
)
)
return key, confidence
# Get best artist match and confidence
def get_best_artist(self, artist):
LOG.debug("get_best_artist: artist={}".format(artist))
key, confidence = extractOne(
artist.lower(),
self.sources["artist"].keys(),
processor=self.processor,
scorer=self.scorer,
score_cutoff=0,
)
confidence = confidence / 100.0
LOG.debug(
"get_best_artist: Chose key={}, confidence={}".format(
key, confidence
)
)
return key, confidence
# Get best favorite match and confidence
def get_best_favorite(self, favorite):
LOG.debug("get_best_favorite: favorite={}".format(favorite))
key, confidence = extractOne(
favorite.lower(),
self.sources["favorite"].keys(),
| |
"is_managed": 1,
"platform": "tdw",
"tags":{"manage":{"geog_area":[{"code":"NA","alias":"北美"}]}},
"data_processing": {
"project_id": 2331,
"processing_id": "2_output",
"processing_alias": "xxx",
"processing_type": "clean",
"created_by ": "xxx",
"created_at": "xxx",
"updated_by": "xxx",
"updated_at": "xxx",
"description": "xxx"
},
"fields": [
{
"id": 13730,
"field_index": 1,
"field_name": "timestamp",
"field_alias": "时间",
"description": "",
"field_type": "timestamp",
"is_dimension": 0,
"origins": "",
"created_by": "admin",
"created_at": null,
"updated_by": null,
"updated_at": null
}
],
"storages": {
"tspider": {
"id": 2,
"storage_cluster": {
"storage_cluster_config_id": 1,
"cluster_name": "xxx",
"cluster_type": "tspider",
"cluster_domain": "xxx",
"cluster_group": "xxx",
"connection_info": "{}",
"priority": 234,
"version": "23423",
"belongs_to": "bkdata"
},
"physical_table_name": "xxx",
"expires": "xxx",
"storage_config": "xxx",
"priority": 1,
"generate_type": "user",
"description": "xxx",
"created_by": "admin",
"created_at": null,
"updated_by": null,
"updated_at": null
},
"kafka": {
"id": 1,
"storage_cluster": {},
"storage_channel": {
"channel_cluster_config_id": 1,
"cluster_name": "xxx",
"cluster_type": "kafka",
"cluster_role": "inner",
"cluster_domain": "xxx",
"cluster_backup_ips": "xxx",
"cluster_port": 2432,
"zk_domain": "127.0.0.1",
"zk_port": 3481,
"zk_root_path": "/abc/defg",
"priority": 234,
"attribute": "bkdata",
"description": "sdfdsf"
},
"physical_table_name": "xxx",
"expires": "xxx",
"storage_config": "xxx",
"priority": 1,
"generate_type": "user",
"description": "xxx",
"created_by": "admin",
"created_at": null,
"updated_by": null,
"updated_at": null
}
}
}
        @apiError 1521020 Result table does not exist
"""
erp = request.query_params.get("erp", None)
result_format = request.query_params.get("result_format", False)
p_related = request.query_params.getlist("related")
get_extra = request.query_params.get("extra", False)
tdw_related = request.query_params.getlist("tdw_related")
check_usability = request.query_params.get("check_usability", False)
tdw_std_access = True if result_table_id.startswith("{}_".format(settings.TDW_STD_BIZ_ID)) else False
related = []
if p_related:
related.extend(p_related)
if not result_table_id:
return Response({})
result_table_ids = [result_table_id]
# 优先用erp语句查询
if erp:
erp_expression = json.loads(erp)
if erp_expression:
if tdw_std_access:
erp_expression["result_table_name"] = "true"
erp_expression["bk_biz_id"] = "true"
backend_type = "dgraph_cold" if tdw_std_access else "dgraph"
erp_args = {"ResultTable": {"expression": erp_expression, "starts": result_table_ids}}
rpc_response = self.entity_query_via_erp(erp_args, backend_type=backend_type)
result = rpc_response.result
if result["ResultTable"]:
return_result = result["ResultTable"][0]
if result_format == "classic":
self.format_classic(return_result)
return Response(return_result)
else:
return Response({})
query_result = parseresult.get_result_table_infos_v3(
result_table_ids=result_table_ids, related=related, only_queryable=False
)
return_result = query_result[0] if query_result else {}
if not return_result:
return Response({})
if settings.ENABLED_TDW and get_extra in ("True", "true"):
return_result["extra"] = {}
from meta.extend.tencent.tdw.views.utils_result_table_mixin import (
check_and_mixin_extra_info,
)
check_and_mixin_extra_info(return_result, tdw_related, check_usability, request)
parseresult.add_manage_tag_to_result_table(return_result)
translate_project_name(return_result)
return Response(return_result)
@staticmethod
def format_classic(return_result):
optimized_result = return_result
if "~ResultTableField.result_table" in optimized_result:
optimized_result["fields"] = []
for item in optimized_result["~ResultTableField.result_table"]:
optimized_result["fields"].append(item)
optimized_result.pop("~ResultTableField.result_table")
if "~TdwTable.result_table" in optimized_result:
optimized_result["extra"] = {}
optimized_result["extra"]["tdw"] = optimized_result["~TdwTable.result_table"][0]
optimized_result.pop("~TdwTable.result_table")
if "~StorageResultTable.result_table" in optimized_result:
optimized_result["storages"] = {}
for item in optimized_result["~StorageResultTable.result_table"]:
if "storage_cluster" in item:
if not item.get("active", True):
continue
storage_instance = (
item["storage_cluster"][0]
if isinstance(item["storage_cluster"], list)
else item["storage_cluster"]
)
optimized_result["storages"][storage_instance["cluster_type"]] = item
item["storage_cluster"] = storage_instance
elif "storage_channel" in item:
if not item.get("active", True):
continue
storage_instance = (
item["storage_channel"][0]
if isinstance(item["storage_channel"], list)
else item["storage_channel"]
)
optimized_result["storages"][storage_instance["cluster_type"]] = item
item["storage_channel"] = storage_instance
# if not optimized_result['storages']:
# raise ERPCriterionError(_('erp表达式未到cluster信息。'))
optimized_result.pop("~StorageResultTable.result_table")
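    # Illustrative shape change performed by format_classic (keys abbreviated):
    #
    #   {"~ResultTableField.result_table": [field, ...], ...}
    #       -> {"fields": [field, ...], ...}
    #   {"~StorageResultTable.result_table": [{"storage_cluster": [...], ...}]}
    #       -> {"storages": {"<cluster_type>": {"storage_cluster": {...}, ...}}}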
@params_valid(serializer=ResultTableUpdateSerializer)
def update(self, request, result_table_id, params):
"""
        @api {put} /meta/result_tables/:result_table_id/ Update result table info
        @apiVersion 0.2.0
        @apiGroup ResultTable
        @apiName update_result_table
        @apiParam {String} bk_username user name
        @apiParam {String} [result_table_name_alias] result table display (Chinese) name
        @apiParam {String="user", "system"} [generate_type] generation type
        @apiParam {String="public","private","sensitive"} [sensitivity] sensitivity
        @apiParam {Number} [count_freq] statistics frequency
        @apiParam {String} [description] result table description
        @apiParam {Object[]} [fields] result table field info
        @apiParam {String='bkdata','tdw'} [platform='bkdata'] platform the result table belongs to.
        @apiParam {String} [is_managed==1] whether the result table is managed
        @apiParam {Object[]} extra extra info updates for other platforms.
        @apiParamExample {json} Parameter example:
{
"bk_username": "zhangshan",
"result_table_name_alias ": "xxx",
"generate_type": "system",
"sensitivity": "public",
"count_freq": 60,
"description": "xxx",
"fields": [
{
"id": 11,
"field_index": 1,
"field_name": "timestamp",
"field_alias": "时间",
"description": "",
"field_type": "timestamp",
"is_dimension": 0,
"origins": ""
}
],
"extra": {
"tdw": {
"pri_part_key": "tdbank_imp_date",
"pri_part_type": "range",
"table_comment": "test1",
"usability":"Grab",
"associated_lz_id": {
"import": 211,
"check": 403
},
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"data": "591_test_table",
"result": true,
"message": "",
"code": 1500200,
"errors": null
}
        @apiError 1500001 Parameter validation failed
        @apiError 1521020 Result table does not exist
        @apiError 1521021 Result table field conflict
"""
try:
result_table = ResultTable.objects.get(result_table_id=result_table_id)
except ResultTable.DoesNotExist:
raise meta_errors.ResultTableNotExistError(message_kv={"result_table_id": result_table_id})
with auto_meta_sync(using="bkdata_basic"):
self.run_update(request, result_table, params)
return Response(result_table_id)
@params_valid(serializer=DestroySerializer)
def destroy(self, request, result_table_id, params):
"""
        @api {delete} /meta/result_tables/:result_table_id/ Delete a result table
@apiVersion 0.2.0
@apiGroup ResultTable
@apiName delete_result_table
        @apiParam {String} bk_username user name
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"data": "591_test_table",
"result": true,
"message": "",
"code": 1500200,
"errors": null
}
        @apiError 1521020 Result table does not exist
        @apiError 1521024 The data processing associated with this result table has not been deleted yet
"""
try:
result_table = ResultTable.objects.get(result_table_id=result_table_id)
except ResultTable.DoesNotExist:
raise meta_errors.ResultTableNotExistError(message_kv={"result_table_id": result_table_id})
        # The associated data processing must be deleted before the result table can be deleted
relations = DataProcessingRelation.objects.filter(data_set_type="result_table", data_set_id=result_table_id)
if relations.count() > 0:
raise meta_errors.ResultTableHasRelationError()
        # Assemble the info of the result table to be deleted, write it into the result_table_del table, and remove the records from result_table and result_table_field
with auto_meta_sync(using="bkdata_basic"):
self.run_destroy(request, result_table)
return Response(result_table_id)
@action(detail=True, methods=["get"], url_path="geog_area")
def get_geog_area(self, request, result_table_id):
"""
        @api {get} /meta/result_tables/:result_table_id/geog_area/ Get the geographic area of the result table
@apiVersion 0.2.0
@apiGroup ResultTable
@apiName get_geog_area_result_table
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": [
{
"code": "inland"
}
]
}
        @apiError 1521020 Result table does not exist
"""
if not str(result_table_id).strip():
raise meta_errors.ResultTableNotExistError(message_kv={"result_table_id": result_table_id})
return Response(parseresult.get_result_table_geog_area("'" + parseresult.escape_string(result_table_id) + "'"))
@action(detail=True, methods=["get"], url_path="fields")
def get_fields(self, request, result_table_id):
"""
        @api {get} /meta/result_tables/:result_table_id/fields/ Get result table field info
@apiVersion 0.2.0
@apiGroup ResultTable
@apiName get_fields_result_table
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": [
{
"id": 13730,
"field_index": 1,
"field_name": "timestamp",
"field_alias": "时间",
"description": "",
"field_type": "timestamp",
"is_dimension": 0,
"origins": "",
"created_by":"admin",
"created_at": null,
"updated_by": null,
"updated_at": null
}
]
}
        @apiError 1521020 Result table does not exist
"""
if not str(result_table_id).strip():
raise meta_errors.ResultTableNotExistError(message_kv={"result_table_id": result_table_id})
return Response(parseresult.get_result_table_fields_v3("'" + parseresult.escape_string(result_table_id) + "'"))
@action(detail=True, methods=["get"], url_path="storages")
def get_storages(self, request, result_table_id):
"""
        @api {get} /meta/result_tables/:result_table_id/storages/ Get result table storage info
@apiVersion 0.2.0
@apiGroup ResultTable
@apiName get_storages_result_table
        @apiParam {String} [cluster_type] storage type (e.g. Druid/TSDB) [optional]
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {
"tspider": {
"id": 2,
"storage_cluster": {
"storage_cluster_config_id": 1,
"cluster_name": "xxx",
"cluster_type": "tspider",
"cluster_domain": "xxx",
"cluster_group": "xxx",
"connection_info": "{}",
"priority": 234,
"version": "23423",
"belongs_to": "bkdata"
},
"physical_table_name": "xxx",
"expires": "xxx",
"storage_config": "xxx",
"priority": 1,
"generate_type": "user",
"description": "xxx",
"created_by":"admin",
"created_at": null,
"updated_by": null,
"updated_at": null
},
"kafka": {
"id": 1,
"storage_cluster": {},
"storage_channel": {
"channel_cluster_config_id": 1,
"cluster_name": "xxx",
"cluster_type": "kafka",
"cluster_role": "inner",
"cluster_domain": "xxx",
"cluster_backup_ips": "xxx",
"cluster_port": 2432,
"zk_domain": "127.0.0.1",
"zk_port": 3481,
"zk_root_path": "/abc/defg",
"priority": 234,
"attribute": "bkdata",
"description": "sdfdsf"
},
"physical_table_name": "xxx",
"expires": "xxx",
"storage_config": "xxx",
"priority": 1,
"generate_type": "user",
"description": "xxx",
"created_by":"admin",
"created_at": null,
"updated_by": null,
"updated_at": null
}
}
}
        @apiError 1521020 Result table does not exist
"""
cluster_type = request.query_params.get("cluster_type")
query_result_dict = parseresult.get_result_table_storages_v3(
"'" + parseresult.escape_string(result_table_id) + "'", cluster_type=cluster_type
)
storages_result_dict = query_result_dict.get(result_table_id, {})
return Response(storages_result_dict)
@action(detail=True, methods=["get"], url_path="extra")
def get_extra(self, request, result_table_id):
"""
        @api {get} /meta/result_tables/:result_table_id/extra Get the extra info specific to a single result table instance
@apiVersion 0.2.0
@apiGroup ResultTable
@apiName retrieve_result_table_extra
        @apiParam {String[]} [tdw_related] tdw-related info to return, chosen from [cols_info, parts_info]
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
"tdw": {
"cluster_id": "lz",
"db_name": "ieg_tdbank",
"table_name": "bkdata_test_dsl_test1_fdt0",
"pri_part_key": "tdbank_imp_date",
"pri_part_type": "range",
"sub_part_key": null,
"sub_part_type": null,
"created_by ": "root",
"created_at": "2019-03-21 10:00:03.143735",
"updated_by": "user_by_blueking",
"updated_at": "2019-03-21 10:00:03.143735",
"table_comment": "test1",
"usability":"OK",
"associated_lz_id": {
"import": 211,
"check": 403
},
"parts_info": [
{
"level": 0,
"part_name": "p_2019041104",
"part_values": [
"2019041105"
]
},
{
"level": 0,
"part_name": "p_2019041106",
"part_values": [
"2019041107"
]
}
],
"cols_info":[ {
"col_name": "tdbank_imp_date",
"col_type": "string",
"col_coment": "partition fields"
},
{
"col_name": "dteventtimestamp",
"col_type": "string",
"col_coment": "dtEventTimeStamp"
},
{
"col_name": "idx",
"col_type": "string",
"col_coment": "idx"
}]
}
"""
tdw_related = request.query_params.getlist("tdw_related")
return_result = {}
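        # TDW extras are only available when the Tencent TDW extension is enabled;
        # otherwise this endpoint simply returns an empty dict.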
if settings.ENABLED_TDW:
from meta.extend.tencent.tdw.models import TdwTable
from meta.extend.tencent.tdw.support import TDWRTSupport
tables = TdwTable.objects.filter(result_table_id=result_table_id).only("table_id").all()
if tables:
return_result = {"tdw": TDWRTSupport.tdw_retrieve(request, tables[0], tdw_related)}
return Response(return_result)
@action(detail=True, methods=["get"], url_path="lineage")
def get_lineage(self, request, result_table_id):
"""
        @api {get} /meta/result_tables/:result_table_id/lineage/ Get the lineage info of a result table
@apiVersion 0.2.0
@apiGroup ResultTable
@apiName get_lineage_result_table
        @apiParam {Number} [depth] Number, default 3 [optional]
        @apiParam {String} [direction] BOTH, INPUT or OUTPUT, default BOTH [optional]
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {
"direction": "BOTH",
"depth": 2,
"nodes": {
"result_table_1":{},
"result_table_2":{},
"result_table_3":{},
"result_table_4":{},
"data_processing_1":{},
"data_processing_2":{}
},
"relations": [
{
"from": "result_table_1",
"to": "data_processing_1"
},
{
"from": "data_processing_1",
"to": "result_table_2"
},
{
"from": "result_table_2",
"to": "data_processing_2"
},
{
"from": "result_table_4",
"to": "data_processing_2"
},
{
"from": "data_processing_2",
"to": "result_table_3"
}
]
}
}
        @apiError 1521020 Result table does not exist
"""
depth = request.query_params.get("depth")
direction = request.query_params.get("direction")
backend_type = request.query_params.get("backend_type")
params = {"type_name": "ResultTable", "qualified_name": result_table_id}
if depth:
params["depth"] = depth
if direction:
params["direction"] = direction
if backend_type:
params["backend_type"] = backend_type
res_lineage = parseresult.get_result_table_lineage_info(params)
return Response(res_lineage)
@action(detail=True, methods=["post", "put", "patch"], url_path="override")
@params_valid(serializer=ResultTableOverrideSerializer)
def override_result_table(self, request, result_table_id, params):
"""
        @api {post/put/patch} /meta/result_tables/:result_table_id/override/ Replace the old result table with a new one (used when changing the result table ID)
@apiVersion 0.2.0
@apiGroup ResultTable
@apiName override_result_table
        @apiParam {String} bk_username Username
        @apiParam {String} result_table_id Result table ID
        @apiParam {Number} bk_biz_id Business ID
        @apiParam {String} result_table_name Result table name
        @apiParam {String} [result_table_name_alias] Result table display name (Chinese)
        @apiParam {String} [result_table_type] Result table type
        @apiParam {String="user", "system"} [generate_type] Generate type
        @apiParam {String="public","private","sensitive"} [sensitivity] Sensitivity
        @apiParam {Number} [count_freq] Statistics frequency
        @apiParam {String} [description] Result table description
        @apiParam {Object[]} [fields] Result table fields
        @apiParam {String[]} tags
# utils/utils.py
"utility methods for generating movies from learners"
from fastai import *
from fastai.vision import *
from fastai.callbacks import *
import shutil
from skimage.filters import gaussian
from skimage.io import imsave
import PIL
import imageio
from scipy.ndimage.interpolation import zoom as npzoom
from .czi import get_czi_shape_info, build_index, is_movie
import czifile
import numpy as np
from fastprogress import progress_bar
from pathlib import Path
import torch
import math
from .multi import MultiImage
from time import sleep
from skimage.util import random_noise
from skimage import filters
from torchvision.models import vgg16_bn
__all__ = ['generate_movies', 'generate_tifs', 'ensure_folder', 'subfolders',
'build_tile_info', 'generate_tiles', 'unet_image_from_tiles_blend',
'get_xy_transforms', 'get_feat_loss', 'unet_image_from_tiles_partialsave',
'draw_random_tile', 'img_to_float', 'img_to_uint8']
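# gram_matrix returns the (n, c, c) Gram matrix of a batch of feature maps, normalised
# by c*h*w; FeatureLoss below uses it to compare texture statistics between prediction
# and target, in the style of fastai's perceptual/feature loss.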
def gram_matrix(x):
n,c,h,w = x.size()
x = x.view(n, c, -1)
return (x @ x.transpose(1,2))/(c*h*w)
class FeatureLoss(nn.Module):
def __init__(self, m_feat, layer_ids, layer_wgts, base_loss=F.l1_loss):
super().__init__()
self.base_loss = base_loss
self.m_feat = m_feat
self.loss_features = [self.m_feat[i] for i in layer_ids]
self.hooks = hook_outputs(self.loss_features, detach=False)
self.wgts = layer_wgts
self.metric_names = ['pixel',] + [f'feat_{i}' for i in range(len(layer_ids))
] + [f'gram_{i}' for i in range(len(layer_ids))]
def make_features(self, x, clone=False):
self.m_feat(x)
return [(o.clone() if clone else o) for o in self.hooks.stored]
def forward(self, input, target):
feat_input = input.repeat(1,3,1,1)
feat_target = target.repeat(1,3,1,1)
base_loss = self.base_loss
out_feat = self.make_features(feat_target, clone=True)
in_feat = self.make_features(feat_input)
self.feat_losses = [base_loss(input,target)]
self.feat_losses += [base_loss(f_in, f_out)*w
for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
self.feat_losses += [base_loss(gram_matrix(f_in), gram_matrix(f_out))*w**2 * 5e3
for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
self.metrics = dict(zip(self.metric_names, self.feat_losses))
return sum(self.feat_losses)
def __del__(self): self.hooks.remove()
def get_feat_loss():
vgg_m = vgg16_bn(True).features.cuda().eval()
requires_grad(vgg_m, False)
blocks = [i-1 for i,o in enumerate(children(vgg_m)) if isinstance(o,nn.MaxPool2d)]
feat_loss = FeatureLoss(vgg_m, blocks[2:5], [5,15,2])
return feat_loss
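# Example (sketch, not from the original file): the feature loss is normally handed to a
# fastai Learner as its loss function. `data` and `arch` are placeholders for a DataBunch
# and a model architecture, i.e. assumptions about the calling code:
#
#   feat_loss = get_feat_loss()
#   learn = unet_learner(data, arch, loss_func=feat_loss)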
def _down_up(x, scale=4, upsample=False, mode='bilinear'):
    # downscale by `scale` with F.interpolate, optionally upsampling back to the input size
    x = F.interpolate(x[None], scale_factor=1/scale)[0]
    if upsample:
        x = F.interpolate(x[None], scale_factor=scale, mode=mode)[0]
    return x
down_up = TfmPixel(_down_up)
def _my_noise_old(x, gauss_sigma:uniform=0.01, pscale:uniform=10):
#print('noise')
#set_trace()
xn = x.numpy()
xorig_max = xn.max()
xn = np.random.poisson(xn*pscale)/pscale
xn += np.random.normal(0, gauss_sigma*xn.std(), size=x.shape)
xn = np.maximum(0,xn)
new_max = xn.max()
if new_max > 0:
xn /= new_max
xn *= xorig_max
x = x.new(xn)
return x
def _my_noise(x, gauss_sigma:uniform=0.01, pscale:uniform=10):
    xn = x.numpy()
    xorig_max = xn.max()
    xn = random_noise(xn, mode='salt', amount=0.005)
    xn = random_noise(xn, mode='pepper', amount=0.005)
    lvar = filters.gaussian(x, sigma=5) + 1e-10
    xn = random_noise(xn, mode='localvar', local_vars=lvar*0.5)
    #xn = np.random.poisson(xn*pscale)/pscale
    #xn += np.random.normal(0, gauss_sigma*xn.std(), size=x.shape)
    # rescale back to the original intensity range before rebuilding the tensor,
    # otherwise the renormalisation would have no effect on the returned value
    new_max = xn.max()
    if new_max > 0:
        xn /= new_max
    xn *= xorig_max
    x = x.new(xn)
    return x
my_noise = TfmPixel(_my_noise)
def get_xy_transforms(max_rotate=10., min_zoom=1., max_zoom=2., use_cutout=False, use_noise=False, xtra_tfms = None,
gauss_sigma=(0.01,0.05), pscale=(5,30)):
base_tfms = [[
rand_crop(),
dihedral_affine(),
rotate(degrees=(-max_rotate,max_rotate)),
rand_zoom(min_zoom, max_zoom)
],
[crop_pad()]]
y_tfms = [[tfm for tfm in base_tfms[0]], [tfm for tfm in base_tfms[1]]]
x_tfms = [[tfm for tfm in base_tfms[0]], [tfm for tfm in base_tfms[1]]]
if use_cutout: x_tfms[0].append(cutout(n_holes=(5,10)))
if use_noise:
x_tfms[0].append(my_noise(gauss_sigma=gauss_sigma, pscale=pscale))
#x_tfms[1].append(my_noise(gauss_sigma=(0.01,0.05),pscale=(5,30)))
if xtra_tfms:
for tfm in xtra_tfms:
x_tfms[0].append(tfm)
return x_tfms, y_tfms
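# Example (sketch): build paired transform lists where only the input (x) pipeline gets
# the synthetic degradations, while the target (y) keeps the clean augmentations:
#
#   x_tfms, y_tfms = get_xy_transforms(use_noise=True, use_cutout=False)
#
# Each returned value follows fastai v1's (train_tfms, valid_tfms) list-of-lists layout.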
def make_mask(shape, overlap, top=True, left=True, right=True, bottom=True):
mask = np.full(shape, 1.)
if overlap > 0:
h,w = shape
        for i in range(shape[0]):
for j in range(shape[1]):
if top: mask[i,j] = min((i+1)/overlap, mask[i,j])
if bottom: mask[h-i-1,j] = min((i+1)/overlap, mask[h-i-1,j])
if left: mask[i,j] = min((j+1)/overlap, mask[i,j])
if right: mask[i,w-j-1] = min((j+1)/overlap, mask[i,w-j-1])
return mask.astype(np.uint8)
def unet_image_from_tiles_partialsave(learn, in_img, tile_sz=(256, 256), scale=(4, 4), overlap_pct=(0.50, 0.50), img_info=None):
"""
This function run inference on a trained model and removes tiling artifacts.
Input:
- learn: learner
- in_img: input image (2d/3d), floating array
- tile_sz: XY dimension of the small tile that will be fed into GPU [p q]
- scale: upsampling scale
- overlap_pct: overlap percent while cropping the tiles in xy dimension [alpha beta],
floating tuple, ranging from 0 to 1
- img_info: mi, ma, max
Output:
- predicted image (2d), ranging from 0 to 1
"""
n_frames = in_img.shape[0]
if img_info:
mi, ma, imax = [img_info[fld] for fld in ['mi','ma','img_max']]
in_img = ((in_img - mi) / (ma - mi + 1e-20)).clip(0.,1.)
else:
mi, ma = 0., 1.
in_img = np.stack([npzoom(in_img[i], scale, order=1) for i in range(n_frames)])
Y, X = in_img.shape[1:3]
p, q = tile_sz[0:2]
alpha, beta = overlap_pct[0:2]
print('Y,X=',Y,X)
    assembled = np.zeros((Y, X))  # indexed as [y, x] when tiles are written back
# X = p + (m - 1) * (1 - alpha) * p + epsilonX
numX, epsX = divmod(X-p, p-int(p*alpha)) if X-p > 0 else (0, X)
numY, epsY = divmod(Y-q, q-int(q*beta)) if Y-q > 0 else (0, Y)
numX = int(numX)+1
numY = int(numY)+1
for i in range(numX+1):
for j in range(numY+1):
crop_x_start = int(i*(1-alpha)*p)
crop_x_end = min(crop_x_start+p, X)
crop_y_start = int(j*(1-beta)*q)
crop_y_end = min(crop_y_start+q, Y)
src_tile = in_img[:, crop_y_start:crop_y_end, crop_x_start:crop_x_end]
in_tile = torch.zeros((p, q, n_frames))
in_x_size = crop_x_end - crop_x_start
in_y_size = crop_y_end - crop_y_start
if (in_y_size, in_x_size) != src_tile.shape[1:3]: set_trace()
in_tile[0:in_y_size, 0:in_x_size, :] = tensor(src_tile).permute(1,2,0)
if n_frames > 1:
img_in = MultiImage([Image(in_tile[:,:,i][None]) for i in range(n_frames)])
else:
img_in = Image(in_tile[:,:,0][None])
y, pred, raw_pred = learn.predict(img_in)
out_tile = pred.numpy()[0]
tileROI_x_start = int(0.5*int(alpha*p)) if crop_x_start != 0 else 0
tileROI_x_end = int(p-0.5*int(alpha*p)) if crop_x_end != X else int(alpha*p+epsX)
tileROI_y_start = int(0.5*int(beta*q)) if crop_y_start != 0 else 0
tileROI_y_end = int(q-0.5*int(beta*q)) if crop_y_end != Y else int(beta*q+epsY)
tileROI_x_end = X if X-q < 0 else tileROI_x_end
tileROI_y_end = Y if Y-p < 0 else tileROI_y_end
out_x_start = int(p-0.5*int(alpha*p)+(i-1)*(p-int(alpha*p))) if crop_x_start != 0 else 0
out_x_end = int(p-0.5*int(alpha*p)+i*(p-int(alpha*p))) if crop_x_end != X else X
out_y_start = int(q-0.5*int(beta*q)+(j-1)*(q-int(beta*q))) if crop_y_start != 0 else 0
out_y_end = int(q-0.5*int(beta*q)+j*(q-int(beta*q))) if crop_y_end != Y else Y
assembled[out_y_start:out_y_end, out_x_start:out_x_end] = out_tile[tileROI_y_start:tileROI_y_end, tileROI_x_start:tileROI_x_end]
assembled -= assembled.min()
assembled /= assembled.max()
assembled *= (ma - mi)
assembled += mi
return assembled.astype(np.float32)
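# Example (sketch): tiled inference on a (frames, H, W) float stack. `learn` is a trained
# fastai Learner and `img_info` carries the normalisation stats ('mi', 'ma', 'img_max')
# used at training time; both are assumptions about the calling code:
#
#   pred = unet_image_from_tiles_partialsave(learn, stack, tile_sz=(256, 256),
#                                            scale=(4, 4), overlap_pct=(0.5, 0.5))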
def unet_multi_image_from_tiles(learn, in_img, tile_sz=128, scale=4, wsize=3):
cur_size = in_img.shape[1:3]
c = in_img.shape[0]
new_size = (cur_size[0] * scale, cur_size[1] * scale)
w, h = cur_size
in_tile = torch.zeros((c, tile_sz // scale, tile_sz // scale))
out_img = torch.zeros((1, w * scale, h * scale))
tile_sz //= scale
for x_tile in range(math.ceil(w / tile_sz)):
for y_tile in range(math.ceil(h / tile_sz)):
            x_start = x_tile * tile_sz
x_end = min(x_start + tile_sz, w)
y_start = y_tile * tile_sz
y_end = min(y_start + tile_sz, h)
in_tile[:, 0:(x_end - x_start), 0:(y_end - y_start)] = tensor(
in_img[:, x_start:x_end, y_start:y_end])
img_list = [
Image(tensor(npzoom(in_tile[i], scale, order=1))[None])
for i in range(wsize)
]
#img_list += img_list
tlist = MultiImage(img_list)
out_tile, _, _ = learn.predict(tlist)
out_x_start = x_start * scale
out_x_end = x_end * scale
out_y_start = y_start * scale
out_y_end = y_end * scale
#print("out: ", out_x_start, out_y_start, ",", out_x_end, out_y_end)
in_x_start = 0
in_y_start = 0
in_x_end = (x_end - x_start) * scale
in_y_end = (y_end - y_start) * scale
#print("tile: ",in_x_start, in_y_start, ",", in_x_end, in_y_end)
out_img[:, out_x_start:out_x_end, out_y_start:
out_y_end] = out_tile.data[:, in_x_start:in_x_end,
in_y_start:in_y_end]
return out_img
# take float in with info about mi,ma,max in and spits out (0-1.0)
def unet_image_from_tiles_blend(learn, in_img, tile_sz=256, scale=4, overlap_pct=5.0, img_info=None):
n_frames = in_img.shape[0]
if img_info:
mi, ma, imax, real_max = [img_info[fld] for fld in ['mi','ma','img_max','real_max']]
in_img /= real_max
# in_img = ((in_img - mi) / (ma - mi + 1e-20)).clip(0.,1.)
else:
mi, ma, imax, real_max = 0., 1., 1., 1.
in_img = np.stack([npzoom(in_img[i], scale, order=1) for i in range(n_frames)])
overlap = int(tile_sz*(overlap_pct/100.) // 2 * 2)
step_sz = tile_sz - overlap
h,w = in_img.shape[1:3]
assembled = np.zeros((h,w))
x_seams = set()
y_seams = set()
for x_tile in range(0,math.ceil(w/step_sz)):
for y_tile in range(0,math.ceil(h/step_sz)):
x_start = x_tile*step_sz
x_end = min(x_start + tile_sz, w)
y_start = y_tile*step_sz
y_end = min(y_start + tile_sz, h)
src_tile = in_img[:,y_start:y_end,x_start:x_end]
in_tile = torch.zeros((tile_sz, tile_sz, n_frames))
in_x_size = x_end - x_start
in_y_size = y_end - y_start
if (in_y_size, in_x_size) != src_tile.shape[1:3]: set_trace()
in_tile[0:in_y_size, 0:in_x_size, :] = tensor(src_tile).permute(1,2,0)
if n_frames > 1:
img_in = MultiImage([Image(in_tile[:,:,i][None]) for i in range(n_frames)])
else:
img_in = Image(in_tile[:,:,0][None])
y, pred, raw_pred = learn.predict(img_in)
out_tile = pred.numpy()[0]
half_overlap = overlap // 2
left_adj = half_overlap if x_start != 0 else 0
right_adj = half_overlap if x_end != w else 0
top_adj = half_overlap if y_start != 0 else 0
bot_adj = half_overlap if y_end != h else 0
trim_y_start = y_start + top_adj
trim_x_start = x_start + left_adj
trim_y_end = y_end - bot_adj
trim_x_end = x_end - right_adj
out_x_start = left_adj
out_y_start = top_adj
out_x_end = in_x_size - right_adj
out_y_end = in_y_size - bot_adj
assembled[trim_y_start:trim_y_end, trim_x_start:trim_x_end] = out_tile[out_y_start:out_y_end, out_x_start:out_x_end]
            if trim_x_start != 0: x_seams.add(trim_x_start)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# zlbupt/tablereport
from __future__ import unicode_literals
from openpyxl import Workbook
from tablereport import *
from tablereport.shortcut import write_to_excel
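# These tests exercise the public tablereport API end to end: Table construction,
# area selection via ColumnSelector/RowSelector, grouping, merging and summary rows.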
def test_table_initialize():
table = Table(
header=[['test', None, None], ['header1', 'header2', 'header3']],
body=[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
)
assert table.width == 3
assert table.height == 5
assert list(table.header) == [['test', None, None],
['header1', 'header2', 'header3']]
assert list(table.body) == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
def test_initialize_table_with_empty_headers_and_empty_body():
table = Table(header=[], body=[])
assert table.width == 0
assert table.height == 0
def test_column_selector_select_right_area_of_area():
table = Table(body=[[1, 2, 3, ], [4, 5, 6], [7, 8, 9]])
area = Area(table, 3, 3, (0, 0))
sub_area = area.select(ColumnSelector(lambda col: col == 2)).one()
assert sub_area.height == 3
assert sub_area.width == 1
assert sub_area.position == (0, 1)
def test_column_selector_select_right_area_of_area_2():
table = Table(body=[[1, 2, 3, ], [4, 5, 6], [7, 8, 9]])
area = Area(table, 3, 3, (0, 0))
areas = area.select(ColumnSelector(lambda col: col % 2))
assert len(areas) == 2
assert areas[0].position == (0, 0)
assert areas[1].position == (0, 2)
def test_column_selector_select_right_area_of_area_3():
table = Table(body=[[1, 2, 3, ], [4, 5, 6], [7, 8, 9]])
area = Area(table, 3, 3, (0, 0))
sub_area = area.select(ColumnSelector(lambda col: col == 2, width=2)).one()
assert sub_area.height == 3
assert sub_area.width == 2
assert sub_area.position == (0, 1)
def test_column_selector_select_right_area_of_table():
table = Table(header=[['header1', 'header2', 'header3']],
body=[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
area = table.body.select(ColumnSelector(lambda col: col == 2)).one()
assert area.height == 3
assert area.width == 1
assert area.position == (1, 1)
def test_row_selector_select_right_area_of_area():
table = Table(body=[[1, 2, 3, ], [4, 5, 6], [7, 8, 9]])
area = Area(table, 3, 3, (0, 0))
sub_area = area.select(RowSelector(lambda row: row == 2)).one()
assert sub_area.height == 1
assert sub_area.width == 3
assert sub_area.position == (1, 0)
def test_row_selector_select_right_area_of_area_2():
table = Table(body=[[1, 2, 3, ], [4, 5, 6], [7, 8, 9]])
area = Area(table, 3, 3, (0, 0))
areas = area.select(RowSelector(lambda row: row % 2))
assert len(areas) == 2
assert areas[0].position == (0, 0)
assert areas[1].position == (2, 0)
def test_row_selector_select_right_area_of_table():
table = Table(header=[['header1', 'header2', 'header3']],
body=[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
area = table.body.select(RowSelector(lambda row: row == 2)).one()
assert area.height == 1
assert area.width == 3
assert area.position == (2, 0)
def test_values_in_area_of_table_selected_by_column_selector():
table = Table(header=[['header1', 'header2', 'header3']],
body=[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
area = table.body.select(ColumnSelector(lambda col: col == 2)).one()
assert area.data == [[2], [5], [8]]
def test_column_selector_could_group_selected_areas():
table = Table(header=[['header1', 'header2', 'header3']],
body=[[1, 2, 3], [1, 2, 4], [1, 3, 5], [2, 3, 4]])
areas = table.body.select(ColumnSelector(lambda col: col == 1))
areas = areas.group()
assert len(areas) == 2
assert areas[0].width == 1
assert areas[0].height == 3
assert areas[0].position == (1, 0)
assert areas[1].width == 1
assert areas[1].height == 1
assert areas[1].position == (4, 0)
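    # group() splits the selected column into contiguous runs of equal values; each run
    # becomes its own Area, which is what makes the per-group merge/summary tests below work.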
def test_modify_area_is_equivalent_to_modify_table():
table = Table(header=[['header1', 'header2', 'header3']],
body=[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
area = table.body.select(ColumnSelector(lambda col: col == 2)).one()
# area.data == [[2], [5], [8]]
area.data[0][0] = 3
area.data[1][0] = 6
area.data[2][0] = 9
assert area.data == [[3], [6], [9]]
def test_areas_is_list_like_object():
areas = Areas()
areas.append(1)
areas.append(2)
assert len(areas) == 2
assert areas[0] == 1
assert areas[1] == 2
def test_get_area_data():
area = Table(body=[[1, 2, 3, ], [4, 5, 6], [7, 8, 9]])
area = area.select(ColumnSelector(lambda col: col == 2)).one()
assert area.data[0][0] == 2
assert area.data[1][0] == 5
assert area.data[2][0] == 8
assert area.data == [[2], [5], [8]]
def test_set_area_data():
area = Table(body=[[1, 2, 3, ], [4, 5, 6], [7, 8, 9]])
sub_area = area.select(ColumnSelector(lambda col: col == 2)).one()
sub_area.data[0][0] = 1
sub_area.data[1][0] = 2
sub_area.data[2][0] = 3
assert sub_area.data[0][0] == 1
assert sub_area.data[1][0] == 2
assert sub_area.data[2][0] == 3
assert sub_area.data == [[1], [2], [3]]
def test_merge_areas_1():
table = Table(header=[['header1', 'header2', 'header3']],
body=[[1, 2, 3], [1, 2, 4], [1, 3, 5], [2, 3, 4]])
areas = table.body.select(ColumnSelector(lambda col: col == 1))
areas.group().merge()
assert table.data == [['header1', 'header2', 'header3'],
[1, 2, 3], [None, 2, 4], [None, 3, 5], [2, 3, 4]]
def test_merge_areas_2():
table = Table(header=[['header1', 'header2', 'header3']],
body=[[1, 2, 3], [1, 2, 4], [1, 3, 5], [2, 3, 4], [2, 4, 5]])
areas = table.body.select(ColumnSelector(lambda col: col == 1))
areas.group().merge()
assert table.data == [['header1', 'header2', 'header3'],
[1, 2, 3], [None, 2, 4], [None, 3, 5], [2, 3, 4],
[None, 4, 5]]
def test_add_summary_at_left_side_will_modify_table():
table = Table(header=[['header1', 'header2', 'header3']],
body=[[1, 2, 3], [1, 2, 4], [1, 3, 5], [2, 3, 4], [2, 4, 5]])
column = table.body.select(ColumnSelector(lambda col: col == 1)).one()
column.group().merge().left.summary(label_span=1, label='total')
assert table.data == [['header1', 'header2', 'header3'],
[1, 2, 3], [None, 2, 4], [None, 3, 5],
[None, 'total', 12],
[2, 3, 4], [None, 4, 5], [None, 'total', 9]]
def test_add_summary_below_will_modify_table():
table = Table(header=[['header1', 'header2', 'header3']],
body=[[1, 2, 3], [1, 2, 4], [1, 3, 5], [2, 3, 4], [2, 4, 5]])
column = table.body.select(ColumnSelector(lambda col: col == 1)).one()
column.group().merge().summary(label_span=2, label='total')
assert table.data == [['header1', 'header2', 'header3'],
[1, 2, 3], [None, 2, 4], [None, 3, 5],
['total', None, 12],
[2, 3, 4], [None, 4, 5], ['total', None, 9]]
def test_add_summary_below_will_modify_areas():
table = Table(header=[['header1', 'header2', 'header3']],
body=[[1, 2, 3], [1, 2, 4], [1, 3, 5], [2, 3, 4], [2, 4, 5]])
areas = table.body.select(ColumnSelector(lambda col: col == 1))
areas = areas.group().merge()
areas.summary(label_span=2, label='total')
area1 = areas[0]
assert area1.width == 1
assert area1.height == 4
assert area1.position == (1, 0)
area2 = areas[1]
assert area2.width == 1
assert area2.height == 3
assert area2.position == (5, 0)
def test_add_summary_only_below_entire_table_will_modify_area():
table = Table(header=[['header1', 'header2', 'header3']],
body=[[1, 2, 3], [1, 2, 4], [1, 3, 5], [2, 3, 4], [2, 4, 5]])
area = Area(table, 3, 5, (1, 0))
area.summary(label_span=2, label='total')
assert area.width == 3
assert area.height == 6
assert area.position == (1, 0)
def test_add_summary_only_below_entire_table_will_modify_table():
table = Table(header=[['header1', 'header2', 'header3']],
body=[[1, 2, 3], [1, 2, 4], [1, 3, 5], [2, 3, 4], [2, 4, 5]])
area = Area(table, 3, 5, (1, 0))
area.summary(label_span=2, label='total')
assert area.data == [[1, 2, 3], [1, 2, 4], [1, 3, 5], [2, 3, 4], [2, 4, 5],
['total', None, 21]]
def test_add_nested_summary_will_modify_table():
table = Table(header=[['header1', 'header2', 'header3']],
body=[[1, 2, 3], [1, 2, 4], [1, 3, 5], [2, 3, 4], [2, 4, 5]])
areas = table.body.select(ColumnSelector(lambda col: col == 1))
areas.group().merge().left.summary(label_span=1, label='total')
area = Area(table, 3, 7, (1, 0))
area.summary(label_span=2, label='total')
assert table.data == [['header1', 'header2', 'header3'],
[1, 2, 3], [None, 2, 4], [None, 3, 5],
[None, 'total', 12],
[2, 3, 4], [None, 4, 5], [None, 'total', 9],
['total', None, 21]]
assert table.width == 3
assert table.height == 9
def test_each_elem_in_table_is_encapsulated_as_cell():
table = Table(body=[[1, 2, ], [4, 5, ]])
assert table.data[0][0] == Cell(1)
assert table.data[0][1] == Cell(2)
assert table.data[1][0] == Cell(4)
assert table.data[1][1] == Cell(5)
def test_set_cell_width_when_initialize_table():
table = Table(header=[['test', None], ['header1', 'header2']],
body=[[1, 2], ])
assert list(table) == [[Cell('test', width=2), None],
[Cell('header1'), Cell('header2')],
[Cell(1), Cell(2)]]
def test_iter_table_will_get_cell_list():
table = Table(body=[[1, 2, ], [4, 5, ]])
cells = [[Cell(1), Cell(2)], [Cell(4), Cell(5)]]
assert cells == list(table)
def test_merge_area_will_modify_cell():
table = Table(header=[['header1', 'header2']],
body=[[1, 2], [1, 3], [2, 3]])
areas = table.body.select(ColumnSelector(lambda col: col == 1))
areas.group().merge()
cells = [[Cell('header1'), Cell('header2')], [Cell(1, height=2), Cell(2)],
[None, Cell(3)],
[Cell(2), Cell(3)]]
assert cells == list(table)
def test_merge_areas_of_three_columns():
table = Table(header=[['header1', 'header2', 'header3', 'header4']],
body=[[1, 2, 3, 4], [1, 2, 3, 5], [1, 2, 3, 6]])
areas = table.body.select(ColumnSelector(lambda col: col <= 3))
areas.group().merge()
cells = [
[Cell('header1'), Cell('header2'), Cell('header3'), Cell('header4')],
[Cell(1, height=3), Cell(2, height=3), Cell(3, height=3), Cell(4)],
[None, None, None, Cell(5)],
[None, None, None, Cell(6)]]
assert cells == list(table)
def test_merge_areas_of_three_columns_2():
table = Table(header=[['header1', 'header2', 'header3', 'header4']],
body=[[1, 2, 3, 5], [1, 2, 3, 9], [1, 2, 33, 6],
[1, 2, 33, 1],
[1, 22, 3, 2], [1, 22, 3, 4], [1, 22, 33, 3],
[1, 22, 33, 2]])
areas = table.body.select(ColumnSelector(lambda col: col <= 3))
areas.group().merge()
cells = [
[Cell('header1'), Cell('header2'), Cell('header3'), Cell('header4')],
[Cell(1, height=8), Cell(2, height=4), Cell(3, height=2), Cell(5)],
[None, None, None, Cell(9)],
[None, None, Cell(33, height=2), Cell(6)],
[None, None, None, Cell(1)],
[None, Cell(22, height=4), Cell(3, height=2), Cell(2)],
[None, None, None, Cell(4)],
[None, None, Cell(33, height=2), Cell(3)],
[None, None, None, Cell(2)]]
assert cells == list(table)
def test_merge_areas_of_three_columns_without_headers():
table = Table(header=[],
| |
return input
none_mod = torch.jit.script(Mod(None))
double_mod = torch.jit.script(Mod(Double()))
self.assertEqual(none_mod(torch.tensor(1)), torch.tensor(1))
self.assertEqual(double_mod(torch.tensor(1)), torch.tensor(1) * 2)
def test_device_kwarg(self):
from torch import device
def f():
return device(type='cuda'), torch.device(type='cpu')
self.checkScript(f, ())
def test_script_module_export_tensor_type(self):
class M(torch.jit.ScriptModule):
def __init__(self, type):
super(M, self).__init__()
self.param = torch.nn.Parameter(torch.zeros((5, 5), dtype=type).random_())
@torch.jit.script_method
def foo(self):
return self.param
with torch.jit.optimized_execution(False):
for type in [torch.float, torch.double]:
m_orig = M(type)
m_import = self.getExportImportCopy(m_orig)
# check to make sure the storage wasn't resized
self.assertTrue(m_orig.param.storage().size() == 25)
self.assertEqual(m_orig.foo(), m_import.foo())
self.assertTrue(m_orig.foo().dtype == m_import.foo().dtype)
@unittest.skipIf(not RUN_CUDA, "testing cuda tensors require CUDA")
def test_script_module_export_tensor_cuda(self):
class M(torch.jit.ScriptModule):
def __init__(self):
super(M, self).__init__()
self.param = torch.nn.Parameter(torch.zeros((5, 5), device='cuda:0').random_())
@torch.jit.script_method
def foo(self):
return self.param
m_orig = M()
m_import = self.getExportImportCopy(m_orig)
# check to make sure the storage wasn't resized
self.assertTrue(m_orig.param.storage().size() == 25)
self.assertTrue(m_import.foo().device == torch.device('cuda:0'))
self.assertEqual(m_orig.foo(), m_import.foo())
self.assertTrue(m_orig.foo().dtype == m_import.foo().dtype)
def test_script_module_export_blocks(self):
class M(torch.jit.ScriptModule):
def __init__(self, n, m):
super(M, self).__init__()
self.weight = torch.nn.Parameter(torch.rand(n, m))
@torch.jit.script_method
def forward(self, input):
if bool(input.sum() > 0):
output = self.weight.mv(input)
else:
output = self.weight + input
return output
m_orig = M(200, 200)
m_import = self.getExportImportCopy(m_orig)
t = torch.rand(200)
self.assertEqual(m_orig(t), m_import(t))
def test_script_module_export_shared_storage(self):
class M(torch.jit.ScriptModule):
def __init__(self):
super(M, self).__init__()
self.param1 = torch.nn.Parameter(torch.rand(5, 5))
self.param2 = torch.nn.Parameter(self.param1[3])
self.param3 = torch.nn.Parameter(torch.rand(5, 5))
self.param4 = torch.nn.Parameter(torch.rand(11, 5)[1:6])
@torch.jit.script_method
def foo(self):
return self.param1 + self.param2 + self.param3 + self.param4
with torch.jit.optimized_execution(False):
m_orig = M()
m_import = self.getExportImportCopy(m_orig)
self.assertEqual(m_orig.foo(), m_import.foo())
self.assertTrue(m_import.param1.storage().data_ptr() == m_import.param2.storage().data_ptr())
self.assertTrue(m_import.param1.storage().data_ptr() != m_import.param3.storage().data_ptr())
def test_sequential_intermediary_types(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
def forward(self, x):
return x + 3
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
def forward(self, x):
return {"1": x}
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.foo = torch.nn.Sequential(A(), B())
def forward(self, x):
return self.foo(x)
self.checkModule(C(), (torch.tensor(1),))
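        # checkModule scripts C() and compares eager vs. scripted outputs; the Sequential
        # chains modules whose outputs have different types (Tensor from A, then
        # Dict[str, Tensor] from B), exercising type propagation through nn.Sequential.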
def test_ellipsis_const_mid(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[2, Ellipsis, 0:4, 4:8].size()
dummy = torch.zeros(8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_const_mid_select(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[2, Ellipsis, 4, 4, 4:8, 2].size()
dummy = torch.zeros(8, 8, 8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_const_start(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[Ellipsis, 0:4, 4:8].size()
dummy = torch.zeros(8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_const_end(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[0:4, 2, Ellipsis].size()
dummy = torch.zeros(8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_mid(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[2, ..., 0:4, 4:8].size()
dummy = torch.zeros(8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_mid_select(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[2, ..., 4, 4, 4:8, 2].size()
dummy = torch.zeros(8, 8, 8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_start(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[..., 0:4, 4:8].size()
dummy = torch.zeros(8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_ellipsis_end(self):
def ellipsize(x):
# type: (Tensor) -> List[int]
return x[0:4, 2, ...].size()
dummy = torch.zeros(8, 8, 8, 8, 8)
self.checkScript(ellipsize, (dummy,), optimize=True)
def test_torch_manual_seed(self):
with freeze_rng_state():
def test():
torch.manual_seed(2)
return torch.rand(1)
script = torch.jit.script(test)
self.assertEqual(test(), script())
graph = script.graph_for()
FileCheck().check("aten::manual_seed").run(graph)
def test_index_select_shape_prop(self):
@torch.jit.script
def foo(x, y):
return torch.index_select(x, index=y, dim=1)
a = torch.zeros(2, 2)
b = torch.zeros(4, dtype=torch.long)
torch._C._jit_pass_complete_shape_analysis(foo.graph, (a, b), False)
FileCheck().check("Double(2, 4, strides=[4, 1], requires_grad=0, device=cpu)").run(str(foo.graph))
def test_shape_analysis_loop(self):
def foo(a, b, x):
c = a
# on the first iteration of the loop it appears that
# c should have a expand to the size of b
# but on the second+ iterations, there is no broadcast and the
# sizes are different.
# previously this would cause the compiler to (1) enter an infinite
# loop trying to compute the shape, and (2) insert invalid
# broadcasts.
# this test ensure we don't regress on these issues
for _ in range(2):
a = c + b
c = x
b = x
return a
self.checkScript(foo, (torch.zeros(1), torch.zeros(4), torch.zeros(5)), optimize=False)
def test_intlist_args(self):
def func_1(x):
return torch.nn.functional.adaptive_avg_pool1d(x, 1)
def func_2(x):
return torch.nn.functional.adaptive_avg_pool1d(x, output_size=1)
def func_3(x):
return torch.nn.functional.adaptive_avg_pool1d(x, output_size=[1])
x = torch.randn(8, 8, 8)
self.checkScript(func_1, [x], optimize=True)
self.checkScript(func_2, [x], optimize=True)
self.checkScript(func_3, [x], optimize=True)
def test_wrong_implicit_expand(self):
@_trace(torch.zeros(3), torch.zeros(1))
def foo(a, b):
return a + b
a = torch.rand(4)
b = torch.rand(4)
self.assertEqual(a + b, foo(a, b))
def test_builtin_args_fails(self):
with self.assertRaisesRegex(RuntimeError, 'Argument self not provided'):
@torch.jit.script
def f1(a):
torch.sum(foo=4)
with self.assertRaisesRegex(RuntimeError, 'specified twice'):
@torch.jit.script
def f2(a):
torch.sum(a, self=a)
with self.assertRaisesRegex(RuntimeError, 'not provided'):
@torch.jit.script
def f3(a):
torch.sum(dim=4)
with self.assertRaisesRegex(RuntimeError, 'for argument \'tensors\' but instead found type \'Tensor'):
@torch.jit.script
def f4(a):
torch.cat(a)
with self.assertRaisesRegex(RuntimeError, r'argument \'tensors\' but instead found type \'List\[int\]'):
@torch.jit.script
def f5(a):
torch.cat([3])
with self.assertRaisesRegex(RuntimeError, r'Expected a value of'
r' type \'List\[int\]\' for argument'
r' \'size\' but instead found type '
r'\'List\[Union\[List\[int\], int\]\]'):
@torch.jit.script
def f6(a):
a.expand(size=[3, [4]])
def test_builtin_args(self):
def t0(a):
# default arg dim
return torch.cat([a, a])
self.checkScript(t0, (torch.zeros(1, 1),))
def t1(a):
# keywords out of order
return torch.cat(dim=1, tensors=[a, a])
self.checkScript(t1, (torch.zeros(1, 1, 2),))
def t2(a):
# mix const/non-const attributes
if 1 == 1:
b = 1
else:
b = 0
return torch.sum(a, dim=b, keepdim=False)
self.checkScript(t2, (torch.zeros(1, 1, 2),))
def test_parser_type_annotations(self):
cu = torch.jit.CompilationUnit('''
def foo(x : Tensor, y : Tuple[Tuple[Tensor, Tensor], Tensor]) -> Tuple[Tensor, Tensor]:
return x, x
''')
self.assertExpected(str(cu.foo.schema))
def test_parser_type_annotations_comment(self):
cu = torch.jit.CompilationUnit('''
def foo(x, y):
# type: (Tensor, Tuple[Tuple[Tensor, Tensor], Tensor]) -> Tuple[Tensor, Tensor]
return x, x
''')
self.assertExpected(str(cu.foo.schema))
def test_parser_type_annotations_unknown_type(self):
with self.assertRaisesRegex(RuntimeError, "Unknown type name 'Foo'"):
cu = torch.jit.CompilationUnit('''
def foo(x : Tensor, y : Tuple[Tuple[Foo, Tensor], Tensor]) -> Tuple[Tensor, Tensor]:
return x, x
''')
def test_parser_type_annotations_subscript_non_ident(self):
with self.assertRaisesRegex(RuntimeError, r'Subscripted type must be a type identifier'):
cu = torch.jit.CompilationUnit('''
def foo(x : Tensor, y : Tuple[Tensor, Tensor][Tensor]) -> Tuple[Tensor, Tensor]:
return x, x
''')
def test_parser_type_annotations_subscript_tensor(self):
with self.assertRaisesRegex(RuntimeError, r'Unknown type constructor Tensor'):
cu = torch.jit.CompilationUnit('''
def foo(x : Tensor, y : Tensor[Tensor, Tensor]) -> Tuple[Tensor, Tensor]:
return x, x
''')
def test_parser_type_annotations_incompatible_expression(self):
with self.assertRaisesRegex(RuntimeError, r'Expression of type \+ cannot be used in a type expression'):
cu = torch.jit.CompilationUnit('''
def foo(x : Tensor, y : Tuple[3 + 4, Tensor]) -> Tuple[Tensor, Tensor]:
return x, x
''')
def test_gather_dynamic_index(self):
def t(x):
gather1 = x[0]
idx = 0 + 1
gather2 = x[idx]
return gather1 + gather2
self.checkScript(t, (torch.zeros(3, 2, 3),))
def test_torch_ignore_conversion_to_none(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
@torch.jit.ignore
def ignored(self, a: int) -> None:
l: int = len([2 for i in range(a) if i > 2])
return
def forward(self) -> int:
a: int = 4
b: int = 5
self.ignored(a)
return a + b
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
@torch.jit.ignore
def ignored(self, a: int):
l: int = len([2 for i in range(a) if i > 2])
return
def forward(self) -> int:
a: int = 4
b: int = 5
self.ignored(a)
return a + b
modelA = torch.jit.script(A())
self.assertEqual(modelA(), 9)
modelB = torch.jit.script(B())
self.assertEqual(modelB(), 9)
def test_addmm_grad(self):
""" This test checks several things:
1. An expand node was inserted before the addmm operating on the
bias term.
2. The fused form of addmm appears in the ultimate graph that's
executed.
3. A sum op was emitted for accumulating gradients along the 0th
(expanded) dimension of the bias term.
4. The correct symbolic representation for the backward pass of the
mm operator was emitted (x.t() -> mm)
TODO: we should actually check these conditions once we have a way
to dump the GraphExecutor state. Namely the processed forward graph
and the backward graph.
"""
@torch.jit.script
def addmm_grad_test(b, x, w):
return torch.addmm(b, x, w)
# Initialize param and input values
w_init = torch.rand(2, 5)
b_init = torch.rand(5)
x = torch.rand(3, 2)
# Clone trainable params
b = b_init.clone()
b.requires_grad_()
w = w_init.clone()
w.requires_grad_()
# Test symbolic differentiation
y = addmm_grad_test(b, x, w)
y.sum().backward()
# clone params for autograd reference
b_ref = b_init.clone()
b_ref.requires_grad_()
w_ref = w_init.clone()
w_ref.requires_grad_()
y_ref = torch.addmm(b_ref, x, w_ref)
y_ref.sum().backward()
self.assertEqual(w.grad, w_ref.grad)
self.assertEqual(b.grad, b_ref.grad)
@unittest.skipIf(not RUN_CUDA, "running tests on cuda to verify cudnn fix")
def test_batch_norm_inference_backward_cuda(self):
with enable_profiling_mode_for_profiling_tests():
class MyBatchNorm(torch.nn.Module):
def __init__(self, num_features, affine, track_running_stats):
super(MyBatchNorm, self).__init__()
self.bn = torch.nn.BatchNorm2d(
num_features, 1e-5, affine=affine, track_running_stats=track_running_stats).float()
| |
m):
'''
push: used to push a move on the board. More costly than play_move()
but you can pop it after. Helper for your search tree algorithm'''
assert not self._gameOver
self._pushBoard()
return self.play_move(m)
def pop(self):
'''
pop: another helper function for you rsearch tree algorithm. If a move has been pushed,
you can undo it by calling pop
'''
hashtopop = self._currentHash
self._popBoard()
if hashtopop in self._seenHashes:
self._seenHashes.remove(hashtopop)
##########################################################
##########################################################
def result(self):
'''
The scoring mechanism is fixed but really costly. It may be not a good idea to use it as a heuristics.
It is the chinese area scoring that computes the final result. It uses the same notation as in chess:
Returns:
- "1-0" if WHITE wins
- "0-1" if BLACK wins
- "1/2-1/2" if DEUCE
Known problems: dead stones are not removed, so the score only stricly apply the area rules. You may want
to keep playing to consolidate your area before computing the scores.
'''
score = self._count_areas()
score_black = self._nbBLACK + score[0]
score_white = self._nbWHITE + score[1]
if score_white > score_black:
return "1-0"
elif score_white < score_black:
return "0-1"
else:
return "1/2-1/2"
def compute_score(self):
''' Computes the score (chinese rules) and return the scores for (blacks, whites) in this order'''
score = self._count_areas()
return (self._nbBLACK + score[0], self._nbWHITE + score[1])
def final_go_score(self):
''' Returns the final score in a more GO-like way.'''
score_black, score_white = self.compute_score()
if score_white > score_black:
return "W+"+str(score_white-score_black)
elif score_white < score_black:
return "B+"+str(score_black-score_white)
else:
return "0"
##########################################################
##########################################################
##########################################################
''' Internal functions only'''
def _pushBoard(self):
currentStatus = []
currentStatus.append(self._nbWHITE)
currentStatus.append(self._nbBLACK)
currentStatus.append(self._capturedWHITE)
currentStatus.append(self._capturedBLACK)
currentStatus.append(self._nextPlayer)
currentStatus.append(self._board.copy())
currentStatus.append(self._gameOver)
currentStatus.append(self._lastPlayerHasPassed)
currentStatus.append(self._stringUnionFind.copy())
currentStatus.append(self._stringLiberties.copy())
currentStatus.append(self._stringSizes.copy())
currentStatus.append(self._empties.copy())
currentStatus.append(self._currentHash)
self._trailMoves.append(currentStatus)
def _popBoard(self):
oldStatus = self._trailMoves.pop()
self._currentHash = oldStatus.pop()
self._empties = oldStatus.pop()
self._stringSizes = oldStatus.pop()
self._stringLiberties = oldStatus.pop()
self._stringUnionFind = oldStatus.pop()
self._lastPlayerHasPassed = oldStatus.pop()
self._gameOver = oldStatus.pop()
self._board = oldStatus.pop()
self._nextPlayer = oldStatus.pop()
self._capturedBLACK = oldStatus.pop()
self._capturedWHITE = oldStatus.pop()
self._nbBLACK = oldStatus.pop()
self._nbWHITE = oldStatus.pop()
self._historyMoveNames.pop()
def _getPositionHash(self, fcoord, color):
return self._positionHashes[fcoord][color-1]
# Used only in init to build the neighborsEntries datastructure
def _get_neighbors(self, fcoord):
x, y = Board.unflatten(fcoord)
neighbors = ((x+1, y), (x-1, y), (x, y+1), (x, y-1))
return [Board.flatten(c) for c in neighbors if self._isOnBoard(c[0], c[1])]
# for union find structure, recover the number of the current string of stones
def _getStringOfStone(self, fcoord):
# In the union find structure, it is important to route all the nodes to the root
# when querying the node. But in Python, using the successive array is really costly
# so this is not so clear that we need to use the successive collection of nodes
# Moreover, not rerouting the nodes may help for backtracking on the structure
successives = []
while self._stringUnionFind[fcoord] != -1:
fcoord = self._stringUnionFind[fcoord]
successives.append(fcoord)
if len(successives) > 1:
for fc in successives[:-1]:
self._stringUnionFind[fc] = fcoord
return fcoord
def _merge_strings(self, str1, str2):
self._stringLiberties[str1] += self._stringLiberties[str2]
self._stringLiberties[str2] = -1
self._stringSizes[str1] += self._stringSizes[str2]
self._stringSizes[str2] = -1
assert self._stringUnionFind[str2] == -1
self._stringUnionFind[str2] = str1
def _put_stone(self, fcoord, color):
self._board[fcoord] = color
self._currentHash ^= self._getPositionHash(fcoord, color)
if self._DEBUG:
assert fcoord in self._empties
self._empties.remove(fcoord)
nbEmpty = 0
nbSameColor = 0
i = self._neighborsEntries[fcoord]
while self._neighbors[i] != -1:
n = self._board[self._neighbors[i]]
if n == Board._EMPTY:
nbEmpty += 1
elif n == color:
nbSameColor += 1
i += 1
nbOtherColor = 4 - nbEmpty - nbSameColor
currentString = fcoord
self._stringLiberties[currentString] = nbEmpty
self._stringSizes[currentString] = 1
stringWithNoLiberties = [] # String to capture (if applies)
i = self._neighborsEntries[fcoord]
while self._neighbors[i] != -1:
fn = self._neighbors[i]
if self._board[fn] == color: # We may have to merge the strings
stringNumber = self._getStringOfStone(fn)
self._stringLiberties[stringNumber] -= 1
if currentString != stringNumber:
self._merge_strings(stringNumber, currentString)
currentString = stringNumber
elif self._board[fn] != Board._EMPTY: # Other color
stringNumber = self._getStringOfStone(fn)
self._stringLiberties[stringNumber] -= 1
if self._stringLiberties[stringNumber] == 0:
if stringNumber not in stringWithNoLiberties: # We may capture more than one string
stringWithNoLiberties.append(stringNumber)
i += 1
return stringWithNoLiberties
def reset(self):
self.__init__()
def _isOnBoard(self, x, y):
return x >= 0 and x < Board._BOARDSIZE and y >= 0 and y < Board._BOARDSIZE
def _is_suicide(self, fcoord, color):
opponent = Board.flip(color)
i = self._neighborsEntries[fcoord]
libertiesFriends = {}
libertiesOpponents = {}
while self._neighbors[i] != -1:
fn = self._neighbors[i]
if self._board[fn] == Board._EMPTY:
return False
string = self._getStringOfStone(fn)
if self._board[fn] == color: # check that we don't kill the whole zone
if string not in libertiesFriends:
libertiesFriends[string] = self._stringLiberties[string] - 1
else:
libertiesFriends[string] -= 1
else:
if Board._DEBUG:
assert self._board[fn] == opponent
if string not in libertiesOpponents:
libertiesOpponents[string] = self._stringLiberties[string] - 1
else:
libertiesOpponents[string] -= 1
i += 1
for s in libertiesOpponents:
if libertiesOpponents[s] == 0:
return False # At least one capture right after this move, it is legal
if len(libertiesFriends) == 0: # No a single friend there...
return True
# Now checks that when we connect all the friends, we don't create
# a zone with 0 liberties
sumLibertiesFriends = 0
for s in libertiesFriends:
sumLibertiesFriends += libertiesFriends[s]
if sumLibertiesFriends == 0:
return True # At least one friend zone will be captured right after this move, it is unlegal
return False
# Checks if the move leads to an already seen board
# By doing this, it has to "simulate" the move, and thus
# it computes also the sets of strings to be removed by the move.
def _is_super_ko(self, fcoord, color):
# Check if it is a complex move (if it takes at least a stone)
tmpHash = self._currentHash ^ self._getPositionHash(fcoord, color)
assert self._currentHash == tmpHash ^ self._getPositionHash(fcoord, color)
i = self._neighborsEntries[fcoord]
libertiesOpponents = {}
opponent = Board.flip(color)
while self._neighbors[i] != -1:
fn = self._neighbors[i]
#print("superko looks at ", self.coord_to_name(fn), "for move", self.coord_to_name(fcoord))
if self._board[fn] == opponent:
s = self._getStringOfStone(fn)
#print("superko sees string", self.coord_to_name(s))
if s not in libertiesOpponents:
libertiesOpponents[s] = self._stringLiberties[s] - 1
else:
libertiesOpponents[s] -= 1
i += 1
for s in libertiesOpponents:
if libertiesOpponents[s] == 0:
#print("superko computation for move ", self.coord_to_name(fcoord), ":")
for fn in self._breadthSearchString(s):
#print(self.coord_to_name(fn)+" ", end="")
assert self._board[fn] == opponent
tmpHash ^= self._getPositionHash(fn, opponent)
#print()
if tmpHash in self._seenHashes:
return True, tmpHash
return False, tmpHash
def _breadthSearchString(self, fc):
color = self._board[fc]
string = set([fc])
frontier = [fc]
while frontier:
current_fc = frontier.pop()
string.add(current_fc)
i = self._neighborsEntries[current_fc]
while self._neighbors[i] != -1:
fn = self._neighbors[i]
i += 1
if self._board[fn] == color and not fn in string:
frontier.append(fn)
return string
def _count_areas(self):
''' Costly function that computes the number of empty positions that only reach respectively BLACK and WHITE
stones (the third values is the number of places touching both colours)'''
to_check = self._empties.copy() # We need to check all the empty positions
only_blacks = 0
only_whites = 0
others = 0
while len(to_check) > 0:
s = to_check.pop()
ssize = 0
assert self._board[s] == Board._EMPTY
frontier = [s]
touched_blacks, touched_whites = 0, 0
currentstring = []
while frontier:
current = frontier.pop()
currentstring.append(current)
ssize += 1 # number of empty places in this loop
assert current not in to_check
i = self._neighborsEntries[current]
while self._neighbors[i] != -1:
n = self._neighbors[i]
i += 1
if self._board[n] == Board._EMPTY and n in to_check:
to_check.remove(n)
frontier.append(n)
elif self._board[n] == Board._BLACK:
touched_blacks += 1
elif self._board[n] == Board._WHITE:
touched_whites += 1
# here we have gathered all the informations about an empty area
assert len(currentstring) == ssize
assert (self._nbBLACK == 0 and self._nbWHITE == 0) or touched_blacks > 0 or touched_whites > 0
if touched_blacks == 0 and touched_whites > 0:
only_whites += ssize
elif touched_whites == 0 and touched_blacks > 0:
only_blacks += ssize
else:
others += ssize
return (only_blacks, only_whites, others)
def _piece2str(self, c):
if c == self._WHITE:
return 'O'
elif c == self._BLACK:
return 'X'
else:
return '.'
def __str__(self):
''' WARNING: this print function does not reflect the classical coordinates. It represents the internal
values in the board.'''
toreturn = ""
for i, c in enumerate(self._board):
toreturn += self._piece2str(c) + | |
SPEECTHP = models.CharField("TYPE OF HLTH CARE WRKR - SPEECH THERAPY", max_length=2)
OTHRHCW = models.CharField("TYPE OF HLTH CARE WRKR - OTHER", max_length=2)
NONSKILL = models.CharField("TYPE OF HLTH CARE WRKR - NON-SKILLED", max_length=2)
SKILLED = models.CharField("TYPE OF HLTH CARE WRKR - SKILLED", max_length=2)
SKILLWOS = models.CharField("SPECIFY TYPE OF SKILLED WORKER", max_length=25)
OTHCW = models.CharField("TYPE OF HLTH CARE WRKR - SOME OTHER", max_length=2)
OTHCWOS = models.CharField("SPECIFY OTHER TYPE HEALTH CARE WORKER", max_length=25)
HOSPITAL = models.CharField("ANY HH CARE SVCE DUE TO HOSPITALIZATION", max_length=2)
VSTRELCN = models.CharField("ANY HH CARE SVCE RELATED TO HLTH COND", max_length=2)
TREATMT = models.CharField("PERSON RECEIVED MEDICAL TREATMENT", max_length=2)
MEDEQUIP = models.CharField("PERSON WAS TAUGHT USE OF MED EQUIPMENT", max_length=2)
DAILYACT = models.CharField("PERSON WAS HELPED WITH DAILY ACTIVITIES", max_length=2)
COMPANY = models.CharField("PERSON RECEIVED COMPANIONSHIP SERVICES", max_length=2)
OTHSVCE = models.CharField("PERSON RECEIVED OTH HOME CARE SERVICES", max_length=2)
OTHSVCOS = models.CharField("SPECIFY OTHER HOME CARE SRVCE RECEIVED", max_length=25)
FREQCY = models.CharField("PROVIDER HELPED EVERY WEEKSOME WEEKS", max_length=2)
DAYSPWK = models.CharField("# DAYS WEEK PROVIDER CAME", max_length=2)
DAYSPMO = models.CharField("# DAYS MONTH PROVIDER CAME", max_length=2)
HOWOFTEN = models.CharField("PROV CAME ONCE PER DAYMORE THAN ONCE", max_length=2)
TMSPDAY = models.CharField("TIMESDAY PROVIDER CAME TO HOME TO HELP", max_length=2)
HRSLONG = models.CharField("HOURS EACH VISIT LASTED", max_length=2)
MINLONG = models.CharField("MINUTES EACH VISIT LASTED", max_length=2)
SAMESVCE = models.CharField("ANY OTH MONS PER RECEIVED SAME SERVICES", max_length=2)
HHDAYS = models.CharField("DAYS PER MONTH IN HOME HEALTH, 2014", max_length=2)
HHSF14X = models.CharField("AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
HHMR14X = models.CharField("AMOUNT PAID, MEDICARE (IMPUTED)", max_length=8)
HHMD14X = models.CharField("AMOUNT PAID, MEDICAID (IMPUTED)", max_length=8)
HHPV14X = models.CharField("AMOUNT PAID, PRIVATE INSURANCE (IMPUTED)", max_length=8)
HHVA14X = models.CharField("AMOUNT PAID, VETERANSCHAMPVA(IMPUTED)", max_length=7)
HHTR14X = models.CharField("AMOUNT PAID, TRICARE(IMPUTED)", max_length=6)
HHOF14X = models.CharField("AMOUNT PAID, OTHER FEDERAL (IMPUTED)", max_length=6)
HHSL14X = models.CharField("AMOUNT PAID, STATE & LOCAL GOV (IMPUTED)", max_length=7)
HHWC14X = models.CharField("AMOUNT PAID, WORKERS COMP (IMPUTED)", max_length=7)
HHOR14X = models.CharField("AMOUNT PAID, OTHER PRIVATE (IMPUTED)", max_length=7)
HHOU14X = models.CharField("AMOUNT PAID, OTHER PUBLIC (IMPUTED)", max_length=5)
HHOT14X = models.CharField("AMOUNT PAID, OTHER INSURANCE (IMPUTED)", max_length=8)
HHXP14X = models.CharField("SUM OF HHSF14X - HHOT14X (IMPUTED)", max_length=8)
HHTC14X = models.CharField("HHLD REPORTED TOTAL CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT14F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2014", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2014", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2014", max_length=1)
# Methods
def __str__(self):
"""String for representing a HomeHealth14 object"""
return f"{self.DUPERSID}"
class HomeHealth13(models.Model):
""" Defines the HomeHealth Model for 2013, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "HomeHealth13"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
HHDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
HHDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
MPCELIG = models.CharField("MPC ELIGIBILITY FLAG", max_length=1)
SELFAGEN = models.CharField("DOES PROVIDER WORK FOR AGENCY OR SELF", max_length=2)
HHTYPE = models.CharField("HOME HEALTH EVENT TYPE", max_length=1)
CNA = models.CharField("TYPE OF HLTH CARE WRKR - CERT NURSE ASST", max_length=2)
COMPANN = models.CharField("TYPE OF HLTH CARE WRKR - COMPANION", max_length=2)
DIETICN = models.CharField("TYPE OF HLTH CARE WRKR - DIETITIANNUTRT", max_length=2)
HHAIDE = models.CharField("TYPE OF HLTH CARE WRKR - HOME CARE AIDE", max_length=2)
HOSPICE = models.CharField("TYPE OF HLTH CARE WRKR - HOSPICE WORKER", max_length=2)
HMEMAKER = models.CharField("TYPE OF HLTH CARE WRKR - HOMEMAKER", max_length=2)
IVTHP = models.CharField("TYPE OF HLTH CARE WRKR - IV THERAPIST", max_length=2)
MEDLDOC = models.CharField("TYPE OF HLTH CARE WRKR - MEDICAL DOCTOR", max_length=2)
NURPRACT = models.CharField("TYPE OF HLTH CARE WRKR - NURSEPRACTR", max_length=2)
NURAIDE = models.CharField("TYPE OF HLTH CARE WRKR - NURSE S AIDE", max_length=2)
OCCUPTHP = models.CharField("TYPE OF HLTH CARE WRKR - OCCUP THERAP", max_length=2)
PERSONAL = models.CharField("TYPE OF HLTH CARE WRKR - PERS CARE ATTDT", max_length=2)
PHYSLTHP = models.CharField("TYPE OF HLTH CARE WRKR - PHYSICL THERAPY", max_length=2)
RESPTHP = models.CharField("TYPE OF HLTH CARE WRKR - RESPIRA THERAPY", max_length=2)
SOCIALW = models.CharField("TYPE OF HLTH CARE WRKR - SOCIAL WORKER", max_length=2)
SPEECTHP = models.CharField("TYPE OF HLTH CARE WRKR - SPEECH THERAPY", max_length=2)
OTHRHCW = models.CharField("TYPE OF HLTH CARE WRKR - OTHER", max_length=2)
NONSKILL = models.CharField("TYPE OF HLTH CARE WRKR - NON-SKILLED", max_length=2)
SKILLED = models.CharField("TYPE OF HLTH CARE WRKR - SKILLED", max_length=2)
SKILLWOS = models.CharField("SPECIFY TYPE OF SKILLED WORKER", max_length=25)
OTHCW = models.CharField("TYPE OF HLTH CARE WRKR - SOME OTHER", max_length=2)
OTHCWOS = models.CharField("SPECIFY OTHER TYPE HEALTH CARE WORKER", max_length=25)
HOSPITAL = models.CharField("ANY HH CARE SVCE DUE TO HOSPITALIZATION", max_length=2)
VSTRELCN = models.CharField("ANY HH CARE SVCE RELATED TO HLTH COND", max_length=2)
TREATMT = models.CharField("PERSON RECEIVED MEDICAL TREATMENT", max_length=2)
MEDEQUIP = models.CharField("PERSON WAS TAUGHT USE OF MED EQUIPMENT", max_length=2)
DAILYACT = models.CharField("PERSON WAS HELPED WITH DAILY ACTIVITIES", max_length=2)
COMPANY = models.CharField("PERSON RECEIVED COMPANIONSHIP SERVICES", max_length=2)
OTHSVCE = models.CharField("PERSON RECEIVED OTH HOME CARE SERVICES", max_length=2)
OTHSVCOS = models.CharField("SPECIFY OTHER HOME CARE SRVCE RECEIVED", max_length=25)
FREQCY = models.CharField("PROVIDER HELPED EVERY WEEKSOME WEEKS", max_length=2)
DAYSPWK = models.CharField("# DAYS WEEK PROVIDER CAME", max_length=2)
DAYSPMO = models.CharField("# DAYS MONTH PROVIDER CAME", max_length=2)
HOWOFTEN = models.CharField("PROV CAME ONCE PER DAYMORE THAN ONCE", max_length=2)
TMSPDAY = models.CharField("TIMESDAY PROVIDER CAME TO HOME TO HELP", max_length=2)
HRSLONG = models.CharField("HOURS EACH VISIT LASTED", max_length=2)
MINLONG = models.CharField("MINUTES EACH VISIT LASTED", max_length=2)
SAMESVCE = models.CharField("ANY OTH MONS PER RECEIVED SAME SERVICES", max_length=2)
HHDAYS = models.CharField("DAYS PER MONTH IN HOME HEALTH, 2013", max_length=2)
HHSF13X = models.CharField("AMOUNT PAID, FAMILY (IMPUTED)", max_length=8)
HHMR13X = models.CharField("AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
HHMD13X = models.CharField("AMOUNT PAID, MEDICAID (IMPUTED)", max_length=8)
HHPV13X = models.CharField("AMOUNT PAID, PRIVATE INSURANCE (IMPUTED)", max_length=8)
HHVA13X = models.CharField("AMOUNT PAID, VETERANSCHAMPVA(IMPUTED)", max_length=7)
HHTR13X = models.CharField("AMOUNT PAID, TRICARE(IMPUTED)", max_length=7)
HHOF13X = models.CharField("AMOUNT PAID, OTHER FEDERAL (IMPUTED)", max_length=5)
HHSL13X = models.CharField("AMOUNT PAID, STATE & LOCAL GOV (IMPUTED)", max_length=7)
HHWC13X = models.CharField("AMOUNT PAID, WORKERS COMP (IMPUTED)", max_length=5)
HHOR13X = models.CharField("AMOUNT PAID, OTHER PRIVATE (IMPUTED)", max_length=7)
HHOU13X = models.CharField("AMOUNT PAID, OTHER PUBLIC (IMPUTED)", max_length=7)
HHOT13X = models.CharField("AMOUNT PAID, OTHER INSURANCE (IMPUTED)", max_length=7)
HHXP13X = models.CharField("SUM OF HHSF13X - HHOT13X (IMPUTED)", max_length=8)
HHTC13X = models.CharField("HHLD REPORTED TOTAL CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT13F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2013", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2013", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2013", max_length=1)
# Methods
def __str__(self):
"""String for representing a HomeHealth13 object"""
return f"{self.DUPERSID}"
class HomeHealth12(models.Model):
""" Defines the HomeHealth Model for 2012, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "HomeHealth12"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
HHDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
HHDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
MPCELIG = models.CharField("MPC ELIGIBILITY FLAG", max_length=1)
SELFAGEN = models.CharField("DOES PROVIDER WORK FOR AGENCY OR SELF", max_length=2)
HHTYPE = models.CharField("HOME HEALTH EVENT TYPE", max_length=1)
CNA = models.CharField("TYPE OF HLTH CARE WRKR - CERT NURSE ASST", max_length=2)
COMPANN = models.CharField("TYPE OF HLTH CARE WRKR - COMPANION", max_length=2)
DIETICN = models.CharField("TYPE OF HLTH CARE WRKR - DIETITIAN/NUTRT", max_length=2)
HHAIDE = models.CharField("TYPE OF HLTH CARE WRKR - HOME CARE AIDE", max_length=2)
HOSPICE = models.CharField("TYPE OF HLTH CARE WRKR - HOSPICE WORKER", max_length=2)
HMEMAKER = models.CharField("TYPE OF HLTH CARE WRKR - HOMEMAKER", max_length=2)
IVTHP = models.CharField("TYPE OF HLTH CARE WRKR - IV THERAPIST", max_length=2)
MEDLDOC = models.CharField("TYPE OF HLTH CARE WRKR - MEDICAL DOCTOR", max_length=2)
NURPRACT = models.CharField("TYPE OF HLTH CARE WRKR - NURSE/PRACTR", max_length=2)
NURAIDE = models.CharField("TYPE OF HLTH CARE WRKR - NURSE'S AIDE", max_length=2)
OCCUPTHP = models.CharField("TYPE OF HLTH CARE WRKR - OCCUP THERAP", max_length=2)
PERSONAL = models.CharField("TYPE OF HLTH CARE WRKR - PERS CARE ATTDT", max_length=2)
PHYSLTHP = models.CharField("TYPE OF HLTH CARE WRKR - PHYSICL THERAPY", max_length=2)
RESPTHP = models.CharField("TYPE OF HLTH CARE WRKR - RESPIRA THERAPY", max_length=2)
SOCIALW = models.CharField("TYPE OF HLTH CARE WRKR - SOCIAL WORKER", max_length=2)
SPEECTHP = models.CharField("TYPE OF HLTH CARE WRKR - SPEECH THERAPY", max_length=2)
OTHRHCW = models.CharField("TYPE | |
#
# Depends
# Copyright (C) 2014 by <NAME> & <NAME>. All rights reserved.
# BSD license (LICENSE.txt for details).
#
"""Main GUI
The main window which contains the dependency graph view widget and a dock for
additional windows. Executing the program from the commandline interface
creates one of these windows (and therefore all startup routines execute), but
does not display it.
"""
import os
import sys
import json
import tempfile
import itertools
from PySide import QtCore, QtGui
import dag
import node
import util
import variables
import data_packet
import file_dialog
import undo_commands
import property_widget
import variable_widget
import graphics_widgets
import scenegraph_widget
class MainWindow(QtGui.QMainWindow):
"""Construct main UI
This class constructs the UI, consisting of the many windows, menu items,
undo managers, plugin systems, workflow variables, etc. It also holds
functions to manage what happens when dag nodes change (see
"DAG management" section) and loading and saving of DAG snapshots.
"""
def __init__(self, startFile="", parent=None):
QtGui.QMainWindow.__init__(self, parent)
# Add the DAG widget
self.graphicsViewWidget = graphics_widgets.GraphicsViewWidget(self)
self.graphicsScene = self.graphicsViewWidget.scene()
self.setCentralWidget(self.graphicsViewWidget)
# Create the docking widget for the properties dialog
self.propDock = QtGui.QDockWidget()
self.propDock.setObjectName('propDock')
self.propDock.setAllowedAreas(QtCore.Qt.RightDockWidgetArea | QtCore.Qt.LeftDockWidgetArea)
self.propDock.setWindowTitle("Properties")
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.propDock)
# Create and add the properties dialog to the dock widget
self.propWidget = property_widget.PropWidget(self)
self.propDock.setWidget(self.propWidget)
# Create the docking widget for the sceneGraph dialog
self.sceneGraphDock = QtGui.QDockWidget()
self.sceneGraphDock.setObjectName('sceneGraphDock')
self.sceneGraphDock.setAllowedAreas(QtCore.Qt.RightDockWidgetArea | QtCore.Qt.LeftDockWidgetArea)
self.sceneGraphDock.setWindowTitle("Scene Graph")
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.sceneGraphDock)
# Create and add the sceneGraph dialog to the dock widget
self.sceneGraphWidget = scenegraph_widget.SceneGraphWidget(self)
self.sceneGraphDock.setWidget(self.sceneGraphWidget)
# Create the docking widget for the variable dialog
self.variableDock = QtGui.QDockWidget()
self.variableDock.setObjectName('variableDock')
self.variableDock.setAllowedAreas(QtCore.Qt.RightDockWidgetArea | QtCore.Qt.LeftDockWidgetArea)
self.variableDock.setWindowTitle("Variables")
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.variableDock)
# Create and add the variable dialog to the dock widget
self.variableWidget = variable_widget.VariableWidget(self)
self.variableDock.setWidget(self.variableWidget)
self.variableDock.hide()
# Set some locals
self.dag = None
self.undoStack = QtGui.QUndoStack(self)
# Undo and Redo have built-in ways to create their menus
undoAction = self.undoStack.createUndoAction(self, "&Undo")
undoAction.setShortcuts(QtGui.QKeySequence.Undo)
redoAction = self.undoStack.createRedoAction(self, "&Redo")
redoAction.setShortcuts(QtGui.QKeySequence.Redo)
# Create the menu bar
fileMenu = self.menuBar().addMenu("&File")
fileMenu.addAction(QtGui.QAction("&Open DAG...", self, shortcut="Ctrl+O", triggered=self.openDialog))
fileMenu.addAction(QtGui.QAction("&Save DAG", self, shortcut="Ctrl+S", triggered=lambda: self.save(self.workingFilename)))
fileMenu.addAction(QtGui.QAction("Save DAG &Version Up", self, shortcut="Ctrl+Space", triggered=self.saveVersionUp))
fileMenu.addAction(QtGui.QAction("Save DAG &As...", self, shortcut="Ctrl+Shift+S", triggered=self.saveAs))
fileMenu.addAction(QtGui.QAction("&Quit...", self, shortcut="Ctrl+Q", triggered=self.close))
editMenu = self.menuBar().addMenu("&Edit")
editMenu.addAction(undoAction)
editMenu.addAction(redoAction)
editMenu.addSeparator()
createMenu = editMenu.addMenu("&Create Node")
editMenu.addAction(QtGui.QAction("&Delete Node(s)", self, shortcut="Delete", triggered=self.deleteSelectedNodes))
editMenu.addAction(QtGui.QAction("&Shake Node(s)", self, shortcut="Backspace", triggered=self.shakeSelectedNodes))
editMenu.addAction(QtGui.QAction("D&uplicate Node", self, shortcut="Ctrl+D", triggered=self.duplicateSelectedNodes))
editMenu.addSeparator()
editMenu.addAction(QtGui.QAction("&Group Nodes", self, shortcut="Ctrl+G", triggered=self.groupSelectedNodes))
editMenu.addAction(QtGui.QAction("&Ungroup Nodes", self, shortcut="Ctrl+Shift+G", triggered=self.ungroupSelectedNodes))
executeMenu = self.menuBar().addMenu("E&xecute")
executeMenu.addAction(QtGui.QAction("&Execute Graph", self, shortcut= "Ctrl+E", triggered=lambda: self.dag.execute_graph()))
executeMenu.addAction(QtGui.QAction("Execute Up To &Selected Node", self, shortcut= "Ctrl+Shift+E", triggered=lambda: self.executeSelected(executeImmediately=True)))
executeMenu.addSeparator()
executeMenu.addAction(QtGui.QAction("&Reload plugins", self, shortcut= "Ctrl+0", triggered=self.reloadPlugins))
windowMenu = self.menuBar().addMenu("&Window")
windowMenu.addAction(self.propDock.toggleViewAction())
windowMenu.addAction(self.sceneGraphDock.toggleViewAction())
windowMenu.addAction(self.variableDock.toggleViewAction())
# Application settings
self.settings = QtCore.QSettings('vcl', 'depends', self)
self.restoreSettings()
# Setup the variables, load the plugins, and auto-generate the read dag nodes
self.setupStartupVariables()
node.loadChildNodesFromPaths(variables.value('NODE_PATH').split(':'))
file_dialog.loadChildFileDialogsFromPaths(variables.value('FILE_DIALOG_PATH').split(':'))
# Generate the Create menu. Must be done after plugins are loaded.
for action in self.createCreateMenuActions():
createMenu.addAction(action)
# Load the starting filename or create a new DAG
self.workingFilename = startFile
self.dag = dag.DAG()
self.graphicsScene.setDag(self.dag)
if not self.open(self.workingFilename):
self.setWindowTitle("Depends")
self.undoStack.setClean()
# This is a small workaround to ensure the properties dialog doesn't
# try to redraw twice when two nodes are rapidly selected
# (within a frame of each other). There's a good chance the way I
# construct a property dialog is strange, but a twice-at-once redraw
# was making the entire UI destroy itself and spawn a temporary child
# window that had the same name as the program 'binary'.
self.selectionTimer = QtCore.QTimer(self)
self.selectionTimer.setInterval(0)
self.selectionTimer.timeout.connect(self.selectionRefresh)
# Hook up some signals
self.graphicsViewWidget.createNode.connect(self.create_node)
self.graphicsScene.selectionChanged.connect(self.selectionChanged)
self.graphicsScene.nodesDisconnected.connect(self.nodesDisconnected)
self.graphicsScene.nodesConnected.connect(self.nodesConnected)
self.propWidget.attrChanged.connect(self.propertyEdited)
self.propWidget.rangeChanged.connect(self.propertyRangeEdited)
self.propWidget.mouseover.connect(self.highlightDagNodes)
self.propWidget.mouseover.connect(self.sceneGraphWidget.highlightRowsUsingNodes)
self.sceneGraphWidget.mouseover.connect(self.highlightDagNodes)
self.sceneGraphWidget.mouseover.connect(self.propWidget.highlightInputs)
self.variableWidget.addVariable.connect(variables.add)
self.variableWidget.setVariable.connect(variables.setx)
self.variableWidget.removeVariable.connect(variables.remove)
self.undoStack.cleanChanged.connect(self.setWindowTitleClean)
def closeEvent(self, event):
"""
Save program settings and ask "are you sure" if there are unsaved changes.
"""
if not self.undoStack.isClean():
if self.yesNoDialog("Current workflow is not saved. Save it before quitting?"):
if self.workingFilename:
self.save(self.workingFilename)
else:
self.saveAs()
self.saveSettings()
QtGui.QMainWindow.closeEvent(self, event)
def updateScenegraph(self, dagNodes):
"""
Rebuild the scenegraph widget based on the given dag nodes.
"""
if dagNodes and len(dagNodes) == 1:
liveSceneGraph = self.dag.buildSceneGraph(dagNodes[0])
self.sceneGraphWidget.rebuild(liveSceneGraph, dagNodes[0])
else:
self.sceneGraphWidget.rebuild(None, None)
def selectedDagNodes(self):
"""
Return a list of all selected DagNodes in the scene.
"""
selectedDrawNodes = self.graphicsScene.selectedItems()
return [sdn.dagNode for sdn in selectedDrawNodes]
def setupStartupVariables(self):
"""
Each program starts with a set of workflow variables that are defined
by where the program is executed from and potentially a set of
environment variables.
"""
# The current session gets a "binary directory" variable
variables.add('DEPENDS_DIR')
variables.setx('DEPENDS_DIR', os.path.dirname(os.path.realpath(__file__)), readOnly=True)
# ...And a path that points to where the nodes are loaded from
variables.add('NODE_PATH')
if not os.environ.get('DEPENDS_NODE_PATH'):
variables.setx('NODE_PATH', os.path.join(variables.value('DEPENDS_DIR'), 'nodes'), readOnly=True)
else:
variables.setx('NODE_PATH', os.environ.get('DEPENDS_NODE_PATH'), readOnly=True)
# ...And a path that points to where the File Dialogs come from
variables.add('FILE_DIALOG_PATH')
if not os.environ.get('DEPENDS_FILE_DIALOG_PATH'):
variables.setx('FILE_DIALOG_PATH', os.path.join(variables.value('DEPENDS_DIR'), 'file_dialogs'), readOnly=True)
else:
variables.setx('FILE_DIALOG_PATH', os.environ.get('DEPENDS_FILE_DIALOG_PATH'), readOnly=True)
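# Illustrative note (an assumption, not from the original source): the node and
# file-dialog search paths can be overridden before launch via the environment
# variables read above, e.g.
#
#   os.environ['DEPENDS_NODE_PATH'] = '/path/to/custom/nodes'            # hypothetical path
#   os.environ['DEPENDS_FILE_DIALOG_PATH'] = '/path/to/custom/dialogs'   # hypothetical path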
def clearVariableDictionary(self):
"""
Clear all variables from the 'global' variable dictionary that aren't
"built-in" to the current session.
"""
for key in variables.names():
if key == 'DEPENDS_DIR':
continue
if key == 'NODE_PATH':
continue
def saveSettings(self):
"""
Register the software's general settings with the QSettings object
and force a save with sync().
"""
self.settings.setValue("mainWindowGeometry", self.saveGeometry())
self.settings.setValue("mainWindowState", self.saveState())
self.settings.sync()
def restoreSettings(self):
"""
Restore the software's general settings by pulling data out of the
current settings object.
"""
self.restoreGeometry(self.settings.value('mainWindowGeometry'))
self.restoreState(self.settings.value('mainWindowState'))
###########################################################################
## Complex message handling
###########################################################################
def create_node(self, nodeType, nodeLocation):
"""
Create a new dag node with a safe name, add it to the dag, and register it with the QGraphicsScene.
"""
preSnap = self.dag.snapshot(nodeMetaDict=self.graphicsScene.nodeMetaDict(), connectionMetaDict=self.graphicsScene.connectionMetaDict())
newDagNode = nodeType()
nodeName = node.cleanNodeName(newDagNode.typeStr())
nodeName = self.dag.safeNodeName(nodeName)
newDagNode.set_name(nodeName)
self.dag.add_node(newDagNode)
self.graphicsScene.addExistingDagNode(newDagNode, nodeLocation)
currentSnap = self.dag.snapshot(nodeMetaDict=self.graphicsScene.nodeMetaDict(), connectionMetaDict=self.graphicsScene.connectionMetaDict())
self.undoStack.push(undo_commands.DagAndSceneUndoCommand(preSnap, currentSnap, self.dag, self.graphicsScene))
def delete_nodes(self, dagNodesToDelete):
"""
Delete an existing dag node and its edges, and make sure the QGraphicsScene cleans up as well.
"""
nodesAffected = list()
preSnap = self.dag.snapshot(nodeMetaDict=self.graphicsScene.nodeMetaDict(), connectionMetaDict=self.graphicsScene.connectionMetaDict())
# Clean up the graphics scene
# TODO: Should be a signal that tells the scene what to do
for dagNode in dagNodesToDelete:
drawNode = self.graphicsScene.drawNode(dagNode)
drawEdges = drawNode.drawEdges()
for edge in drawEdges:
edge.sourceDrawNode().removeDrawEdge(edge)
edge.destDrawNode().removeDrawEdge(edge)
self.graphicsScene.removeItem(edge)
self.graphicsScene.removeItem(drawNode)
# Remove the nodes from the dag
for delNode in dagNodesToDelete:
nodesAffected = nodesAffected + self.dagNodeDisconnected(delNode)
nodesAffected.remove(delNode)
self.dag.remove_node(delNode)
currentSnap = self.dag.snapshot(nodeMetaDict=self.graphicsScene.nodeMetaDict(), connectionMetaDict=self.graphicsScene.connectionMetaDict())
self.undoStack.push(undo_commands.DagAndSceneUndoCommand(preSnap, currentSnap, self.dag, self.graphicsScene))
# Updates the drawNodes for each of the affected dagNodes
self.graphicsScene.refreshDrawNodes(nodesAffected)
def shakeNodes(self, dagNodesToShake):
"""
Pull a node out of the dependency chain without deleting it and without
losing downstream information.
"""
nodesAffected = list()
preSnap = self.dag.snapshot(nodeMetaDict=self.graphicsScene.nodeMetaDict(), connectionMetaDict=self.graphicsScene.connectionMetaDict())
for dagNode in dagNodesToShake:
inNodes = self.dag.nodeConnectionsIn(dagNode)
outNodes = self.dag.nodeConnectionsOut(dagNode)
drawNode = self.graphicsScene.drawNode(dagNode)
# Connect all previous dag nodes to all next nodes & add the draw edges
for inputDagNode in inNodes:
inputDrawNode = self.graphicsScene.drawNode(inputDagNode)
for outputDagNode in outNodes:
outputDrawNode = self.graphicsScene.drawNode(outputDagNode)
self.dag.connect_nodes(inputDagNode, outputDagNode)
newDrawEdge = self.graphicsScene.addExistingConnection(inputDagNode, outputDagNode)
newDrawEdge.horizontalConnectionOffset = self.graphicsScene.drawEdge(drawNode, outputDrawNode).horizontalConnectionOffset
newDrawEdge.adjust()
# Disconnect this dag node from everything
for inputDagNode in inNodes:
self.dag.disconnect_nodes(inputDagNode, dagNode)
for outputDagNode in outNodes:
nodesAffected = nodesAffected + self.dagNodeDisconnected(dagNode)
self.dag.disconnect_nodes(dagNode, outputDagNode)
# Remove all draw edges
for edge in drawNode.drawEdges():
edge.sourceDrawNode().removeDrawEdge(edge)
edge.destDrawNode().removeDrawEdge(edge)
self.graphicsScene.removeItem(edge)
# Nullify all our inputs
for input in dagNode.inputs():
dagNode.setInputValue(input.name, "")
currentSnap = self.dag.snapshot(nodeMetaDict=self.graphicsScene.nodeMetaDict(), connectionMetaDict=self.graphicsScene.connectionMetaDict())
self.undoStack.push(undo_commands.DagAndSceneUndoCommand(preSnap, currentSnap, self.dag, self.graphicsScene))
# A few refreshes
self.propWidget.refresh()
self.updateScenegraph(self.selectedDagNodes())
self.graphicsScene.refreshDrawNodes(nodesAffected)
def duplicateNodes(self, dagNodesToDupe):
"""
Create identical copies of the given dag nodes, but drop their
incoming and outgoing connections.
"""
preSnap = self.dag.snapshot(nodeMetaDict=self.graphicsScene.nodeMetaDict(), connectionMetaDict=self.graphicsScene.connectionMetaDict())
dupedNodes = list()
for dagNode in dagNodesToDupe:
dupedNode = dagNode.duplicate("_Dupe")
newLocation = self.graphicsScene.drawNode(dagNode).pos() + QtCore.QPointF(20, 20)
self.dag.add_node(dupedNode)
self.graphicsScene.addExistingDagNode(dupedNode, newLocation)
dupedNodes.append(dupedNode)
currentSnap = self.dag.snapshot(nodeMetaDict=self.graphicsScene.nodeMetaDict(), connectionMetaDict=self.graphicsScene.connectionMetaDict())
self.undoStack.push(undo_commands.DagAndSceneUndoCommand(preSnap, currentSnap, self.dag, self.graphicsScene))
# Updates the drawNodes for each of the duplicated dagNodes
self.propWidget.refresh()
self.graphicsScene.refreshDrawNodes(dupedNodes)
def nodesDisconnected(self, fromDagNode, toDagNode):
"""
When the user interface disconnects two nodes, tell the in-flight
dag about it.
"""
nodesAffected = list()
nodesAffected = nodesAffected + self.dagNodeDisconnected(fromDagNode)
self.dag.disconnect_nodes(fromDagNode, toDagNode)
# A few refreshes
self.updateScenegraph(self.selectedDagNodes())
self.propWidget.refresh()
self.graphicsScene.refreshDrawNodes(nodesAffected)
def nodesConnected(self, fromDagNode, toDagNode):
"""
When the user interface connects two nodes, tell the in-flight dag
about it.
"""
self.dag.connect_nodes(fromDagNode, toDagNode)
self.updateScenegraph(self.selectedDagNodes())
def propertyEdited(self, dagNode, propName, newValue, propertyType=None):
"""
When the user interface edits a property of a node (meaning an attribute,
input, or output), communicate this information to the in-flight dag.
interface.
i.ip6Addresses += autoConfig.addresses
# Auto configure the IPv6 default gateway if necessary.
if self.ip6DefaultGateway == None:
self.ip6DefaultGateway = autoConfig.defaultGateway
self.ip6DefaultInterface = i
# Auto configure the IPv6 DNS servers if necessary.
if self.ip6DNSServers == None:
self.ip6DNSServers = autoConfig.dnsServers
# Save a list of the prefixes that have been delegated to this interface.
if autoConfig.delegatedPrefixes:
i.delegatedIP6Prefixes += autoConfig.delegatedPrefixes
# Save any further routes associated with the auto-config response
if autoConfig.furtherRoutes:
self.ip6Routes += autoConfig.furtherRoutes
# If any of the assigned IPv6 addresses are network addresses (i.e. the IID is
# all zeros), then convert them to host addresses using the interface MAC address
# as the IID.
for n in range(len(i.ip6Addresses)):
a = i.ip6Addresses[n]
if isNetworkAddress(a):
a = makeIP6InterfaceAddress(a.network, macAddr=i.macAddress, macIs64Bit=i.usesMAC64)
i.ip6Addresses[n] = a
def configureRoutes(self):
# Add default routes...
if self.ip4DefaultGateway:
self.ip4Routes.append(
IPRoute(
dest = None,
interface = self.ip4DefaultInterface,
via = self.ip4DefaultGateway
)
)
if self.ip6DefaultGateway:
self.ip6Routes.append(
IPRoute(
dest = None,
interface = self.ip6DefaultInterface,
via = self.ip6DefaultGateway
)
)
# Perform route configuration for each interface...
for i in self.interfaces:
# If IPv4 is enabled for the interface and the attached network supports IPv4
# request advertised IPv4 routes and add them to the node's route table.
if i.ip4Enabled and i.network.ip4Supported:
self.ip4Routes += i.network.requestIP4AdvertisedRoutes(i)
# If IPv6 is enabled for the interface and the attached network supports IPv6
# request advertised IPv6 routes and add them to the node's route table.
if i.ip6Enabled and i.network.ip6Supported:
self.ip6Routes += i.network.requestIP6AdvertisedRoutes(i)
def requestIP4AutoConfig(self, interface):
return None
def requestIP6AutoConfig(self, interface):
return None
def requestIP4AdvertisedRoutes(self, interface):
return []
def requestIP6AdvertisedRoutes(self, interface):
return []
def filterIP6AutoConfig(self, interface, autoConfigs):
return autoConfigs
def buildNode(self, hostsTable):
global logIndent
logAction('Building node: %s' % self.name)
logIndent += 1
try:
self._buildNode()
self.installHostsTable(hostsTable)
self.installResolverConfig()
finally:
logIndent -= 1
def _buildNode(self):
# If the node is not implemented by the host...
if not self.isHostNode:
# Create a network namespace for the node.
addNamespace(self.nsName)
# Create a /etc/netns directory for the node.
etcDirName = os.path.join('/etc/netns', self.nsName)
if not os.path.exists(etcDirName):
logAction('Making directory: %s' % etcDirName)
os.makedirs(etcDirName)
# If the node is not implemented by the host, enable its loopback interface. (If the node
# is implemented by the host, presumably the loopback interface is already enabled).
if not self.isHostNode:
enableInterface('lo', nsName=self.nsName)
# Build each of the node's interfaces...
for i in self.interfaces:
i.buildInterface()
# Add routes for the node.
if not self.useTapInterfaces:
for r in self.ip4Routes + self.ip6Routes:
addRoute(
dest = r.destination,
dev = r.interface.ifName if r.interface != None else None,
via = r.via,
options = r.options,
nsName = self.nsName
)
def clearNode(self):
# Clear the node's interfaces.
if self.isHostNode:
for i in self.interfaces:
i.clearInterface()
# Delete the node's namespace.
if not self.isHostNode:
deleteNamespace(self.nsName)
# Delete the node's namespace /etc directory
if not self.isHostNode:
etcDirName = os.path.join('/etc/netns', self.nsName)
if os.path.isdir(etcDirName):
logAction('Deleting directory: %s' % etcDirName)
shutil.rmtree(etcDirName)
def isNodeBuilt(self):
if self.isHostNode:
return True
else:
nsList = getNamespaces()
return self.nsName in nsList
def getHostsEntries(self):
hostEntries = {}
nodeIP4Addrs = []
nodeIP6Addrs = []
for i in self.interfaces:
if i.ip4Address:
nodeIP4Addrs.append(i.ip4Address.ip)
hostEntries[self.name + '-' + i.ifName + '-ip4'] = [ i.ip4Address.ip ]
if len(i.ip6Addresses) > 0:
addrs = [ a.ip for a in i.ip6Addresses if not a.is_link_local ]
nodeIP6Addrs += addrs
hostEntries[self.name + '-' + i.ifName + '-ip6'] = preferGlobalAddresses(addrs)
# Filter address lists to prefer global addresses over non-global ones.
nodeIP4Addrs = preferGlobalAddresses(nodeIP4Addrs)
nodeIP6Addrs = preferGlobalAddresses(nodeIP6Addrs)
hostEntries[self.name] = nodeIP4Addrs + nodeIP6Addrs
for alias in self.hostAliases:
hostEntries[alias] = nodeIP4Addrs + nodeIP6Addrs
return hostEntries
def installHostsTable(self, hostsTable):
if not self.isHostNode:
etcDirName = os.path.join('/etc/netns', self.nsName)
hostsFileName = os.path.join(etcDirName, 'hosts')
logAction('Installing hosts file: %s' % hostsFileName)
with open(hostsFileName, 'w') as f:
f.write(hostsTable)
def installResolverConfig(self):
if not self.isHostNode:
etcDirName = os.path.join('/etc/netns', self.nsName)
resolvConfFileName = os.path.join(etcDirName, 'resolv.conf')
logAction('Installing resolv.conf file: %s' % resolvConfFileName)
with open(resolvConfFileName, 'w') as f:
if self.ip4DNSServers:
for a in self.ip4DNSServers:
f.write('nameserver %s\n' % a)
if self.ip6DNSServers:
for a in self.ip6DNSServers:
f.write('nameserver %s\n' % a)
def setEnvironVars(self, environ):
environ['SN_NODE_NAME'] = self.name
if not self.isHostNode:
environ['SN_NAMESPACE'] = self.nsName
environ['SN_NODE_INDEX'] = str(self.nodeIndex)
def getLwIPConfig(self):
lwipConfig = ''
for i in self.interfaces:
if isinstance(i, TapInterface):
lwipConfig += i.getLwipConfig()
if self.ip4DefaultGateway:
lwipConfig += '--ip4-default-gw %s ' % self.ip4DefaultGateway
if self.ip4DNSServers:
lwipConfig += '--dns-server %s ' % (','.join(self.ip4DNSServers))
# TODO: handle routes
return lwipConfig
def summary(self, prefix=''):
s = '%sNode "%s" (%s ' % (prefix, self.name, self.__class__.__name__,)
if self.isHostNode:
s += 'implemented by host):\n'
else:
s += 'implemented by namespace %s):\n' % self.nsName
s += '%s Node Index: %d\n' % (prefix, self.nodeIndex)
s += '%s Interfaces (%d):\n' % (prefix, len(self.interfaces))
for i in self.interfaces:
s += i.summary(prefix + ' ')
s += '%s IPv4 Routes:%s\n' % (prefix, ' None' if len(self.ip4Routes) == 0 else '')
for r in self.ip4Routes:
s += r.summary(prefix + ' ')
s += '%s IPv6 Routes:%s\n' % (prefix, ' None' if len(self.ip6Routes) == 0 else '')
for r in self.ip6Routes:
s += r.summary(prefix + ' ')
s += '%s IPv4 DNS Servers: %s\n' % (prefix, 'None' if self.ip4DNSServers == None or len(self.ip4DNSServers) == 0 else ', '.join(self.ip4DNSServers))
s += '%s IPv6 DNS Servers: %s\n' % (prefix, 'None' if self.ip6DNSServers == None or len(self.ip6DNSServers) == 0 else ', '.join(self.ip6DNSServers))
return s
class Gateway(Node):
def __init__(self, name,
outsideNetwork=None, outsideInterface='outside', outsideIP4Address=None, outsideIP6Addresses=[],
insideNetwork=None, insideIP4Subnet=None, insideInterface='inside', insideIP6Subnets=[],
isIP4DefaultGateway=True, isIP6DefaultGateway=True,
ip4DNSServers=['8.8.8.8'], ip6DNSServers=None, hostAliases=[],
useHost=False):
Node.__init__(self, name, isHostNode=useHost)
if not useHost:
if not outsideNetwork:
raise ConfigException('Must specify outside network for gateway %s' % (name))
if not insideNetwork:
raise ConfigException('Must specify inside network for gateway %s' % (name))
self.outsideInterface = self.addInterface(outsideNetwork, name=outsideInterface)
if not isinstance(self.outsideInterface, ExistingHostInterface):
if not isinstance(self.outsideInterface.network, Internet) and not isinstance(self.outsideInterface.network, WiFiNetwork):
raise ConfigException('Incompatible network %s attached to outside interface of gateway %s' % (outsideNetwork, name))
self.outsideInterface.ip4Address = toIP4InterfaceAddress(outsideIP4Address, 'Unable to assign IPv4 address to node %s, outside interface %s: ' % (name, self.outsideInterface.ifName))
self.outsideInterface.ip6Addresses = [ toIP6InterfaceAddress(a, 'Unable to assign IPv6 address to node %s, outside interface %s: ' % (name, self.outsideInterface.ifName)) for a in outsideIP6Addresses ]
self.outsideInterface.autoConfigIP4 = True
self.outsideInterface.autoConfigIP6 = (self.outsideInterface.ip6Addresses != None and len(self.outsideInterface.ip6Addresses) == 0)
else:
if outsideIP4Address != None or len(outsideIP6Addresses) != 0:
raise ConfigException('Cannot specify addresses for host interface %s on node %s' % (self.outsideInterface.ifName, name))
self.insideInterface = self.addInterface(insideNetwork, name=insideInterface)
if not isinstance(self.insideInterface, ExistingHostInterface):
if not isinstance(self.insideInterface.network, WiFiNetwork):
raise ConfigException('Incompatible network %s attached to inside interface of gateway %s' % (insideNetwork, name))
self.insideInterface.ip4Address = toIP4InterfaceAddress(insideIP4Subnet, 'Unable to assign IPv4 address to node %s, inside interface %s: ' % (name, self.insideInterface.ifName))
self.insideInterface.ip6Addresses = [ toIP6InterfaceAddress(a, 'Unable to assign IPv6 address to node %s, inside interface %s: ' % (name, self.insideInterface.ifName)) for a in insideIP6Subnets ]
self.insideInterface.autoConfigIP4 = False
self.insideInterface.autoConfigIP6 = False
else:
if insideIP4Subnet != None or len(insideIP6Subnets) != 0:
raise ConfigException('Cannot specify addresses for host interface %s on node %s' % (self.insideInterface.ifName, name))
self.isIP4DefaultGateway = isIP4DefaultGateway
self.isIP6DefaultGateway = isIP6DefaultGateway
if self.insideInterface.ip4Address != None and self.insideInterface.ip4Address.network.prefixlen < 32:
self.advertisedIP4Subnet = self.insideInterface.ip4Address.network
else:
self.advertisedIP4Subnet = None
if self.insideInterface.ip6Addresses != None:
for a in self.insideInterface.ip6Addresses:
if a.network.prefixlen < 128:
self.insideInterface.advertisedIP6Prefixes.append(a.network)
self.ip4DNSServers = ip4DNSServers
self.ip6DNSServers = ip6DNSServers
self.hostAliases = hostAliases
def _configureAddresses(self):
Node._configureAddresses(self)
if self.insideInterface.ip6Enabled and self.insideInterface.network.ip6Supported:
# Assign an address to the inside interface for all prefixes that have been delegated to the outside interface.
self.insideInterface.ip6Addresses += [
makeIP6InterfaceAddress(p, subnetNum=0, macAddr=self.insideInterface.macAddress, prefixLen=64) for p in self.outsideInterface.delegatedIP6Prefixes
]
# On the inside interface, advertise a /64 prefix for all prefixes that have been delegated to the outside interface.
self.insideInterface.advertisedIP6Prefixes += [
makeIP6Prefix(p, prefixLen=64) for p in self.outsideInterface.delegatedIP6Prefixes
]
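# Worked illustration (added, not from the original source): if the outside
# interface is delegated the prefix 2001:db8:0:100::/56, the code above gives
# the inside interface an address in 2001:db8:0:100::/64 (subnet 0, IID taken
# from its MAC address) and advertises that same /64 to nodes on the inside network.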
def requestIP4AutoConfig(self, interface):
if interface.network == self.insideInterface.network and self.advertisedIP4Subnet:
return IP4AutoConfigInfo(
address = makeIP4IntefaceAddress(self.advertisedIP4Subnet, interface.node.nodeIndex, self.advertisedIP4Subnet.prefixlen),
defaultGateway = self.insideInterface.ip4Address.ip if self.isIP4DefaultGateway else None,
dnsServers = self.ip4DNSServers
)
else:
return None
def requestIP6AutoConfig(self, interface):
# If the node hasn't been configured, do
import unittest
import sublime
from Vintageous.tests.borrowed import mock
from Vintageous.tests.borrowed.mock import call
from Vintageous.state import VintageState
from Vintageous.test_runner import TestsState
from Vintageous.vi.constants import _MODE_INTERNAL_NORMAL
from Vintageous.vi.constants import MODE_INSERT
from Vintageous.vi.constants import MODE_NORMAL
from Vintageous.vi.constants import MODE_REPLACE
from Vintageous.vi.constants import MODE_VISUAL
from Vintageous.vi.constants import MODE_VISUAL_LINE
from Vintageous.vi.constants import MODE_NORMAL_INSERT
from Vintageous.vi.constants import digraphs
from Vintageous.vi.constants import DIGRAPH_MOTION
from Vintageous.vi.constants import DIGRAPH_ACTION
from Vintageous.state import _init_vintageous
from Vintageous.vi.cmd_data import CmdData
from Vintageous.vi import utils
from Vintageous.tests.commands import set_text
from Vintageous.tests.commands import add_selection
from Vintageous.tests.commands import make_region
import Vintageous.state
class TestCaseUsingView(unittest.TestCase):
"""Base class to get a clean view for testing.
"""
def setUp(self):
TestsState.view.settings().erase('vintage')
TestsState.view.window().settings().erase('vintage')
TestsState.view.settings().erase('is_widget')
self.state = VintageState(TestsState.view)
self.state.view.set_overwrite_status(False)
def tearDown(self):
self.state.view.sel().clear()
self.state.view.sel().add(sublime.Region(0, 0))
class Test_buffer_was_changed_in_visual_mode(TestCaseUsingView):
def testAlwaysReportsChangedForNonVisualModes(self):
self.state.mode = _MODE_INTERNAL_NORMAL
self.assertTrue(self.state.buffer_was_changed_in_visual_mode())
self.state.mode = MODE_NORMAL
self.assertTrue(self.state.buffer_was_changed_in_visual_mode())
def testReportsFalseIfCommandHistoryIsEmpty(self):
self.state.mode = MODE_VISUAL
self.assertEqual(self.state.view.command_history(-1), ('', None, 0))
self.assertFalse(self.state.buffer_was_changed_in_visual_mode())
def testReturnsFalseInVisualModeIfCantFindModifyingCommand(self):
self.state.mode = MODE_VISUAL
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
items = [('vi_run', {'action':{'command': 'vi_enter_visual_mode', 'args': {}}}, 0),
# A command without an action is a non-modifying command.
# TODO: This assertion isn't reversible, so test that too.
('vi_run', {'action':{}}, 0),
]
cmd_hist.side_effect = reversed(items)
self.assertFalse(self.state.buffer_was_changed_in_visual_mode())
def testReturnsTrueInVisualModeIfCanFindModifyingCommand(self):
self.state.mode = MODE_VISUAL
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
items = [('vi_run', {'action':{'command': 'vi_enter_visual_mode', 'args': {}}}, 0),
# A command without an action is a non-modifying command.
# TODO: This assertion isn't reversible, so test that too.
('vi_run', {'action':{'command': 'foo_bar'}}, 0),
]
cmd_hist.side_effect = reversed(items)
self.assertTrue(self.state.buffer_was_changed_in_visual_mode())
def testReturnsFalseInVisualLineModeIfCantFindModifyingCommand(self):
self.state.mode = MODE_VISUAL_LINE
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
items = [('vi_run', {'action':{'command': 'vi_enter_visual_line_mode', 'args': {}}}, 0),
# A command without an action is a non-modifying command.
# TODO: This assertion isn't reversible, so test that too.
('vi_run', {'action':{}}, 0),
]
cmd_hist.side_effect = reversed(items)
self.assertFalse(self.state.buffer_was_changed_in_visual_mode())
def testReturnsTrueInVisualLineModeIfCanFindModifyingCommand(self):
self.state.mode = MODE_VISUAL_LINE
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
items = [('vi_run', {'action':{'command': 'vi_enter_visual_line_mode', 'args': {}}}, 0),
# A command without an action is a non-modifying command.
# TODO: This assertion isn't reversible, so test that too.
('vi_run', {'action':{'command': 'foo_bar'}}, 0),
]
cmd_hist.side_effect = reversed(items)
self.assertTrue(self.state.buffer_was_changed_in_visual_mode())
@unittest.skip("Not implemented.")
def test_IgnoresNonModifyingCommandsThatAreTrueCommands(self):
# Leave this here for documentation. We need to exclude commands that do not modify the
# buffer, although are treated as other commands.
self.state.mode = MODE_VISUAL_LINE
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
items = [('vi_run', {'action':{'command': 'vi_enter_visual_line_mode', 'args': {}}}, 0),
('vi_run', {'action':{'command': 'vi_yy'}}, 0),
]
cmd_hist.side_effect = reversed(items)
self.assertFalse(self.state.buffer_was_changed_in_visual_mode())
def test_ReturnsFalseWhenWeExceedLookUpsLimit(self):
self.state.mode = MODE_VISUAL
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
many_items = [('xxx', {}, 0),] * 249
# The 251st item won't be consulted, so we should get False back.
many_items.insert(0, ('vi_run', {'action': {'command': 'foo_bar'}}, 0))
cmd_hist.side_effect = reversed(many_items)
self.assertFalse(self.state.buffer_was_changed_in_visual_mode())
def test_ReturnsTrueIfLastCommandInspectedIsModifyingCommand(self):
self.state.mode = MODE_VISUAL
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
many_items = [('xxx', {}, 0),] * 248
many_items.insert(0, ('vi_run', {'action': {'command': 'foo_bar'}}, 0))
cmd_hist.side_effect = reversed(many_items)
self.assertTrue(self.state.buffer_was_changed_in_visual_mode())
def test_ReturnsFalseIfNoModifyingCommandFoundAndWeHitBottomOfUndoStack(self):
# XXX: This should actually never happen in visual mode! It would mean that the opening
# vi_enter_visual_mode command was missing.
self.state.mode = MODE_VISUAL
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
many_items = [('xxx', {}, 0),] * 50
many_items.insert(0, ('', None, 0))
cmd_hist.side_effect = reversed(many_items)
self.assertFalse(self.state.buffer_was_changed_in_visual_mode())
class Test_update_repeat_command(TestCaseUsingView):
def tearDown(self):
self.state.repeat_command = ('', None, 0)
def testIsEmptyAfterInstantiation(self):
self.assertEqual(self.state.repeat_command, ('', None, 0))
def testIgnoresEmptyCommands(self):
self.state.repeat_command = ('foo', {}, 0)
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
items = [('', None, 0)]
cmd_hist.side_effect = reversed(items)
self.state.update_repeat_command()
self.assertEqual(self.state.repeat_command, ('foo', {}, 0))
def testCanUpdateIfNativeModifyingCommandFound(self):
self.state.repeat_command = ('foo', {}, 0)
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
items = [('bar', {}, 0)]
cmd_hist.side_effect = reversed(items)
self.state.update_repeat_command()
self.assertEqual(self.state.repeat_command, ('bar', {}, 0))
def testCommandStaysTheSameIfIdenticalModifyingCommandFound(self):
self.state.repeat_command = ('foo', {}, 0)
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
items = [('foo', {}, 0)]
cmd_hist.side_effect = reversed(items)
self.state.update_repeat_command()
self.assertEqual(self.state.repeat_command, ('foo', {}, 0))
def testIgnoreNonModifyingViRunCommands(self):
self.state.repeat_command = ('foo', {}, 0)
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
items = [('vi_run', {}, 0)]
cmd_hist.side_effect = reversed(items)
self.state.update_repeat_command()
self.assertEqual(self.state.repeat_command, ('foo', {}, 0))
def testCanUpdateIfViRunModifyingCommandFound(self):
self.state.repeat_command = ('foo', {}, 0)
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
items = [('vi_run', {'action': 'fizz', 'args': {}}, 0)]
cmd_hist.side_effect = reversed(items)
self.state.update_repeat_command()
self.assertEqual(self.state.repeat_command, ('vi_run', {'action': 'fizz', 'args': {}}, 0))
def testCommandStaysTheSameIfIdenticalViRunModifyingCommandFound(self):
self.state.repeat_command = ('vi_run', {'action': 'fizz', 'args': {}}, 0)
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
items = [('vi_run', {'action': 'fizz', 'args': {}}, 0)]
cmd_hist.side_effect = reversed(items)
self.state.update_repeat_command()
self.assertEqual(self.state.repeat_command, ('vi_run', {'action': 'fizz', 'args': {}}, 0))
def testCanUpdateIfSequenceCommandFound(self):
self.state.repeat_command = ('foo', {}, 0)
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
items = [('sequence', {}, 0)]
cmd_hist.side_effect = reversed(items)
self.state.update_repeat_command()
self.assertEqual(self.state.repeat_command, ('sequence', {}, 0))
def testCommandStaysTheSameIfIdenticalSequenceModifyingCommandFound(self):
self.state.repeat_command = ('sequence', {'action': 'fizz', 'args': {}}, 0)
# patch command_history
with mock.patch.object(self.state.view, 'command_history') as cmd_hist:
items = [('sequence', {'action': 'fizz', 'args': {}}, 0)]
cmd_hist.side_effect = reversed(items)
self.state.update_repeat_command()
self.assertEqual(self.state.repeat_command, ('sequence', {'action': 'fizz', 'args': {}}, 0))
class TestVintageStateProperties(TestCaseUsingView):
def testCantSetAction(self):
self.state.action = 'foo'
self.assertEqual(self.state.view.settings().get('vintage')['action'], 'foo')
def testCantGetAction(self):
self.state.action = 'foo'
self.assertEqual(self.state.action, 'foo')
def testCantSetMotion(self):
self.state.motion = 'foo'
self.assertEqual(self.state.view.settings().get('vintage')['motion'], 'foo')
def testCantGetMotion(self):
self.state.motion = 'foo'
self.assertEqual(self.state.motion, 'foo')
def testCantSetRegister(self):
self.state.register = 'foo'
self.assertEqual(self.state.view.settings().get('vintage')['register'], 'foo')
def testCantGetRegister(self):
self.state.register = 'foo'
self.assertEqual(self.state.register, 'foo')
def testCantSetUserInput(self):
self.state.user_input_parsers.append(lambda x: True)
self.state.motion = 'bogus'
self.state.user_input = 'foo'
self.assertEqual(self.state.view.settings().get('vintage')['user_motion_input'], 'foo')
def testCantGetUserInput(self):
self.state.user_input_parsers.append(lambda x: True)
self.state.motion = 'bogus'
self.state.user_input = 'foo'
self.assertEqual(self.state.settings.vi['user_motion_input'], 'foo')
def testCantSetExpectingRegister(self):
self.state.expecting_register = 'foo'
self.assertEqual(self.state.view.settings().get('vintage')['expecting_register'], 'foo')
def testCantGetExpectingRegister(self):
self.state.expecting_register = 'foo'
self.assertEqual(self.state.expecting_register, 'foo')
def testCantSetExpectingUserInput(self):
self.state.expecting_user_input = 'foo'
self.assertEqual(self.state.view.settings().get('vintage')['expecting_user_input'], 'foo')
def testCantGetExpectingUserInput(self):
self.state.expecting_user_input = 'foo'
self.assertEqual(self.state.expecting_user_input, 'foo')
def testCantSetExpectingCancelAction(self):
self.state.cancel_action = 'foo'
self.assertEqual(self.state.view.settings().get('vintage')['cancel_action'], 'foo')
def testCantGetExpectingCancelAction(self):
self.state.cancel_action = 'foo'
self.assertEqual(self.state.cancel_action, 'foo')
def testCantSetMode(self):
self.state.mode = 'foo'
self.assertEqual(self.state.view.settings().get('vintage')['mode'], 'foo')
def testCantGetMode(self):
self.state.mode = 'foo'
self.assertEqual(self.state.mode, 'foo')
def testCantSetMotionDigits(self):
self.state.motion_digits = 'foo'
self.assertEqual(self.state.view.settings().get('vintage')['motion_digits'], 'foo')
def testCantGetMotionDigits(self):
self.state.motion_digits = 'foo'
self.assertEqual(self.state.motion_digits, 'foo')
def testCantSetActionDigits(self):
self.state.action_digits = 'foo'
self.assertEqual(self.state.view.settings().get('vintage')['action_digits'], 'foo')
def testCantGetActionDigits(self):
self.state.action_digits = 'foo'
self.assertEqual(self.state.action_digits, 'foo')
def testCantSetNextMode(self):
self.state.next_mode = 'foo'
self.assertEqual(self.state.view.settings().get('vintage')['next_mode'], 'foo')
def testCantGetNextMode(self):
self.state.next_mode = 'foo'
self.assertEqual(self.state.next_mode, 'foo')
def testCanGetDefaultNextMode(self):
self.assertEqual(self.state.next_mode, MODE_NORMAL)
def testCantSetNextModeCommand(self):
self.state.next_mode_command = 'foo'
self.assertEqual(self.state.view.settings().get('vintage')['next_mode_command'], 'foo')
def testCantGetNextModeCommand(self):
self.state.next_mode_command = 'foo'
self.assertEqual(self.state.next_mode_command, 'foo')
class Test_reset(TestCaseUsingView):
def testResetsData(self):
self.state.action = 'action'
self.state.motion = 'motion'
self.state.register = 'register'
self.state.user_input_parsers.append(lambda x: True)
self.state.user_input = 'user_input'
self.state.expecting_register = 'expecting_register'
self.state.expecting_user_input = 'expecting_user_input'
self.state.cancel_action = 'cancel_action'
self.state.mode = 'mode'
self.state.next_mode = 'next_mode'
self.state.next_mode_command = 'next_mode_command'
self.state.reset()
self.assertEqual(self.state.action, None)
self.assertEqual(self.state.motion, None)
self.assertEqual(self.state.register, None)
self.assertEqual(self.state.user_input, '')
self.assertEqual(self.state.expecting_register, False)
self.assertEqual(self.state.expecting_user_input, False)
self.assertEqual(self.state.cancel_action, False)
self.assertEqual(self.state.mode, 'mode')
self.assertEqual(self.state.next_mode, MODE_NORMAL)
self.assertEqual(self.state.next_mode_command, None)
def testCallsCorrectModeChangeMethod(self):
self.state.next_mode = MODE_INSERT
self.state.next_mode_command = 'foo'
# XXX Check the code. Do we actually need to call this method at this point?
with mock.patch.object(self.state.view, 'run_command') as rc:
self.state.reset()
rc.assert_called_once_with('foo')
self.state.reset()
self.state.next_mode = MODE_NORMAL
self.state.next_mode_command = 'bar'
with mock.patch.object(self.state.view, 'run_command') as m:
self.state.reset()
m.assert_called_once_with('bar')
def testDoesNotCallAnyModeChangeCommandForOtherModes(self):
self.state.next_mode = MODE_VISUAL_LINE
self.state.next_mode_command = 'foo'
with mock.patch.object(self.state.view, 'run_command') as rc:
self.state.reset()
self.assertEqual(rc.call_count, 0)
def testDoesNotCallAnyModeChangeCommandIfNotSpecified(self):
self.state.next_mode = MODE_NORMAL
with mock.patch.object(self.state.view, 'run_command') as rc:
self.state.reset()
self.assertEqual(rc.call_count, 0)
self.state.next_mode = MODE_VISUAL
with mock.patch.object(self.state.view, 'run_command') as rc:
self.state.reset()
self.assertEqual(rc.call_count, 0)
def testUpdatesRepeatCommandIfThereWasAnAction(self):
self.state.action = 'foo'
with mock.patch.object(self.state, 'update_repeat_command') as m:
self.state.reset()
self.assertEqual(m.call_count, 1)
def testDoesNotUpdateRepeatCommandIfThereWasNoAction(self):
with mock.patch.object(self.state, 'update_repeat_command') as m:
self.state.reset()
self.assertEqual(m.call_count, 0)
class Test__init_vintageous(TestCaseUsingView):
def testAbortsIfPassedWidget(self):
self.state.action = 'foo'
self.state.view.settings().set('is_widget', True)
_init_vintageous(self.state.view)
self.assertEqual(self.state.action, 'foo')
def testAbortsIfPassedViewWithoutSettings(self):
self.state.action = 'foo'
_init_vintageous(object())
self.assertEqual(self.state.action, 'foo')
def testAbortsIfAskedToNotResetDuringInit(self):
self.state.action = 'foo'
with mock.patch.object(Vintageous.state, '_dont_reset_during_init') as x:
x.return_value = True
_init_vintageous(self.state.view)
self.assertEqual(self.state.action, 'foo')
def testResetIsCalled(self):
self.state.action = 'foo'
with mock.patch.object(VintageState, 'reset') as m:
_init_vintageous(self.state.view)
self.assertEqual(m.call_count, 1)
def testResets(self):
self.state.action = 'foo'
_init_vintageous(self.state.view)
self.assertEqual(self.state.action, None)
def testCallsEnterNormalModeCommandIfStateIsInVisualMode(self):
self.state.mode = MODE_VISUAL
with mock.patch.object(self.state.view, 'run_command') as m:
_init_vintageous(self.state.view)
m.assert_called_once_with('enter_normal_mode')
def testCallsEnterNormalModeCommandIfStateIsInVisualLineMode(self):
self.state.mode = MODE_VISUAL_LINE
with mock.patch.object(self.state.view, 'run_command') as m:
_init_vintageous(self.state.view)
m.assert_called_once_with('enter_normal_mode')
def testCallsEnterNormalModeFromInsertModeCommandIfStateIsInInsertMode(self):
self.state.mode = MODE_INSERT
with mock.patch.object(self.state.view, 'run_command') as m:
_init_vintageous(self.state.view)
m.assert_called_once_with('vi_enter_normal_mode_from_insert_mode')
def testCallsEnterNormalModeFromInsertModeCommandIfStateIsInReplaceMode(self):
self.state.mode = MODE_REPLACE
with mock.patch.object(self.state.view, 'run_command') as m:
_init_vintageous(self.state.view)
m.assert_called_once_with('vi_enter_normal_mode_from_insert_mode')
def testCallsRunNormalInsertModeActionsCommandIfStateIsInNormalInsertMode(self):
self.state.mode = MODE_NORMAL_INSERT
with mock.patch.object(self.state.view, 'run_command') as m:
_init_vintageous(self.state.view)
m.assert_called_once_with('vi_run_normal_insert_mode_actions')
# TODO: Test that enter_normal_mode() gets called when it should.
class Test_action(TestCaseUsingView):
def canSet(self):
self.state.action = 'foo'
self.assertEqual(self.state.action, 'foo')
def testTriesToFindDigraph(self):
self.state.action = 'xxx'
with mock.patch.dict(digraphs, {('xxx', 'yyy'): ('ooo', None)}):
self.state.action = 'yyy'
self.assertEqual(self.state.action, 'ooo')
def testCanFindDigraphMotion(self):
self.state.action = 'xxx'
subst = {('xxx', 'xxx'): ('ooo', DIGRAPH_MOTION)}
with mock.patch.dict(digraphs, subst):
self.state.action = 'xxx'
self.assertEqual(self.state.motion, 'ooo')
self.assertEqual(self.state.action, None)
def testCanFindDigraphAction(self):
self.state.action = 'xxx'
subst = {('xxx', 'xxx'): ('ooo', DIGRAPH_ACTION)}
with mock.patch.dict(digraphs, subst):
self.state.action = 'xxx'
self.assertEqual(self.state.motion, None)
#!/usr/bin/env python
#
# Copyright 2016 Tangentix Ltd
"""
App Engine Settings Module
==========================
Provides cached settings for use by App Engine instances in the form of key value pairs
usage:
from settings import Settings
settings=Settings() # Loads up current settings on first try
settings.thing="helloworld" # Changes or creates a new entry for keyname="thing" in the datastore
settings.thing # returns a string value named as keyname="thing" in the datastore
settings.setmaxage(1500) # sets up the default maximum age
settings.forcerefresh() # Refreshes all the settings unconditionally
Has a hardcoded limit of 1000 settings, but frankly it would be horrible to use this for that many!
If the object presented is a string, a float, or an int, it is stored as a string representation and enttype is set accordingly;
otherwise (e.g. for more complex objects) a JSON-serialized version is stored (to allow human editing when /settings is visited)
add a settings handle to the app.yaml like this:
- url: /settings*
script: settings.app
TO DO:
Switch to storing incoming objects as JSON-serialized versions of themselves, so anything json.dumps can handle will be stored OK
Then recovery simply returns whatever you get from json.loads
Add GUI to allow settings to be maintained more easily and to reduce the danger of errors with json fields
"""
import logging,webapp2,json
from google.appengine.ext import ndb
from datetime import datetime,timedelta
class SettingStore(ndb.Model):
"""
Used to store a single key value pair of settings
"""
keyname=ndb.StringProperty()
value=ndb.TextProperty()# Used to store the value
enttype=ndb.StringProperty()# Used to store type, if not present then string is assumed
class Settings(object):
"""
A settings object, contains all the settings
"""
def __init__(self,maxage=1000):
#logging.info("Initialising settings")
self._maxage=maxage # In seconds
self._lastloaded=None # Datetime for the last load of the settings
self.forcerefresh() # Set up settings first time
if self._settings=={}:# We have nothing at all so set up the dummy (needed so you can use console to manage)
logging.warn("No old settings, creating a dummy record- can be deleted once real data is available")
dummy=SettingStore()
dummy.keyname="DummyKey"
dummy.value="DummyValue"
dummy.put()
def setone(self,keyname,newvalue):
"""
Directly sets a single value in both the cached and datastore locations
without doing a complete refresh
"""
qry=SettingStore.query(SettingStore.keyname==keyname)
entries=qry.fetch(10)
#logging.info("Found entries of : %s " % entries)
if len(entries)==1:
# Key already exists so needs to be changed
#logging.info("Modifying keyname: %s to value %s" % (keyname,newvalue))
entry=entries[0]
# Validate that the newvalue matches the enttype
if type(newvalue)==int:
try:
newvalue=int(newvalue)
except ValueError:
raise Exception("Could not convert to int")
else:
storevalue=str(newvalue)
elif type(newvalue)==float:
try:
newvalue=float(newvalue)
except ValueError:
raise Exception("Could not convert to float")
else:
storevalue=str(newvalue)
elif type(newvalue)==bool:
# newvalue is already a bool here; store its string form ("True"/"False") directly
storevalue=str(newvalue)
elif type(newvalue)==str:
storevalue=newvalue
else:
# Assume a JSON-serializable object, matching the new-key path and forcerefresh()
storevalue=json.dumps(newvalue)
# Modify the existing entry
entry.value=storevalue
entry.put()
elif len(entries)==0:
#logging.info( "Creating new setting keyname: %s to newvalue: %s " % (keyname,newvalue))
# Make new key
s=SettingStore()
s.keyname=keyname
if type(newvalue)==int:
s.enttype="int"
s.value=str(newvalue)
elif type(newvalue)==float:
s.enttype="float"
s.value=str(newvalue)
elif type(newvalue)==str:
s.enttype="string"
s.value=newvalue
elif type(newvalue)==bool:
s.enttype="boolean"
s.value=str(newvalue)
else:
# Assume it is a JSON-serializable element
s.enttype="json"
s.value=json.dumps(newvalue)
s.put()
else:
logging.error("Strange- we seem to have %s instances of a settings called %s" % (len(entries),keyname))
self._settings[keyname]=newvalue# Set local cache of that value
def setmaxage(self,maxage):
"""
Change the existing maximum age (in seconds) for the cached settings
"""
self._maxage=maxage
def forcerefresh(self):
"""
Loads the entire set of existing keys and values from the datastore
regardless of the age or presence of the cached data
"""
#logging.info( "loading settings from datastore to local cache")
qry=SettingStore.query()
sets=qry.fetch(1000)# Return up to 1000 records
self._lastloaded=datetime.utcnow()
newsettings={}
for set in sets:
if set.enttype=="int":
val=int(set.value)
elif set.enttype=="float":
val=float(set.value)
elif set.enttype=="boolean":
val=(set.value=="True")
elif set.enttype=="string":
val=set.value
else:
val=json.loads(set.value)
newsettings[set.keyname]=val
self._settings=newsettings # replace the old settings
#logging.info("Loaded new data into settings")
def refresh(self):
"""
Loads or refreshes the cache only if it is stale
"""
#logging.info( "Non-forced refresh selected")
if self._lastloaded==None:
#logging.info("No settings loaded yet, doing so now. settings._lastloaded is: %s.." % self._lastloaded)
self.forcerefresh()
elif datetime.utcnow()>(self._lastloaded+timedelta(seconds=self._maxage)):
logging.info( "Data in settings older than %s seconds, forcing refresh" % self._maxage)
self.forcerefresh()
else:
logging.info("No refresh needed as _lastloaded = %s and now it's %s" % (self._lastloaded,datetime.utcnow()))
def __getattr__(self,keyname):
"""
Attempts to return the setting with the keyname
1st: If the cache is fresh and the value exists then it is simply returned
2nd: If not then the cache is refreshed
However, if it still does not exist, then None is returned
"""
self.refresh()
res=self._settings.get(keyname,None)
if res is None:
self.forcerefresh()
res=self._settings.get(keyname,None)
return res
def __setattr__(self,keyname,newvalue):
"""
Attempts to set or update a setting called keyname with the required newvalue
Works directly on the datastore, then forces a cache refresh
"""
# Deal with setting instance attributes
if keyname[0]=="_":
self.__dict__[keyname]=newvalue
#print "Set instance attribute name: %s to newvalue: %s" % (keyname,newvalue)
else:
self.setone(keyname,newvalue)
# Now refresh all the entries from the datastore into the cache
self.forcerefresh()
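# A minimal usage sketch (an assumption added for illustration; the key names
# below are hypothetical):
#
#   settings = Settings(maxage=300)      # cache entries for up to 300 seconds
#   settings.greeting = 'hello'          # creates/updates a SettingStore entity
#   print settings.greeting              # served from the cache while it is fresh
#   settings.forcerefresh()              # reload everything from the datastore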
class ShowEntry(webapp2.RequestHandler):
"""
Allows editing a single entry at a time
"""
def get(self):
"""
Entry form
"""
keyname=self.request.get("keyname")
qry=SettingStore.query(SettingStore.keyname==keyname)
entries=qry.fetch(1)
entry=entries[0]
if entry.enttype=="int":
r=self.intform(entry)
elif entry.enttype=="float":
r=self.floatform(entry)
elif entry.enttype=="string":
r=self.stringform(entry)
elif entry.enttype=="json":
r=self.jsonform(entry)
elif entry.enttype=="boolean":
r=self.booleanform(entry)
else:
r="<h3>Unsupported type: %s</h3>" % entry
page="""<!DOCTYPE HTML>
<html>
<head>
<link href="/statics/dist/jsoneditor.css" rel="stylesheet" type="text/css">
<script src="/statics/dist/jsoneditor.js"></script>
<style type="text/css">
#jsoneditor {
width: 90%%;
height: 500px;
}
</style>
</head>
<h2>Setting: %s</h2>
<form action="/settings/modifysetting/" method="post">
%s
<br />
<input type="submit" value="submit" />
</form>
</html>
""" % (entry.keyname,r)
self.response.write(page)
def intform(self,entry):
"""Returns form innerHtml to edit the type: int"""
r="""
<input type="hidden" name="enttype" value="int" />
<input type="hidden" name="keyname" value="%s" />
<input type="text" length="50" name="value" value="%s" />
""" % (entry.keyname,entry.value)
return r
def booleanform(self,entry):
"""Returns form innerHtml to edit the type: bool"""
r="""
<input type="hidden" name="enttype" value="boolean" />
<input type="hidden" name="keyname" value="%s" />
<select name="value">
<option value="True">True</option>
<option value="False">False</option>
</select>
""" % (entry.keyname)
return r
def floatform(self,entry):
"""Returns form innerHtml to edit the type: int"""
r="""
<input type="hidden" name="enttype" value="float" />
<input type="hidden" name="keyname" value="%s" />
<input type="text" length="50" name="value" value="%s" />
""" % (entry.keyname,entry.value)
return r
def stringform(self,entry):
"""Returns form innerHtml to edit the type: int"""
r="""
<input type="hidden" name="enttype" value="string" />
<input type="hidden" name="keyname" value="%s" />
<input type="text" size="100" name="value" value="%s" />
""" % (entry.keyname,entry.value)
return r
def jsonform(self,entry):
"""Returns form innerHtml to edit the type: int"""
r="""
<input type="hidden" name="enttype" value="json" />
<input type="hidden" name="keyname" value="%s" />
<div id="jsoneditor"></div>
<textarea style="enabled:false" id="rawjson" cols="80" rows="5" name="value">%s</textarea>
<script>
// create the editor
var container = document.getElementById('jsoneditor');
var options = {onChange:changed};
var editor = new JSONEditor(container, options);
// set json
var jsonval=JSON.parse(document.getElementById("rawjson").value)
editor.set(jsonval);
editor.expandAll();
function changed(jsonnew)
{
var jsonlive=editor.get();
//console.log(jsonlive);
var jsontext=JSON.stringify(jsonlive, null, 2);
//console.log(jsontext);
document.getElementById("rawjson").value=jsontext;
}
</script>
""" % (entry.keyname,entry.value)
return r
class ModifySetting(webapp2.RequestHandler):
"""
Update a single settings entry
"""
def post(self):
enttype=self.request.get("enttype")
keyname=self.request.get("keyname")
valtext=self.request.get("value")
if enttype=="int":
v=int(valtext)
value=valtext
elif enttype=="float":
v=float(valtext)
value=valtext
elif enttype=="string":
value=valtext
elif enttype=="json":
value=valtext
elif enttype=="boolean":
assert valtext in ["False","True"],"Weird error: a non-boolean value was returned from the form"
value=valtext
else:
raise Exception("Unexpected type from form entry- %s - strange" % enttype)
qry=SettingStore.query(SettingStore.keyname==keyname)
entries=qry.fetch(1)
entry=entries[0]
entry.value=value
entry.put()
self.response.write("""<h3>
Updated value of %s</h3><p>New value is:<br />
<pre>%s</pre>
<script>
window.setTimeout(backtolist,3000);
function backtolist()
{
window.location="/settings";
}
</script>""" % (keyname,value))
class CreateNewForm(webapp2.RequestHandler):
"""
Renders form to create a new empty setting
"""
def get(self):
r="""
<form action="/settings/createnewentry/">
Keyname: <input type="text" name="keyname" /><br/>
Type: <select name="enttype">
<option value="int"/>int</option>
<option value="float">float</option>
<option value="string">string</option>
<option value="boolean">boolean</option>
<option value="json">json</option>
</select><br/>
<input type="submit" value="submit" />
</form>
"""
self.response.write(r)
class CreateNewEntry(webapp2.RequestHandler):
"""
Sets up a new empty setting
"""
def get(self):
keyname=self.request.get("keyname")
enttype=self.request.get("enttype")
newEntity=SettingStore()
newEntity.keyname=keyname
newEntity.enttype=enttype
newEntity.value='"None Yet!"'
newEntity.put()
r="""
<h3>Set up a new setting of keyname: %s and type: %s</h3>
<script>
window.setTimeout(backtolist,3000);
function backtolist()
{
window.location="/settings";
}
</script>
""" % (keyname,enttype)
self.response.write(r)
class DeleteSetting(webapp2.RequestHandler):
"""
Deletes a single setting
"""
def get(self):
keyname=self.request.get("keyname")
qry=SettingStore.query(SettingStore.keyname==keyname)
entries=qry.fetch(1)
entry=entries[0]
entry.key.delete()
r="""<h3>Entry for %s deleted</h3>
<script>
window.setTimeout(backtolist,3000);
function backtolist()
{
window.location="/settings";
}
</script>
""" % keyname
self.response.write(r)
class MainHandler(webapp2.RequestHandler):
"""
Lists and allows editing of the settings
"""
def get(self):
"""
List the settings
"""
entries=self.retreiveAllSettings()
#pylint: disable=C0302
# Copyright 2014-2015 Whitewood Encryption Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''NIST SP800-90A code'''
import numbers
import struct
import logging
from Crypto.Cipher import AES
from Crypto.Hash import SHA, SHA224, SHA256, SHA384, SHA512
import WesEntropy.Engine.utilities as utilities
import WesEntropy.Engine.entropysource as entropysource
#pylint: disable=C0103
#pylint: disable=R0902
class DRBG(object):
'''Abstract base class for an SP800-90A DRBG mechanism; subclasses supply the *_algorithm methods'''
#pylint: disable=R0913
def __init__(self, entropy_source,
supported_security_strengths,
nonce_required,
max_length,
max_personalization,
max_additional_input,
max_n_bits,
max_interval):
if type(self) == DRBG:
raise NotImplementedError(
'This class should not be instantiated directly.')
# Check that we can process the entropy source provided
if isinstance(entropy_source, entropysource.EntropySource):
self.entropy_source = entropy_source
else:
self.entropy_source = entropysource.new(entropy_source)
if not self.entropy_source:
raise ValueError(
'Unrecognized entropy source [%s]' % entropy_source)
self.supported_security_strengths = supported_security_strengths
self.nonce_required = nonce_required
self.max_length = max_length
self.max_personalization = max_personalization
self.max_additional_input = max_additional_input
self.max_n_bits = max_n_bits
self.max_interval = max_interval
self.counter = None
self.instantiated = False
self.security_strength = 0
self.min_length = 0
self.prediction_resistance = False
self.reseed_required = True
self.reseed_failed = False
self.max_security_strength = max(self.supported_security_strengths)
#pylint: enable=R0913
def get_name(self):
'''Get name of DRBG for inclusion in statistics'''
raise NotImplementedError()
def instantiate_drbg(self, requested_security_strength,
prediction_resistance=False, personalization=None,
reseed_rate=None):
'''Instantiate DRBG as specified'''
# Now instantiate the DRBG using this entropy source
result, explanation = self.instantiate(
requested_security_strength, prediction_resistance, personalization)
if 'SUCCESS' != result:
raise RuntimeError(
'%s: Unable to instantiate DRBG - %s.' % (result, explanation))
# Finally, adjust the reseed rate as requested
if reseed_rate is not None:
if isinstance(reseed_rate, numbers.Number):
self.max_interval = reseed_rate
elif reseed_rate == 'MINIMAL':
# No adjustment needed here - the default is minimal.
pass
elif reseed_rate == 'LINESPEED':
self.max_interval = 1
return
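# Usage sketch (names are illustrative, not fixed by this class): a concrete
# subclass such as HashDRBG is constructed with an entropy source, then
# instantiated at the desired strength; 'LINESPEED' forces a reseed on every
# generate call by setting max_interval to 1.
#
#     drbg = HashDRBG(...)                       # constructor args omitted here
#     drbg.instantiate_drbg(256,
#                           prediction_resistance=False,
#                           reseed_rate='LINESPEED')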
def __del__(self):
if self.instantiated:
self.uninstantiate()
def instantiate(self, requested_security_strength,
prediction_resistance=False, personalization=None):
'''
Instantiate a DRBG, as specified in NIST SP800-90A Section 9.1, page 27
'''
# Step 1
if requested_security_strength > self.max_security_strength:
return ('ERROR',
'security strength greater than max provided by algorithm')
# Step 2
if (prediction_resistance and
not self.entropy_source.supports_prediction_resistance):
return ('ERROR',
'entropy source does not support prediction resistance')
if prediction_resistance:
self.prediction_resistance = True
# Step 3
if personalization and len(personalization) > self.max_personalization:
return 'ERROR', 'personalization string too long'
# Step 4
for security_strength in self.supported_security_strengths:
if (requested_security_strength <=
security_strength <=
self.max_security_strength):
self.security_strength = security_strength
self.min_length = security_strength
break
# Step 5
# There is NO step 5!
# Step 6
# WARNING: There appears to be a bug in this step as written in
# the standard. The entropy goes into the Update function, which
# in some cases requires exactly seedlen bits. In this implementation,
# seedlen is not yet known, so we take min_length * 2, and truncate
# it later if needed.
status, entropy_input = self.entropy_source.get_entropy_input(
self.security_strength, self.min_length * 2,
self.max_length, prediction_resistance)
# Step 7
if status != 'SUCCESS':
return ('CATASTROPHIC_ERROR',
('received [%s] while requesting entropy_input' % status))
# Step 8
if self.nonce_required:
status, nonce = self.entropy_source.get_entropy_input(
self.security_strength, self.security_strength / 2,
self.security_strength, prediction_resistance)
if status != 'SUCCESS':
return ('CATASTROPHIC_ERROR',
('received [%s] while requesting nonce' % status))
else:
nonce = None
# Step 9
self.instantiate_algorithm(
entropy_input, nonce, personalization, self.security_strength)
# Step 10
# The state handle is the object itself.
self.instantiated = True
self.reseed_required = False
return 'SUCCESS', 'SUCCESS'
def reseed(self, prediction_resistance = None, additional_input = ''):
'''
Reseed a DRBG, as specified in NIST SP800-90A Section 9.2, page 30
'''
# Step 1
if not self.instantiated:
return 'ERROR'
# Step 2
# Prediction resistance defaults to the value set at instantiation
if prediction_resistance is None:
prediction_resistance = self.prediction_resistance
elif prediction_resistance and not self.prediction_resistance:
return 'ERROR'
# Step 3
if len(additional_input) > self.max_additional_input:
return 'ERROR'
# LINESPEED tweak - put in as many bits as we are taking out
# WARNING: This tweak is hard to implement because sometimes we
# require entropy_input and additional_input to be seedlen bits.
# Step 4
status, entropy_input = self.entropy_source.get_entropy_input(
self.security_strength, self.min_length,
self.max_length, prediction_resistance)
# Step 5
# Keep count of reseed rate, if failure, drop to provisional rate and
# retry every so often until minimal is hit, then catastrophic fail.
if status != 'SUCCESS':
logging.debug("DRBG Counter: %s", self.counter)
if self.counter >= (1L << 48):
return 'CATASTROPHIC_ERROR'
else:
return 'RESEED_FAILED'
# Step 6
self.reseed_algorithm(entropy_input, additional_input)
# Step 7
# This is taken care of by instance variables.
# Step 8
return 'SUCCESS'
#pylint: disable=R0911
#pylint: disable=R0912
def generate(self, n_bits, security_strength = None,
prediction_resistance = None, additional_input = ''):
'''
Generate DRBG output, as specified in NIST SP800-90A Section 9.3.1,
page 33
'''
# Step 1
if not self.instantiated:
return 'ERROR', 'DRBG not instantiated'
# Step 2
if n_bits > self.max_n_bits:
return ('ERROR',
('Requested %s bits, where max is %s bits.' %
(n_bits, self.max_n_bits)))
# Step 3
# Security strength defaults to the value set at instantiation
if security_strength is None:
security_strength = self.security_strength
elif security_strength > self.security_strength:
return 'ERROR', \
'Requested a higher security strength than is available.'
# Step 4
if len(additional_input) > self.max_additional_input:
return 'ERROR', 'Additional input is too long.'
# Step 5
# Prediction resistance defaults to the value set at instantiation
if prediction_resistance is None:
prediction_resistance = self.prediction_resistance
elif prediction_resistance and not self.prediction_resistance:
return 'ERROR', \
'Requested prediction resistance but it isn\'t available'
# Step 6
pseudorandom_bits = None
while not pseudorandom_bits:
# Step 7
if self.reseed_required or prediction_resistance:
# Step 7.1
status = self.reseed(prediction_resistance, additional_input)
# Step 7.2
if status != 'SUCCESS':
if status == 'RESEED_FAILED':
# Log only if it's the first failure to avoid log spam
if not self.reseed_failed:
self.reseed_failed = True
logging.warn("DRBG reseed failed, " +
"continuing with previous seed.")
else:
return status, 'Reseed failed, quitting.'
else:
self.reseed_failed = False
# Step 7.3
# This is taken care of by instance variables
# Step 7.4
additional_input = ''
# Step 7.5
self.reseed_required = False
# Step 8
(status, pseudorandom_bits) = self.generate_algorithm(
n_bits, additional_input)
# Step 9
if status == 'RESEED':
# Step 9.1
self.reseed_required = True
# Step 9.2
if self.prediction_resistance:
prediction_resistance = True
# Step 9.3
if pseudorandom_bits == '':
pseudorandom_bits = None
# Step 10
# This is taken care of by instance variables
# Step 11
return ('SUCCESS', pseudorandom_bits)
#pylint: enable=R0911
#pylint: enable=R0912
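# Generation sketch: generate() returns a (status, bits) pair; a 'RESEED'
# status from the underlying algorithm is handled internally by the loop
# above, so callers normally only need to check for 'SUCCESS'.
#
#     status, bits = drbg.generate(128)
#     if status == 'SUCCESS':
#         consume(bits)                          # 'consume' is a placeholder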
def uninstantiate(self):
'''Uninstantiate this DRBG'''
self.entropy_source.close_entropy_source()
self.uninstantiate_algorithm()
self.instantiated = False
return
def instantiate_algorithm(self, entropy_input, nonce,
personalization, security_strength):
'''Instantiate this algorithm'''
raise NotImplementedError(
'This class should not be instantiated directly.')
def reseed_algorithm(self, entropy_input, additional_input):
'''Reseed this DRBG'''
raise NotImplementedError(
'This class should not be instantiated directly.')
def generate_algorithm(self, n_bits, additional_input):
'''Generate n_bits of pseudo-random data'''
raise NotImplementedError(
'This class should not be instantiated directly.')
def uninstantiate_algorithm(self):
'''Uninstantiate this algorithm'''
raise NotImplementedError(
'This class should not be instantiated directly.')
#pylint: enable=R0902
def aes_cipher(key):
'''Return a new AES Cipher'''
return AES.new(key, mode = AES.MODE_ECB)
DRBG_CIPHERS = {
'AES' : {
'new cipher' : aes_cipher,
'keylengths' : {112: None, 128: 128, 192: 192, 256: 256},
'block size' : 128,
}
}
DRBG_HASHES = {
'SHA1' : {
'hash' : SHA,
'strengths' : [80],
'seed length' : 440,
'output length' : 160,
},
'SHA224' : {
'hash' : SHA224,
'strengths' : [80, 128, 192],
'seed length' : 440,
'output length' : 224,
},
'SHA256' : {
'hash' : SHA256,
'strengths' : [80, 128, 192, 256],
'seed length' : 440,
'output length' : 256,
},
'SHA384' : {
'hash' : SHA384,
'strengths' : [80, 128, 192, 256],
'seed length' : 888,
'output length' : 384,
},
'SHA512' : {
'hash' : SHA512,
'strengths' : [80, 128, 192, 256],
'seed length' : 888,
'output length' : 512,
},
}
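# The tables above hold per-mechanism parameters: DRBG_CIPHERS carries the
# block-cipher details for a CTR-style construction and DRBG_HASHES carries
# each hash's supported strengths, seed length and output length in bits.
# For example, a SHA-256 based instance (the HashDRBG class below) would read:
#
#     DRBG_HASHES['SHA256']['seed length']    # -> 440
#     DRBG_HASHES['SHA256']['output length']  # -> 256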
#pylint: disable=R0902
class HashDRBG(DRBG):
'''
HashDRBG as specified in NIST SP800-90A, Section 10.1.1, page 39
'''
#pylint: disable=R0913
def __init__(self, hashtype, entropy_source,
oprot.writeI64(self.mutator)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class flush_mutator_result:
"""
Attributes:
- e
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
)
def __init__(self, e=None,):
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = ClientException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('flush_mutator_result')
if self.e != None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class exists_namespace_args:
"""
Attributes:
- ns
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'ns', None, None, ), # 1
)
def __init__(self, ns=None,):
self.ns = ns
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.ns = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('exists_namespace_args')
if self.ns != None:
oprot.writeFieldBegin('ns', TType.STRING, 1)
oprot.writeString(self.ns)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class exists_namespace_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = ClientException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('exists_namespace_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.e != None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
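# A minimal serialization sketch using one of the generated structs above
# (protocol/transport names follow the standard Thrift Python library; this
# is illustrative, not part of the generated code):
#
#     from thrift.transport import TTransport
#     from thrift.protocol import TBinaryProtocol
#
#     buf = TTransport.TMemoryBuffer()
#     prot = TBinaryProtocol.TBinaryProtocol(buf)
#     exists_namespace_args(ns='test').write(prot)
#     raw = buf.getvalue()
#
# Every <method>_args / <method>_result pair below repeats the same pattern:
# read() walks (fname, ftype, fid) triples until TType.STOP and write() emits
# only the fields that are not None.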
class exists_table_args:
"""
Attributes:
- ns
- name
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'ns', None, None, ), # 1
(2, TType.STRING, 'name', None, None, ), # 2
)
def __init__(self, ns=None, name=None,):
self.ns = ns
self.name = name
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.ns = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('exists_table_args')
if self.ns != None:
oprot.writeFieldBegin('ns', TType.I64, 1)
oprot.writeI64(self.ns)
oprot.writeFieldEnd()
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 2)
oprot.writeString(self.name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class exists_table_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = ClientException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('exists_table_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.e != None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_table_id_args:
"""
Attributes:
- ns
- table_name
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'ns', None, None, ), # 1
(2, TType.STRING, 'table_name', None, None, ), # 2
)
def __init__(self, ns=None, table_name=None,):
self.ns = ns
self.table_name = table_name
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.ns = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.table_name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_table_id_args')
if self.ns != None:
oprot.writeFieldBegin('ns', TType.I64, 1)
oprot.writeI64(self.ns)
oprot.writeFieldEnd()
if self.table_name != None:
oprot.writeFieldBegin('table_name', TType.STRING, 2)
oprot.writeString(self.table_name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_table_id_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = ClientException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_table_id_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
if self.e != None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
import argparse
import numpy as np
import torch
import math
import pickle
import pprint
import sys
SHAPES = ['circle', 'cross', 'ellipse', 'pentagon', 'rectangle', 'semicircle', 'square', 'triangle']
COLORS = ['blue', 'cyan', 'gray', 'green', 'magenta', 'red', 'yellow']
OUTOFDOMAIN = [('square', 'red'), ('triangle', 'green'), ('circle', 'blue'), ('rectangle', 'yellow'), ('cross', 'magenta'), ('ellipse', 'cyan')]
def convert_tensor_to_string(message):
'''Converts binary message stored in a pytorch tensor to a string'''
assert message.dim() == 1
m_len = message.size(0)
m_str = ""
for i in range(m_len):
m_str += "0" if message[i] == 0 else "1"
return m_str
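# Example (sketch): a 1-D binary message tensor maps to its bit string, e.g.
#
#     convert_tensor_to_string(torch.tensor([1., 0., 1., 1.]))  # -> "1011"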
def add_elem(m_dict, message, shape, color, answer_type):
'''General'''
m_dict['total'] += 1
m_dict[answer_type] += 1
if message not in m_dict:
m_dict[message] = {'shape': {'count': 0},
'color': {'count': 0},
'shape_color': {'count': 0},
'total': 0,
'correct': 0, 'correct_p': 0,
'incorrect': 0, 'incorrect_p': 0}
m_dict[message]['total'] += 1
m_dict[message][answer_type] += 1
'''Shapes'''
shape_dict = m_dict[message]['shape']
if shape is not None:
shape_dict['count'] += 1
if shape not in shape_dict:
shape_dict[shape] = 1
else:
shape_dict[shape] += 1
'''Colors'''
color_dict = m_dict[message]['color']
if color is not None:
color_dict['count'] += 1
if color not in color_dict:
color_dict[color] = 1
else:
color_dict[color] += 1
'''Shapes and colors'''
shape_color_dict = m_dict[message]['shape_color']
if shape is not None and color is not None:
sc = shape + '_' + color
shape_color_dict['count'] += 1
if sc not in shape_color_dict:
shape_color_dict[sc] = 1
else:
shape_color_dict[sc] += 1
return m_dict
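# Shape of the accumulator built by add_elem (sketch): per-message counters
# keyed by the bit string, plus global totals, e.g.
#
#     m_dict["1011"] = {
#         'shape': {'count': 3, 'square': 2, 'circle': 1},
#         'color': {'count': 3, 'red': 3},
#         'shape_color': {'count': 3, 'square_red': 2, 'circle_red': 1},
#         'total': 3, 'correct': 2, 'correct_p': 0,
#         'incorrect': 1, 'incorrect_p': 0,
#     }
#
# (The *_p fields stay 0 until calc_ratios() fills them in.)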
def calc_ratios(m_dict):
for m, stats in m_dict.items():
if m == 'total' or m == 'correct' or m == 'incorrect':
pass
else:
stats['correct_p'] = stats['correct'] / stats['total']
stats['incorrect_p'] = stats['incorrect'] / stats['total']
shape_dict = stats['shape']
for s in SHAPES:
if s in shape_dict:
shape_dict[s + '_p'] = shape_dict[s] / stats['total']
color_dict = stats['color']
for c in COLORS:
if c in color_dict:
color_dict[c + '_p'] = color_dict[c] / stats['total']
shape_color_dict = stats['shape_color']
for s in SHAPES:
for c in COLORS:
sc = s + '_' + c
if sc in shape_color_dict:
shape_color_dict[sc + '_p'] = shape_color_dict[sc] / stats['total']
shape_color_dict[sc + '_pratio'] = \
shape_color_dict[sc + '_p'] / (stats['shape'][s + '_p'] * stats['color'][c + '_p'])
shape_color_dict[sc + '_log_pratio'] = math.log(shape_color_dict[sc + '_pratio'])
m_dict['correct_p'] = m_dict['correct'] / m_dict['total']
m_dict['incorrect_p'] = m_dict['incorrect'] / m_dict['total']
return m_dict
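# The *_pratio fields computed above are pointwise ratios
# P(shape, color | m) / (P(shape | m) * P(color | m)), i.e. a per-message PMI
# once the log is taken. Worked example: with 10 uses of a message, 4 squares,
# 5 reds and 2 square_reds,
#
#     pratio     = (2/10) / ((4/10) * (5/10)) = 1.0
#     log_pratio = log(1.0)                   = 0.0   (independence)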
def count_pratios(m_dict):
counts = []
for m, stats in m_dict.items():
if m == 'total' or m == 'correct' or m == 'incorrect' or m == 'correct_p' or m == 'incorrect_p':
pass
else:
pratios = []
shape_color_dict = stats['shape_color']
for s in SHAPES:
for c in COLORS:
sc = s + '_' + c
if sc in shape_color_dict:
pratios.append((shape_color_dict[sc], shape_color_dict[sc + '_log_pratio']))
if len(pratios) > 0:
counts.append((m, stats['total'], pratios, len(pratios)))
counts = sorted(counts, key=lambda x: x[3])
return counts
def get_pratio_stats(counts):
mean_log_pratio = {'total': {'count': 0, 'log_pratio': 0},
'1': {'count': 0, 'log_pratio': 0},
'<=2': {'count': 0, 'log_pratio': 0},
'<=3': {'count': 0, 'log_pratio': 0},
'<=4': {'count': 0, 'log_pratio': 0},
'5+': {'count': 0, 'log_pratio': 0}}
for _, elem in enumerate(counts):
for prt in elem[2]:
if elem[3] == 1:
mean_log_pratio['1']['log_pratio'] += prt[0] * prt[1]
mean_log_pratio['1']['count'] += prt[0]
if elem[3] <= 2:
mean_log_pratio['<=2']['log_pratio'] += prt[0] * prt[1]
mean_log_pratio['<=2']['count'] += prt[0]
if elem[3] <= 3:
mean_log_pratio['<=3']['log_pratio'] += prt[0] * prt[1]
mean_log_pratio['<=3']['count'] += prt[0]
if elem[3] <= 4:
mean_log_pratio['<=4']['log_pratio'] += prt[0] * prt[1]
mean_log_pratio['<=4']['count'] += prt[0]
if elem[3] >= 5:
mean_log_pratio['5+']['log_pratio'] += prt[0] * prt[1]
mean_log_pratio['5+']['count'] += prt[0]
mean_log_pratio['total']['log_pratio'] += prt[0] * prt[1]
mean_log_pratio['total']['count'] += prt[0]
'''Normalize'''
mean_log_pratio['total']['log_pratio'] /= mean_log_pratio['total']['count']
mean_log_pratio['1']['log_pratio'] /= mean_log_pratio['1']['count']
mean_log_pratio['<=2']['log_pratio'] /= mean_log_pratio['<=2']['count']
mean_log_pratio['<=3']['log_pratio'] /= mean_log_pratio['<=3']['count']
mean_log_pratio['<=4']['log_pratio'] /= mean_log_pratio['<=4']['count']
mean_log_pratio['5+']['log_pratio'] /= mean_log_pratio['5+']['count']
mean_log_pratio['total']['pratio'] = math.exp(mean_log_pratio['total']['log_pratio'])
mean_log_pratio['1']['pratio'] = math.exp(mean_log_pratio['1']['log_pratio'])
mean_log_pratio['<=2']['pratio'] = math.exp(mean_log_pratio['<=2']['log_pratio'])
mean_log_pratio['<=3']['pratio'] = math.exp(mean_log_pratio['<=3']['log_pratio'])
mean_log_pratio['<=4']['pratio'] = math.exp(mean_log_pratio['<=4']['log_pratio'])
mean_log_pratio['5+']['pratio'] = math.exp(mean_log_pratio['5+']['log_pratio'])
pprint.pprint(mean_log_pratio)
return mean_log_pratio
def build_message_dict(data, agents="both"):
m_dict = {'total': 0, 'correct': 0, 'incorrect': 0}
for _, d in enumerate(data):
if agents == "both":
for msg, m_prob in zip(d["msg_1"], d["probs_1"]):
answer_type = 'correct' if d["correct"] else 'incorrect'
m_dict = add_elem(m_dict, convert_tensor_to_string(msg), d["shape"], d["color"], answer_type)
for msg, m_prob in zip(d["msg_2"], d["probs_2"]):
answer_type = 'correct' if d["correct"] else 'incorrect'
m_dict = add_elem(m_dict, convert_tensor_to_string(msg), d["shape"], d["color"], answer_type)
elif agents == "one":
for msg, m_prob in zip(d["msg_1"], d["probs_1"]):
answer_type = 'correct' if d["correct"] else 'incorrect'
m_dict = add_elem(m_dict, convert_tensor_to_string(msg), d["shape"], d["color"], answer_type)
elif agents == "two":
for msg, m_prob in zip(d["msg_2"], d["probs_2"]):
answer_type = 'correct' if d["correct"] else 'incorrect'
m_dict = add_elem(m_dict, convert_tensor_to_string(msg), d["shape"], d["color"], answer_type)
else:
print("Error, please select 'both', 'one' or 'two' agents")
break
m_dict = calc_ratios(m_dict)
print(f'Total messages: {m_dict["total"]}')
exclude = ['total', 'correct', 'correct_p', 'incorrect', 'incorrect_p']
list_dict = [(key, val) for key, val in m_dict.items() if key not in exclude]
list_dict = sorted(list_dict, key=lambda x: x[1]['total'], reverse=True)
return m_dict, list_dict
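# Expected record layout for `data` (inferred from the fields accessed above,
# so treat it as a sketch): each element is a dict with
#
#     d["msg_1"], d["probs_1"]   # agent 1 messages and their per-bit probs
#     d["msg_2"], d["probs_2"]   # agent 2 messages and their per-bit probs
#     d["shape"], d["color"]     # ground-truth attributes (may be None)
#     d["correct"]               # bool, whether the episode was answered right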
def count_blanks(data, blank_m1, blank_m2):
counts = {'01': {'correct': 0, 'incorrect': 0},
'10': {'correct': 0, 'incorrect': 0},
'11': {'correct': 0, 'incorrect': 0},
'00': {'correct': 0, 'incorrect': 0}}
for _, d in enumerate(data):
for msg_1, msg_2 in zip(d["msg_1"], d["msg_2"]):
str_m1 = convert_tensor_to_string(msg_1)
str_m2 = convert_tensor_to_string(msg_2)
if str_m1 == blank_m1 and str_m2 == blank_m2:
if d['correct']:
counts['11']['correct'] += 1
else:
counts['11']['incorrect'] += 1
elif str_m1 == blank_m1 and str_m2 != blank_m2:
if d['correct']:
counts['10']['correct'] += 1
else:
counts['10']['incorrect'] += 1
elif str_m1 != blank_m1 and str_m2 == blank_m2:
if d['correct']:
counts['01']['correct'] += 1
else:
counts['01']['incorrect'] += 1
elif str_m1 != blank_m1 and str_m2 != blank_m2:
if d['correct']:
counts['00']['correct'] += 1
else:
counts['00']['incorrect'] += 1
total = counts['11']['correct'] + counts['11']['incorrect'] + counts['00']['correct'] + counts['00']['incorrect'] + counts['01']['correct'] + counts['01']['incorrect'] + counts['10']['correct'] + counts['10']['incorrect']
counts['11']['p'] = (counts['11']['correct'] + counts['11']['incorrect']) / total
counts['00']['p'] = (counts['00']['correct'] + counts['00']['incorrect']) / total
counts['10']['p'] = (counts['10']['correct'] + counts['10']['incorrect']) / total
counts['01']['p'] = (counts['01']['correct'] + counts['01']['incorrect']) / total
pprint.pprint(counts)
return counts
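# Key convention used above (as implemented): the first digit refers to msg_1
# and the second to msg_2, with 1 meaning "blank". So '11' counts rounds where
# both agents sent their blank message, '10' where only agent 1 did, '01'
# where only agent 2 did, and '00' where neither did.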
def calc_entropy_ratio(data, agents, answer_type):
entropies = []
messages = []
message_probs = []
convert = torch.log(torch.zeros(1).fill_(2))
nans = 0
for _, d in enumerate(data):
include = None
if answer_type == "both":
include = True
elif answer_type == "correct":
include = d["correct"]
else:
include = not d["correct"]
if include:
if agents == "both":
for msg, m_prob in zip(d["msg_1"], d["probs_1"]):
messages.append(msg.numpy())
message_probs.append(m_prob.numpy())
H = - torch.mul(m_prob, torch.log(m_prob) / convert).sum() - torch.mul(1 - m_prob, torch.log(1 - m_prob) / convert).sum()
if np.isnan(H):
# print("ARRRGHHH NAN!")
nans += 1
else:
entropies.append(H)
for msg, m_prob in zip(d["msg_2"], d["probs_2"]):
messages.append(msg.numpy())
message_probs.append(m_prob.numpy())
H = - torch.mul(m_prob, torch.log(m_prob) / convert).sum() - torch.mul(1 - m_prob, torch.log(1 - m_prob) / convert).sum()
if np.isnan(H):
# print("ARRRGHHH NAN!")
nans += 1
else:
entropies.append(H)
elif agents == "one":
for msg, m_prob in zip(d["msg_1"], d["probs_1"]):
messages.append(msg.numpy())
message_probs.append(m_prob.numpy())
H = - torch.mul(m_prob, torch.log(m_prob) / convert).sum() - torch.mul(1 - m_prob, torch.log(1 - m_prob) / convert).sum()
if np.isnan(H):
# print("ARRRGHHH NAN!")
nans += 1
else:
entropies.append(H)
elif agents == "two":
for msg, m_prob in zip(d["msg_2"], d["probs_2"]):
messages.append(msg.numpy())
message_probs.append(m_prob.numpy())
H = - torch.mul(m_prob, torch.log(m_prob) / convert).sum() - torch.mul(1 - m_prob, torch.log(1 - m_prob) / convert).sum()
if np.isnan(H):
# print("ARRRGHHH NAN!")
nans += 1
else:
entropies.append(H)
print(f'Number of messages skipped due to nan: {nans}')
# print(f'Entropies: {entropies[:10]}')
mean_e = sum(entropies) / len(entropies)
messages = np.stack(messages)
message_probs = np.stack(message_probs)
mean_m = torch.from_numpy(np.mean(messages, axis=0)).float()
mean_m_prob = torch.from_numpy(np.mean(message_probs, axis=0)).float()
# Convert to base 2
ent_mean_m = - torch.mul(mean_m, torch.log(mean_m) / convert).sum() - torch.mul(1 - mean_m, torch.log(1 - mean_m) / convert).sum()
ent_mean_m_prob = - torch.mul(mean_m_prob, torch.log(mean_m_prob) / convert).sum() - torch.mul(1 - mean_m_prob, torch.log(1 - mean_m_prob) / convert).sum()
# print(f'E(m|x) = {mean_m}')
print(f'E[H(m|x)] = {mean_e}, H[E(m|x)] = {ent_mean_m}/{ent_mean_m_prob}')
print(f'Ratio: {mean_e / ent_mean_m}/{mean_e / ent_mean_m_prob}')
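# Entropy bookkeeping above (sketch): each message is treated as a vector of
# independent Bernoulli bits with probabilities m_prob, so the per-message
# entropy in bits is
#
#     H(m|x) = -sum_i [ p_i*log2(p_i) + (1 - p_i)*log2(1 - p_i) ]
#
# e.g. a 5-bit message with every p_i = 0.5 has H = 5 bits, while p_i in
# {0, 1} contributes 0 bits (and produces the nan that is skipped above,
# because 0 * log(0) evaluates to nan rather than 0 in torch).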
def add_shape_color_elem(d, m_dict, shape=None, color=None, blanks=[]):
for msg_1, msg_2 in zip(d["msg_1"], d["msg_2"]):
str_m1 = convert_tensor_to_string(msg_1)
str_m2 = convert_tensor_to_string(msg_2)
inc_1 = False if str_m1 in blanks else True
inc_2 = False if str_m2 in blanks else True
if shape is not None and color is not None:
m_dict[shape + '_' + color]['count'] += 1
if inc_1:
m_dict[shape + '_' + color]['1']['all'].append(msg_1.numpy())
if str_m1 in m_dict[shape + '_' + color]['1']:
m_dict[shape + '_' + color]['1'][str_m1] += 1
else:
m_dict[shape + '_' + color]['1'][str_m1] = 1
if inc_2:
m_dict[shape + '_' + color]['2']['all'].append(msg_2.numpy())
if str_m2 in m_dict[shape + '_' + color]['2']:
m_dict[shape + '_' + color]['2'][str_m2] += 1
else:
m_dict[shape + '_' + color]['2'][str_m2] = 1
if shape is not None:
m_dict[shape]['count'] += 1
if inc_1:
m_dict[shape]['1']['all'].append(msg_1.numpy())
if str_m1 in m_dict[shape]['1']:
### tensorflow==2.3.0
### https://ai.googleblog.com/2020/08/on-device-real-time-body-pose-tracking.html
### https://google.github.io/mediapipe/solutions/pose
### https://www.tensorflow.org/api_docs/python/tf/keras/Model
### https://www.tensorflow.org/lite/guide/ops_compatibility
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/DepthwiseConv2D
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Add
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/ReLU
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Reshape
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Concatenate
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer
### https://github.com/google/mediapipe/issues/245
### https://github.com/mvoelk/keras_layers
### How to initialize a convolution layer with an arbitrary kernel in Keras? https://stackoverrun.com/ja/q/12269118
### saved_model_cli show --dir saved_model/ --tag_set serve --signature_def serving_default
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer
from tensorflow.keras.initializers import Constant
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.ops import nn_ops
import numpy as np
import sys
import cv2
# tmp = np.load('weights/depthwise_conv2d_Kernel')
# print(tmp.shape)
# print(tmp)
# def init_f(shape, dtype=None):
# ker = np.load('weights/depthwise_conv2d_Kernel')
# print(shape)
# return ker
# sys.exit(0)
# class MaxPoolingWithArgmax2D(Layer):
# def __init__(self, pool_size=(2, 2), strides=(2, 2), padding='same', **kwargs):
# super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
# self.pool_size = conv_utils.normalize_tuple(pool_size, 2, 'pool_size')
# self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
# self.padding = conv_utils.normalize_padding(padding)
# def call(self, inputs, **kwargs):
# ksize = [1, self.pool_size[0], self.pool_size[1], 1]
# strides = [1, self.strides[0], self.strides[1], 1]
# padding = self.padding.upper()
# output, argmax = nn_ops.max_pool_with_argmax(inputs, ksize, strides, padding)
# # output, argmax = tf.raw_ops.MaxPoolWithArgmax(inputs, ksize, strides, padding)
# argmax = tf.cast(argmax, K.floatx())
# return [output, argmax]
# def compute_output_shape(self, input_shape):
# ratio = (1, 2, 2, 1)
# output_shape = [dim // ratio[idx] if dim is not None else None for idx, dim in enumerate(input_shape)]
# output_shape = tuple(output_shape)
# return [output_shape, output_shape]
# def compute_mask(self, inputs, mask=None):
# return 2 * [None]
# def get_config(self):
# config = super(MaxPoolingWithArgmax2D, self).get_config()
# config.update({
# 'pool_size': self.pool_size,
# 'strides': self.strides,
# 'padding': self.padding,
# })
# return config
def max_pooling_with_argmax2d(input):
net_main = tf.nn.max_pool(input,
ksize=[1,2,2,1],
strides=[1,2,2,1],
padding='SAME')
input_shape = input.get_shape().as_list()
mask_shape = [input_shape[0], input_shape[1]//2, input_shape[2]//2, input_shape[3]]
pooling_indices = tf.zeros(mask_shape, dtype=tf.int64)
for n in range(mask_shape[0]):
for i in range(mask_shape[1]):
for j in range(mask_shape[2]):
in_indices = [ [n, w, h] for w in range(i*2, i*2+2) for h in range(j*2, j*2+2)]
slice = tf.gather_nd(input, in_indices)
argmax = tf.argmax(slice, axis=0)
indices_location = [[n, i, j, d] for d in range(input_shape[3])]
sparse_indices = tf.SparseTensor(indices=indices_location, values=argmax, dense_shape=mask_shape)
pooling_indices = tf.compat.v1.sparse_add(pooling_indices, sparse_indices)
return [net_main, pooling_indices]
class MaxUnpooling2D(Layer):
def __init__(self, size=(2, 2), **kwargs):
super(MaxUnpooling2D, self).__init__(**kwargs)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
def call(self, inputs, output_shape=None):
updates, mask = inputs[0], inputs[1]
mask = tf.cast(mask, 'int32')
input_shape = tf.shape(updates, out_type='int32')
# calculation new shape
if output_shape is None:
output_shape = (input_shape[0], input_shape[1] * self.size[0], input_shape[2] * self.size[1], input_shape[3])
# calculation indices for batch, height, width and feature maps
one_like_mask = K.ones_like(mask, dtype='int32')
batch_shape = K.concatenate([[input_shape[0]], [1], [1], [1]], axis=0)
batch_range = K.reshape(tf.range(output_shape[0], dtype='int32'), shape=batch_shape)
b = one_like_mask * batch_range
y = mask // (output_shape[2] * output_shape[3])
x = (mask // output_shape[3]) % output_shape[2]
feature_range = tf.range(output_shape[3], dtype='int32')
f = one_like_mask * feature_range
# transpose indices & reshape update values to one dimension
updates_size = tf.size(updates)
indices = K.transpose(K.reshape(K.stack([b, y, x, f]), [4, updates_size]))
values = K.reshape(updates, [updates_size])
ret = tf.scatter_nd(indices, values, output_shape)
return ret
def compute_output_shape(self, input_shape):
mask_shape = input_shape[1]
output_shape = [mask_shape[0], mask_shape[1] * self.size[0], mask_shape[2] * self.size[1], mask_shape[3]]
return tuple(output_shape)
def get_config(self):
config = super(MaxUnpooling2D, self).get_config()
config.update({
'size': self.size,
})
return config
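# How the two pieces fit together (sketch): tf.raw_ops.MaxPoolWithArgmax (or
# the slow reference max_pooling_with_argmax2d above) returns the pooled
# tensor together with the flat argmax indices, and MaxUnpooling2D scatters
# values back to those positions in a 2x-upsampled map, e.g.
#
#     pooled, argmax = tf.raw_ops.MaxPoolWithArgmax(
#         input=x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#     upsampled = MaxUnpooling2D()([pooled, argmax])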
height = 512
width = 512
inputs = Input(shape=(height, width, 4), batch_size=1, name='input')
# Block_01
conv1_1 = Conv2D(filters=8, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_Bias')))(inputs)
prelu1_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_Alpha')), shared_axes=[1, 2])(conv1_1)
conv1_2 = Conv2D(filters=32, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_1_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_1_Bias')))(prelu1_1)
prelu1_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_1_Alpha')), shared_axes=[1, 2])(conv1_2)
# Block_02
conv2_1 = Conv2D(filters=16, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_2_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_2_Bias')))(prelu1_2)
prelu2_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_2_Alpha')), shared_axes=[1, 2])(conv2_1)
depthconv2_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_Bias')))(prelu2_1)
conv2_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_3_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_3_Bias')))(depthconv2_1)
prelu2_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_3_Alpha')), shared_axes=[1, 2])(conv2_2)
depthconv2_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_1_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_1_Bias')))(prelu2_2)
prelu2_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_4_Alpha')), shared_axes=[1, 2])(depthconv2_2)
conv2_3 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_4_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_4_Bias')))(prelu2_3)
maxpoolarg2_1 = tf.raw_ops.MaxPoolWithArgmax(input=prelu1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# maxpoolarg2_1 = max_pooling_with_argmax2d(prelu1_2)
conv2_4 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_5_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_5_Bias')))(maxpoolarg2_1[0])
add2_1 = Add()([conv2_3, conv2_4])
prelu2_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_5_Alpha')), shared_axes=[1, 2])(add2_1)
# Block_03
conv3_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_6_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_6_Bias')))(prelu2_4)
prelu3_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_6_Alpha')), shared_axes=[1, 2])(conv3_1)
depthconv3_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_2_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_2_Bias')))(prelu3_1)
conv3_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_7_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_7_Bias')))(depthconv3_1)
prelu3_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_7_Alpha')), shared_axes=[1, 2])(conv3_2)
depthconv3_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_3_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_3_Bias')))(prelu3_2)
prelu3_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_8_Alpha')), shared_axes=[1, 2])(depthconv3_2)
conv3_3 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_8_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_8_Bias')))(prelu3_3)
add3_1 = Add()([conv3_3, prelu2_4])
prelu3_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_9_Alpha')), shared_axes=[1, 2])(add3_1)
# Block_04
conv4_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_9_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_9_Bias')))(prelu3_4)
prelu4_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_10_Alpha')), shared_axes=[1, 2])(conv4_1)
depthconv4_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_4_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_4_Bias')))(prelu4_1)
conv4_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_10_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_10_Bias')))(depthconv4_1)
prelu4_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_11_Alpha')), shared_axes=[1, 2])(conv4_2)
depthconv4_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_5_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_5_Bias')))(prelu4_2)
prelu4_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_12_Alpha')), shared_axes=[1, 2])(depthconv4_2)
conv4_3 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_11_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_11_Bias')))(prelu4_3)
add4_1 = Add()([conv4_3, prelu3_4])
prelu4_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_13_Alpha')), shared_axes=[1, 2])(add4_1)
# Block_05
conv5_1 = Conv2D(filters=32, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_12_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_12_Bias')))(prelu4_4)
prelu5_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_14_Alpha')), shared_axes=[1, 2])(conv5_1)
depthconv5_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_6_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_6_Bias')))(prelu5_1)
conv5_2 = Conv2D(filters=32, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_13_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_13_Bias')))(depthconv5_1)
prelu5_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_15_Alpha')), shared_axes=[1, 2])(conv5_2)
depthconv5_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_7_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_7_Bias')))(prelu5_2)
prelu5_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_16_Alpha')), shared_axes=[1, 2])(depthconv5_2)
conv5_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_14_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_14_Bias')))(prelu5_3)
maxpoolarg5_1 = tf.raw_ops.MaxPoolWithArgmax(input=prelu4_4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# maxpoolarg5_1 = max_pooling_with_argmax2d(prelu4_4)
conv5_4 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_15_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_15_Bias')))(maxpoolarg5_1[0])
add5_1 = Add()([conv5_3, conv5_4])
prelu5_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_17_Alpha')), shared_axes=[1, 2])(add5_1)
# Block_06
conv6_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_16_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_16_Bias')))(prelu5_4)
prelu6_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_18_Alpha')), shared_axes=[1, 2])(conv6_1)
depthconv6_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_8_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_8_Bias')))(prelu6_1)
conv6_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_17_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_17_Bias')))(depthconv6_1)
prelu6_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_19_Alpha')), shared_axes=[1, 2])(conv6_2)
depthconv6_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_9_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_9_Bias')))(prelu6_2)
prelu6_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_20_Alpha')), shared_axes=[1, 2])(depthconv6_2)
conv6_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_18_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_18_Bias')))(prelu6_3)
add6_1 = Add()([conv6_3, prelu5_4])
prelu6_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_21_Alpha')), shared_axes=[1, 2])(add6_1)
# Block_07
conv7_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_19_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_19_Bias')))(prelu6_4)
prelu7_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_22_Alpha')), shared_axes=[1, 2])(conv7_1)
depthconv7_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_10_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_10_Bias')))(prelu7_1)
conv7_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_20_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_20_Bias')))(depthconv7_1)
prelu7_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_23_Alpha')), shared_axes=[1, 2])(conv7_2)
depthconv7_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_11_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_11_Bias')))(prelu7_2)
prelu7_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_24_Alpha')), shared_axes=[1, 2])(depthconv7_2)
conv7_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_21_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_21_Bias')))(prelu7_3)
add7_1 = Add()([conv7_3, prelu6_4])
prelu7_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_25_Alpha')), shared_axes=[1, 2])(add7_1)
# Block_08
conv8_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_22_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_22_Bias')))(prelu7_4)
prelu8_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_26_Alpha')), shared_axes=[1, 2])(conv8_1)
depthconv8_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_12_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_12_Bias')))(prelu8_1)
conv8_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_23_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_23_Bias')))(depthconv8_1)
prelu8_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_27_Alpha')), shared_axes=[1, 2])(conv8_2)
depthconv8_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_13_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_13_Bias')))(prelu8_2)
prelu8_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_28_Alpha')), shared_axes=[1, 2])(depthconv8_2)
conv8_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_24_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_24_Bias')))(prelu8_3)
add8_1 = Add()([conv8_3, prelu7_4])
prelu8_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_29_Alpha')), shared_axes=[1, 2])(add8_1)
# Block_09
conv9_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_25_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_25_Bias')))(prelu8_4)
prelu9_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_30_Alpha')), shared_axes=[1, 2])(conv9_1)
depthconv9_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_14_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_14_Bias')))(prelu9_1)
conv9_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_26_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_26_Bias')))(depthconv9_1)
prelu9_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_31_Alpha')), shared_axes=[1, 2])(conv9_2)
depthconv9_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_15_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_15_Bias')))(prelu9_2)
prelu9_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_32_Alpha')), shared_axes=[1, 2])(depthconv9_2)
conv9_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_27_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_27_Bias')))(prelu9_3)
add9_1 = Add()([conv9_3, prelu8_4])
prelu9_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_33_Alpha')), shared_axes=[1, 2])(add9_1)
# Block_10
conv10_1 = Conv2D(filters=16, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_28_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_28_Bias')))(prelu9_4)
prelu10_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_34_Alpha')), shared_axes=[1, 2])(conv10_1)
depthconv10_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_16_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_16_Bias')))(prelu10_1)
conv10_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_29_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_29_Bias')))(depthconv10_1)
prelu10_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_35_Alpha')), shared_axes=[1, 2])(conv10_2)
depthconv10_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_17_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_17_Bias')))(prelu10_2)
prelu10_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_36_Alpha')), shared_axes=[1, 2])(depthconv10_2)
conv10_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_30_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_30_Bias')))(prelu10_3)
maxpoolarg10_1 = tf.raw_ops.MaxPoolWithArgmax(input=prelu9_4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# maxpoolarg10_1 = max_pooling_with_argmax2d(prelu9_4)
add10_1 = Add()([conv10_3, maxpoolarg10_1[0]])
prelu10_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_37_Alpha')), shared_axes=[1, 2])(add10_1)
# Block_11
conv11_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_31_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_31_Bias')))(prelu10_4)
prelu11_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_38_Alpha')), shared_axes=[1, 2])(conv11_1)
depthconv11_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_18_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_18_Bias')))(prelu11_1)
conv11_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
#################################################################
# #
# SPECTRUM ANALYST #
# version: 1.0 - Feb - 2020 #
# @author: <NAME> <EMAIL> #
#################################################################
import sys,os
import numpy as np
import math
from math import factorial
import matplotlib.pyplot as plt
import Elements
import xfit
"""
1st; calculate background with peakstrip()
2nd; calibrate the energy axis
3rd; verify the configuration parameters (the configdict and lookup variables)
4th; run getpeakarea()
"""
global __FANO__
global __NOISE__
__NOISE__, __FANO__ = 80, 0.114
def updatefano(x,y):
global __FANO__, __NOISE__
__FANO__, __NOISE__ = x, y
return
def function(ydata,x):
""" Returns x index value of ydata array """
return ydata[x]
def dif2(ydata,x,gain):
""" Complementary function to getdif2 """
value = (function(ydata, x + 2*gain) - 2*function(ydata, x + gain)\
+ function(ydata, x)) / (gain * gain)
return value
def getdif2(ydata,xdata,gain):
""" Returns the second differential of ydata """
xinterval = np.pad(xdata,2,'edge')
yinterval = np.pad(ydata,2,'edge')
dif2curve = np.zeros([len(yinterval)])
for x in range(len(xinterval)-2):
difvalue = dif2(yinterval,x,1)
dif2curve[x] = difvalue
dif2curve = dif2curve[1:-3]
return dif2curve
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
""" This function was taken from https://gist.github.com/krvajal
and compared to scipy.signal package, an affidable and renown package
for signal analysis. """
""" References
.. [1] <NAME>, <NAME>, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, <NAME>, <NAME>, <NAME>
Cambridge University Press ISBN-13: 9780521880688 """
try:
        window_size = np.abs(int(window_size))
        order = np.abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
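# Quick illustrative check of the filter above (not in the original source):
# smoothing a noisy sine should roughly recover the underlying signal. The
# window length (11) and polynomial order (3) are arbitrary example values.
def _example_savgol():
    xs = np.linspace(0, 2 * np.pi, 200)
    noisy = np.sin(xs) + np.random.normal(0, 0.1, xs.size)
    return savitzky_golay(noisy, 11, 3)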
def strip(an_array,cycles,width):
"""
Strips the peaks contained in the input array following an
    iterative peak clipping method (van Espen, Chapter 4 in Van Grieken, 2002)
INPUT:
an_array; np.array
cycles; int
width; int
OUTPUT:
an_array; np.array
"""
##################################################################
    # W IS THE WIDTH OF THE FILTER. THE WINDOW WILL BE (2*W)+1     #
# W VALUE MUST BE LARGER THAN 2 AND ODD, SINCE 3 IS THE MINIMUM #
# SATISFACTORY POLYNOMIAL DEGREE TO SMOOTHEN THE DATA #
##################################################################
size = an_array.shape[0]
for k in range(cycles):
if k >= cycles-8:
width = int(width/np.sqrt(2))
for l in range(0, size):
if l-width < 0: low = 0
else: low = l-width
if l+width >= size: high = size-1
else: high = l+width
m = (an_array[low] + an_array[high])/2
if an_array[l] > m: an_array[l] = m
return an_array
def peakstrip(an_array,cycles,width,*args):
"""
Calculates the continuum/background contribution of the input spectrum following
the SNIPBG method described in van Espen, Chapter 4 in Van Grieken, 2002.
INPUT:
an_array; np.array
cycles; int
width; int
        args; optional (the Savitzky-Golay window and polynomial order can be
            passed as two extra positional arguments)
OUTPUT:
snip_bg; np.array
"""
TEST = False
#initialize snip_bg array
snip_bg = np.zeros(an_array.shape[0])
#square root the data
sqr_data = an_array**0.5
#apply savgol filter to the square root data
if len(args) > 0:
savgol_window,order = args[0],args[1]
try:
#smooth_sqr = scipy.signal.savgol_filter(sqr_data,savgol_window,order)
smooth_sqr = savitzky_golay(sqr_data,savgol_window,order)
except:
raise ValueError
else:
smooth_sqr = savitzky_golay(sqr_data,width,3)
for i in range(smooth_sqr.shape[0]):
if smooth_sqr[i] < 0: smooth_sqr[i] = 0
#strip peaks
snip_bg = strip(smooth_sqr,cycles,width)
#transform back
snip_bg **= 2
return snip_bg
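# Illustrative call of peakstrip (not in the original source), showing the two
# optional extra arguments: the Savitzky-Golay window (7) and polynomial order
# (3) applied before stripping; 24 cycles and a clipping width of 5 are example
# values only.
def _example_peakstrip_with_savgol(spectrum):
    return peakstrip(spectrum, 24, 5, 7, 3)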
def sigma(energy):
global __NOISE__, __FANO__
return math.sqrt(((__NOISE__/2.3548)**2)+(3.85*__FANO__*energy))
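# Worked example (not in the original source), using the default __NOISE__ = 80
# and __FANO__ = 0.114: for the Mn K-alpha line at 5899 eV,
#   sigma(5899) = sqrt((80/2.3548)**2 + 3.85*0.114*5899), which is about 61 eV,
# so the expected FWHM is 2.3548 * sigma(5899), about 144 eV, a plausible
# silicon-detector resolution at that energy.
def _example_fwhm(energy=5899):
    return 2.3548 * sigma(energy)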
def shift_center(xarray,yarray):
""" Returns the highest value and its index in yarray and its corresponding value
in xarray """
ymax = yarray.max()
y_list = yarray.tolist()
idx = y_list.index(ymax)
return xarray[idx],ymax,idx
def setROI(lookup,xarray,yarray,localconfig):
"""
INPUT:
lookup; eV energy (int)
xarray; np.array
yarray; np.array
localconfig; dict
OUTPUT:
low_idx; int
high_idx; int
peak_center; int
isapeak; bool
- indexes corresponding to 2*FWHM of a gaussian centered
at eV energy position (int, int, int, bool)
"""
lookup = int(lookup)
peak_corr = 0
isapeak = True
if localconfig.get('bgstrip') == "SNIPBG":
yarray = savitzky_golay(yarray,5,3)
for peak_corr in range(2):
FWHM = 2.3548 * sigma(lookup)
lowx = (lookup - (FWHM))/1000
highx = (lookup + (FWHM))/1000
idx = 0
while xarray[idx] <= lowx:
idx+=1
lowx_idx = idx-3
while xarray[idx] <= highx:
idx+=1
highx_idx = idx+3
ROIaxis = xarray[lowx_idx:highx_idx]
ROIdata = yarray[lowx_idx:highx_idx]
shift = shift_center(ROIaxis,ROIdata)
if 1.10*(-FWHM/2) < (shift[0]*1000)-lookup < 1.10*(FWHM/2):
lookup = shift[0]*1000
peak_corr = 0
else:
lookupcenter = int(len(ROIaxis)/2)
shift = (0,0,lookupcenter)
isapeak = False
lowx_idx = lowx_idx + 2
highx_idx = highx_idx - 3
peak_center = shift[2]-3
return lowx_idx,highx_idx,peak_center,isapeak
def getpeakarea(lookup,data,energyaxis,continuum,localconfig,RAW,usedif2,dif2):
"""
Calculates the netpeak area of a given lookup energy.
INPUT:
lookup; int (theoretical peak center eV energy)
data; np.array (counts)
energyaxis; np.array (calibrated energy axis KeV)
continuum; np.array (background counts)
        localconfig; dict (configuration)
RAW; np.array (counts, same as data)
usedif2; bool
dif2; np.array (second differential of counts)
OUTPUT:
area; float
idx; list (containing setROI function outputs)
"""
Area = 0
idx = setROI(lookup,energyaxis,data,localconfig)
isapeak = idx[3]
xdata = energyaxis[idx[0]:idx[1]]
ydata = data[idx[0]:idx[1]]
original_data = RAW[idx[0]:idx[1]]
ROIbg = continuum[idx[0]:idx[1]]
if usedif2 == True:
dif2 = getdif2(data,energyaxis,1)
for i in range(len(dif2)):
if dif2[i] < -1: dif2[i] = dif2[i]
elif dif2[i] > -1: dif2[i] = 0
######################################
# SIGNAL TO NOISE PEAK TEST CRITERIA #
######################################
# After <NAME>. & <NAME>. 2004
if isapeak == True:
if original_data.sum() - ROIbg.sum() < 3*math.sqrt(abs(ROIbg.sum())): isapeak = False
##########################
# 2ND DIFFERENTIAL CHECK #
##########################
if usedif2 == True and isapeak == True:
sliced_dif2 = dif2[idx[0]:idx[1]]
# checks second differential and calculates the net area - background
if sliced_dif2[idx[2]] < 0 or sliced_dif2[idx[2]+1] < 0 or sliced_dif2[idx[2]-1] < 0:
if ROIbg.sum() < ydata.sum(): Area += ydata.sum() - ROIbg.sum()
##########################
return Area,idx
def getdata(mca):
""" Extract the data contained in spectrum files
INPUT:
mca; path
OUTPUT:
Data; 1D-array """
name = str(mca)
name = name.split("\\")[-1]
name = name.replace('_',' ')
# custom MC generated files
if 'test' in name or 'obj' in name or 'newtest' in name:
Data = []
datafile = open(mca)
lines = datafile.readlines()
for line in lines:
line = line.split()
try: counts = float(line[1])
except: counts = float(line[0])
counts = counts * 10e3
Data.append(counts)
Data = np.asarray(Data)
# this works for mca extension files
else:
ObjectData=[]
datafile = open(mca)
line = datafile.readline()
line = line.replace("\r","")
line = line.replace("\n","")
# AMPTEK files start with this tag
if "<<PMCA SPECTRUM>>" in line:
while "<<DATA>>" not in line:
line = datafile.readline()
if line == "": break
line = datafile.readline()
while "<<END>>" not in line:
try: ObjectData.append(int(line))
except ValueError as exception:
datafile.close()
                    raise exception
line = datafile.readline()
if line == "": break
# Works if file is just counts per line
elif line.isdigit():
while "<<END>>" not in line:
ObjectData.append(int(line))
line = datafile.readline()
if line == "": break
# if file has two columns separated by space or tab
elif "\t" in line or " " in line:
while "<<END>>" not in line:
counts = line.split("\t")[-1]
if counts.isdigit(): ObjectData.append(int(counts))
else:
counts = line.split(" ")[-1]
ObjectData.append(int(counts))
line = datafile.readline()
if line == "": break
Data = np.asarray(ObjectData)
datafile.close()
return Data
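# Round-trip sketch for the AMPTEK branch of getdata() above (not in the
# original source). The file name and the three count values are arbitrary;
# real .mca files carry many more header fields, which getdata() skips until
# it reaches the <<DATA>> tag.
def _example_amptek_roundtrip(path="example.mca"):
    with open(path, "w") as out:
        out.write("<<PMCA SPECTRUM>>\n<<DATA>>\n0\n12\n57\n<<END>>\n")
    return getdata(path)  # expected: array([ 0, 12, 57])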
def linregress(x, y, sigmay=None, full_output=False):
# DISCLAIMER #
# function extracted from PyMca5.PyMcaMath.fitting.RateLaw script
"""
Linear fit to a straight line following <NAME>:
"Data Reduction and Error Analysis for the Physical Sciences"
Parameters
----------
x, y : array_like
two sets of measurements. Both arrays should have the same length.
sigmay : The uncertainty on the y values
Returns
-------
slope : float
slope of the regression line
intercept : float
intercept of the regression line
r_value : float
correlation coefficient
    if full_output is true, additional information about the fit is also returned
    """
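# The body of linregress is not included in this excerpt. Below is a minimal,
# independent sketch of a weighted straight-line fit following Bevington's
# formulas; it is NOT the PyMca implementation and ignores full_output.
def _linregress_sketch(x, y, sigmay=None):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    w = np.ones_like(y) if sigmay is None else 1.0 / np.asarray(sigmay, dtype=float) ** 2
    S, Sx, Sy = w.sum(), (w * x).sum(), (w * y).sum()
    Sxx, Sxy = (w * x * x).sum(), (w * x * y).sum()
    delta = S * Sxx - Sx * Sx
    slope = (S * Sxy - Sx * Sy) / delta
    intercept = (Sxx * Sy - Sx * Sxy) / delta
    r_value = np.corrcoef(x, y)[0, 1]  # plain (unweighted) correlation coefficient
    return slope, intercept, r_value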
            ### When the same bad word translation has not been found in the .json file content with bad word translations, this bad word translation will be saved
if not bad_word_translation_already_has_been_added:
instance_bad_word_translation_src = { "type": type, "question_content": question_with_word_usage, "word_to_translate": word_to_translate, "word_translation": word_translation }
bad_words_translation_list_from_json_file.append(instance_bad_word_translation_src)
                # Save the newly added translated word in the .json file with word translations
JsonFilesOperations.save_changes_in_json_file(file_content_json, JsonFilesOperations.path_with_bad_words_translations_file)
        else: # when the incorrect_translations.json file is empty, the new translated word is added to it
# Create .json file with translations JSON format Schema
bad_translated_words_file_json_content = { "incorrect_list" : [ {
"type": type,
"question_content": question_with_word_usage,
"word_to_translate": word_to_translate,
"word_translation": word_translation
}] }
# Save bad word translation in json file (incorrect_translations.json)
JsonFilesOperations.save_changes_in_json_file(bad_translated_words_file_json_content, JsonFilesOperations.path_with_bad_words_translations_file)
file_with_bad_translations_only_to_read.close()
    # Function checks whether a given word translation is listed in the bad-word-translations file
    ## Behaviour: returns False when the word translation is correct, True when it is incorrect
def word_translation_is_bad(question_with_word_usage, word_to_translate, word_translation):
file_with_bad_words_translations = open(JsonFilesOperations.path_with_bad_words_translations_file, "r")
file_with_bad_words_translation_content = file_with_bad_words_translations.read()
if len(file_with_bad_words_translation_content) > 0:
            # Answer to the question "Is this word translation bad?"
local_word_translation_is_bad: bool = False
# Data from json file with bad translations ("incorrect_translations.json")
file_with_bad_words_translation_content_json = json.loads(file_with_bad_words_translation_content)
bad_translations_list = file_with_bad_words_translation_content_json["incorrect_list"]
# Iterate over all translations and check if this translation is bad
for bad_translation in bad_translations_list:
local_word_usage_question: str = bad_translation["question_content"].strip()
local_word_to_translate: str = bad_translation["word_to_translate"].strip()
local_word_translation: str = bad_translation["word_translation"].strip()
# When this word translation is bad because it is in the bad translations list
if local_word_usage_question == question_with_word_usage and local_word_to_translate == word_to_translate and local_word_translation == word_translation:
local_word_translation_is_bad = True
break
# Return check result
return local_word_translation_is_bad
else:
return False
    # Function which saves words which couldn't be translated (e.g. a synonym or another "blue error" case)
def save_word_which_coudnt_be_translated(question_with_word_usage: str, word_to_translate: str):
file_with_words_which_coudnt_be_translated = open(JsonFilesOperations.path_with_words_which_coudnt_be_translated, "r")
file_with_words_which_coudnt_be_translated_content = file_with_words_which_coudnt_be_translated.read()
if len(file_with_words_which_coudnt_be_translated_content) > 0:
# Deserialize .json file content to JSON object syntax
file_content_json = json.loads(file_with_words_which_coudnt_be_translated_content)
            # Get the list of words which couldn't be translated from the .json file, so the new word can be appended to it
words_which_coudnt_be_translated_list_from_json_file: list[dict[str, str]] = file_content_json["coudn't_translated_list"]
            # Add the new word which couldn't be translated to the file with words which couldn't be translated
            ## Check that this word hasn't already been added
word_which_coudnt_be_translated_already_has_been_added: bool = False
for sing_word in words_which_coudnt_be_translated_list_from_json_file:
local_question_content = sing_word["question_content"]
local_word_to_translate = sing_word["word_to_translate"]
if local_question_content == question_with_word_usage and local_word_to_translate == word_to_translate:
word_which_coudnt_be_translated_already_has_been_added = True
break
            ### When the same word hasn't been found in the .json file content, this word which couldn't be translated will be saved
if not word_which_coudnt_be_translated_already_has_been_added:
instance_bad_word_translation_src = { "question_content": question_with_word_usage.strip(), "word_to_translate": word_to_translate.strip() }
words_which_coudnt_be_translated_list_from_json_file.append(instance_bad_word_translation_src)
                # Save the newly added word in the .json file
JsonFilesOperations.save_changes_in_json_file(file_content_json, JsonFilesOperations.path_with_words_which_coudnt_be_translated)
else:
word_which_coudnt_be_translated_file_json_content = {
"coudn't_translated_list": [
{
"question_content": question_with_word_usage.strip(),
"word_to_translate": word_to_translate.strip()
}
]
}
JsonFilesOperations.save_changes_in_json_file(word_which_coudnt_be_translated_file_json_content, JsonFilesOperations.path_with_words_which_coudnt_be_translated)
file_with_words_which_coudnt_be_translated.close()
    # Function which deletes a word which couldn't be translated from the word list stored in the JSON file
def delete_word_which_coudnt_be_translated(question_with_word_usage: str, word_to_translate: str):
file = open(JsonFilesOperations.path_with_words_which_coudnt_be_translated)
file_content = file.read()
# Deserialize content from JSON format
file_content_json = json.loads(file_content)
# Get list with all words from json
list_with_words: list[dict[str, str]] = file_content_json["coudn't_translated_list"]
# Iterate over all element from list and remove word from it
word_matched: bool = False
for num in range(0, len(list_with_words)):
# Get word instance
word_instance = list_with_words[num]
# Get keys from instance
local_word_question = word_instance["question_content"]
local_word_to_translate = word_instance["word_to_translate"]
            # When word_instance is exactly the word which should be deleted
if local_word_question == question_with_word_usage and local_word_to_translate == word_to_translate:
word_matched = True
list_with_words.remove(word_instance)
break
if word_matched:
print("\n\nHere:\n"+str(list_with_words))
JsonFilesOperations.save_changes_in_json_file(file_content_json, JsonFilesOperations.path_with_words_which_coudnt_be_translated)
# Imitate a user typing letters into <textarea> and <input> elements
def put_keys_as_a_user(string: str, in_element):
for letter in string:
in_element.send_keys(letter)
        sleep_time = random.uniform(0.180, 0.700) # generate a random float number
time.sleep(sleep_time)
# Function which translates the given word to English (default) using Google Translator and returns the Translated result object
def translate_word_by_use_google_tr(word: str):
translator_machine = googletrans.Translator()
translator_word_lang_detect = translator_machine.detect(word).lang
    translated_word = translator_machine.translate(word, dest="en", src=translator_word_lang_detect) # get the Translated result object (the string itself is in .text)
return translated_word
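# Example usage (not in the original source): the helper above returns the
# googletrans Translated object, so the English string itself is read from its
# .text attribute, exactly as the session loop below does, e.g.
#   translated = translate_word_by_use_google_tr("szkoła")
#   translated.text  # -> "school" (requires network access)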
def start_new_session(browser):
start_session_button = browser.find_element(By.XPATH, '//*[@id="student_panel"]/p[1]/a')
if start_session_button.is_displayed(): # when start session button is displayed
start_session_button.click() # button "Zacznij codzienną sesję/dokończ sesję"
if browser.current_url.__contains__("https://instaling.pl/ling2/html_app/app.php"):
# Environment configuration variables
timeout_between_answers_for_questions: float = 1.5 # delay between answers for questions // in seconds
timeout_between_pass_word_and_check_it: float = 0.3 # delay between pass translated word and send it for check // in seconds
        # Loop which allows the program to answer all questions of a single session
iteration_count: int = 0
while True:
iteration_count += 1
            # Handle the finish page shown after the bot has answered all questions
finish_page = browser.find_element(By.ID, "finish_page")
if finish_page.is_displayed(): # when sessions ends
# Go to main instaling.pl user panel after end the session
go_to_main_instaling_page = browser.find_element(By.XPATH, "/html/body/div/div[12]/div[2]/h4")
go_to_main_instaling_page.click()
                # Stop the session loop and thus stop answering the session questions
print("Session ends!!!")
break
            # Start/Continue the started session (only one page can be displayed at a time)
start_session_page = browser.find_element(By.ID, "start_session_page")
start_repeat_page = browser.find_element(By.ID, "start_repeat_page")
continue_session_page = browser.find_element(By.ID, "continue_session_page")
            # This is done only when the program starts the session
if iteration_count == 1:
button: WebElement
if start_session_page.is_displayed():
button = start_session_page.find_element(By.CLASS_NAME, "big_button")
elif start_repeat_page.is_displayed():
button = start_repeat_page.find_element(By.CLASS_NAME, "big_button")
elif continue_session_page.is_displayed():
button = continue_session_page.find_element(By.CLASS_NAME, "big_button")
button.click()
#### Session words!!!!
            time.sleep(0.5) # wait 0.5 sec before starting the answer action
learning_page = browser.find_element(By.ID, "learning_page")
            # Handle the question "Do you know this word?" (if you claim to know it, the next words get harder, so the bot always answers "No")
learning_page_question_if_you_know_new_word: WebElement = learning_page.find_element(By.XPATH, "//div[@id=\"new_word_form\"]")
if learning_page_question_if_you_know_new_word.is_displayed():
# Step 1 click "No"
dont_know_button: WebElement = learning_page_question_if_you_know_new_word.find_element(By.XPATH, "//div[@id=\"dont_know_new\"]")
dont_know_button.click()
                # Step 2: skip the advice page about the new word
possible_word_page = browser.find_element(By.ID, "possible_word_page")
if possible_word_page.is_displayed():
# Click on "Skip" button
skip_button: WebElement = possible_word_page.find_element(By.ID, "skip")
skip_button.click()
continue # go to next loop iteration
## Get Data of Normal Question
# Section with question and word/words to translate
learning_page_question_usage_example_text: str = learning_page.find_element(By.XPATH, "//div[@id=\"question\"]/div[@class=\"usage_example\"]").text # question text
learning_page_question_caption_translations_text: str = learning_page.find_element(By.XPATH, "//div[@id=\"question\"]/div[@class=\"caption\"]/div[@class=\"translations\"]").text # word which should be translated
# Section with input for answer and submit button
learning_page_learning_form_check_input: WebElement = learning_page.find_element(By.XPATH, "//div[@class=\"learning_form\"]/table//input[@id=\"answer\"]") # Input Element for the answer
learning_page_learning_form_check_button: WebElement = learning_page.find_element(By.XPATH, "//div[@class=\"learning_form\"]/div[@id=\"check\"]") # Button to submit translation
            # Function which gets the translation for the given word, or for a list of candidate words when the first candidate is a known bad translation for the question word
def translate_this_word(word_to_translate: Any):
""" Params:
word_to_translate - this can be str type with single word to translate when in question with to translate are only one word or list with words when word can have many translations
"""
                return_word_translation: Tuple[str, str] # tuple layout: 0 - word_to_translate, 1 - word_translation
                communication_word_translate: str # word to translate, used only in the "communication alert" when no translation could be obtained
                if isinstance(word_to_translate, list): # when a list of words with similar meaning was passed
                    return_word_translation = ("", "") # default value in case the loop below does not set it
                    communication_word_translate = word_to_translate[0] # default word used in the "communication alert"
                    # Iterate over all words from the list
                    for single_word_to_translate in word_to_translate:
                        local_translation = JsonFilesOperations.get_word_translation_from_file(learning_page_question_usage_example_text, single_word_to_translate) ## translate the word: first try the local translations file, then fall back to Google Translator
                        ## Get the translation from Google Translator when it couldn't be found in the JSON file
                        if not local_translation: # covers both None and an empty string
                            local_translation = translate_word_by_use_google_tr(single_word_to_translate).text
                        ### Check that the translation is not recorded as bad in "incorrect_translations.json"; otherwise try the next candidate
                        if not JsonFilesOperations.word_translation_is_bad(question_with_word_usage=learning_page_question_usage_example_text, word_to_translate=single_word_to_translate, word_translation=local_translation):
                            return_word_translation = (single_word_to_translate, local_translation) ## set the returned value
                            break ## stop the loop
                elif isinstance(word_to_translate, str): # when a single word was passed
                    return_word_translation = ("", "") # default value in case it is not set below
                    communication_word_translate = word_to_translate # default word used in the "communication alert"
                    local_translation = JsonFilesOperations.get_word_translation_from_file(learning_page_question_usage_example_text, word_to_translate) ## translate the word: first try the local translations file, then fall back to Google Translator
                    ## Get the translation from Google Translator when it couldn't be found in the JSON file
# Source repository: birdie0111/Intershiplz, file: backend/pyWebscrap.py
from bs4 import BeautifulSoup # returns a parse tree
# to select by xpath
from lxml import etree # returns a parse tree to
# select by xpath
import requests # send HTTP requests
import datetime # get today's date
import os, stat # change file modes
import chardet # get the encoding value
import sys # change the encoding of stdout
import re # select with regex
import io # change the encoding of stdout
#------------------------------------------------------------------------------------------define the text_files & fichier_html folders
# /home/IdL/2021/liuqinyu/public_html
roadPare = "/home/IdL/2021/tangyuhe/public_html" # get the folder path
pathFile = roadPare+"/text_files" # define the path of the .txt folder
if not os.path.exists(pathFile): # if the folder does not exist yet, create it
    os.makedirs(pathFile, 0o777)
pathWindow = roadPare+"/fichier_html" # define the path of the .html folder
if not os.path.exists(pathWindow):
    os.makedirs(pathWindow, 0o777)
# the mode is 755 at this point, it has to be changed to a real 777:
os.chmod(pathFile,stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO)
os.chmod(pathWindow,stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO)
#------------------------------------------------------------------------------------------"Offres d'emploi et de stage en TAL"
sys.stdout = io.TextIOWrapper(buffer=sys.stdout.buffer,encoding='utf8')
url = "http://w3.erss.univ-tlse2.fr/membre/tanguy/offres.html#Stages"
days = 20
def get_posts(url):
    '''
    Return the internship URLs found on the site
    Parameters:
        url (str): URL of the internship listings site
    Returns:
        real_urls (list str): a list of internship URLs
    '''
posts = requests.get(url)
posts.encoding = "utf-8"
regex = 'href="(.*)"'
all_urls = re.findall(regex, posts.text)
real_urls = []
for u in all_urls:
if "offres/S" in u:
real_urls.append(u)
real_urls = real_urls[:days]
return real_urls
def get_content(real_urls, url):
    '''
    Go through the internship information and write it to .txt and .html files
    on the server
    Parameters:
        url (str): URL of the internship listings site
        real_urls (list str): a list of internship URLs
    Returns:
        None
    '''
half_url = "http://w3.erss.univ-tlse2.fr/membre/tanguy/"
if(real_urls == []):
print("no urls\n")
else:
posts = requests.get(url)
posts.encoding = "utf-8"
selector = etree.HTML(posts.text)
dates = []
institutes = []
places = []
titles = []
for i in range(2,days+2):
dateOrigin = selector.xpath("//tr[" + str(i) + "]/td[1]/text()")[2] # dates
list_date = dateOrigin.split("/")
date = list_date[2] + "-" + list_date[1] + "-" + list_date[0] # format y-m-d
institute = selector.xpath("//tr[" + str(i) + "]/td[2]/text()")[2] # labos or firms
place = selector.xpath("//tr[" + str(i) + "]/td[3]/text()")[2] # places
title = selector.xpath("//tr[" + str(i) + "]/td[4]/a/text()")[2] # titles?
dates.append(date)
institutes.append(institute)
places.append(place)
titles.append(title)
for i in range(len(real_urls)):
c_post = requests.get(half_url + real_urls[i])
            # handle the encoding:
            encod = chardet.detect(c_post.content)['encoding'] # important: get the detected encoding value
if encod == "utf-8":
c_post.encoding = encod
else :
c_post.encoding = "windows-1252"
c_corri = c_post.text
            # remove inappropriate \n characters:
# (?<!(>|\n|\.|[A-Z]|[0-9]))\n(?!(<|\s|•|[1-9]\.|— |– |- |[A-Z]))
regex = r"(?<!(>|\n|\.|[A-Z]|[0-9]))\n(?!(<|\s|•|[1-9]\.|— |– |- |[A-Z]))"
c_corri_final = re.sub(regex," ",c_corri)
filename = pathFile+"/Stage" + str(i) + ".txt"
fileWindow = pathWindow+"/Stage" + str(i) + ".html"
with open(filename, "w", encoding = "UTF-8") as fd:
os.chmod(filename,stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO)
fd.write("Titre: " + titles[i] + "\n")
fd.write("Date: " + dates[i] + "\n")
fd.write("Organisme: " + institutes[i] + "\n")
fd.write("Lieu: " + places[i] + "\n\n\n")
fd.write(c_corri_final)
with open(fileWindow, "w", encoding = "UTF-8") as fd:
os.chmod(fileWindow,stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO)
fd.write("""<!DOCTYPE html>
<html lang="fr">
<head>
<meta charset="UTF-8">
<title> Internshiplz - Y<NAME> - Qinyue LIU - M1 IDL </title>
<link rel="stylesheet" type="text/css" media="all" href="../FichierHtml_style.css" />
</head>
<body>""")
fd.write("<p>Titre: " + titles[i] + "</p>")
fd.write("<p>Date: " + dates[i] + "</p>")
fd.write("<p>Organisme: " + institutes[i] + "</p>")
fd.write("<p>Lieu: " + places[i] + "</p><br/>")
fd.write("<p>")
for cara in c_corri_final:
if cara != "\n":
fd.write(cara)
else :
fd.write("</p><p>")
fd.write("</p>")
fd.write("""
</body>
</html>
""")
real_urls = get_posts(url)
get_content(real_urls, url)
#------------------------------------------------------------------------------------------"Linkedin"
url_lin = "https://www.linkedin.com/jobs/search?keywords=Nlp&location=France&locationId=&geoId=105015875&f_TPR=&f_JT=I"
def get_posts_linkedin(url_lin):
    '''
    Go through the internship information and write it to .txt and .html files
    on the server
    Parameters:
        url_lin (str): URL of the LinkedIn search page
    Returns:
        None
    '''
posts = requests.get(url_lin)
posts.encoding = "utf-8"
selector = etree.HTML(posts.text)
titles = selector.xpath('//*[@class="base-search-card__title"]/text()')
locations = selector.xpath('//*[@class="job-search-card__location"]/text()')
companies = selector.xpath('//*[@class="hidden-nested-link"]/text()')
regex = 'href="(.*)" data-tracking-control-name="public_jobs_jserp-result_search-card"'
all_urls = re.findall(regex, posts.text)
regex = 'datetime="(.*)">'
dates = re.findall(regex, posts.text)
for i in range(3):
#sftp://<EMAIL>/home/IdL/2021/liuqinyu/public_html/new/fichier_html
filename = pathFile+"/Linkedin" + str(i) + ".txt"
fileWindow = pathWindow+"/Linkedin" + str(i) + ".html"
title = titles[i].strip(" \n")
location = locations[i].strip(" \n")
company = companies[i].strip(" \n")
list_date = dates[i].split("-")
# format d/m/y ------ date = list_date[2] + "/" + list_date[1] + "/" + list_date[0]
# format y-m-d :
date = list_date[0] + "-" + list_date[1] + "-" + list_date[2]
# print(titles[i])
with open(filename,'w',encoding="UTF-8") as fd:
os.chmod(filename,stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO)
fd.write("Titre: " + title + "\n")
fd.write("Date: " + date + "\n")
fd.write("Organisme: " + company + "\n")
fd.write("Lieu: " + location + "\n\n\n")
fd.write(all_urls[i] + "\n")
with open(fileWindow,'w',encoding="Windows-1252") as fd:
os.chmod(fileWindow,stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO)
fd.write("""<!DOCTYPE html>
<html lang="fr">
<head>
<meta charset="Windows-1252">
<title> Internshiplz - Yuhe TANG - Qinyue LIU - M1 IDL </title>
<link rel="stylesheet" type="text/css" media="all" href="../FichierHtml_style.css" />
</head>
<body>""")
fd.write("<p>Titre: " + title + "</p>")
fd.write("<p>Date: " + date + "</p>")
fd.write("<p>Organisme: " + company + "</p>")
fd.write("<p>Lieu: " + location + "</p><br/>")
fd.write("<p>"+all_urls[i] + "</p>")
fd.write("""
</body>
</html>
""")
get_posts_linkedin(url_lin)
#---------------------------------------------------------------------------------------------------"Indeed"
urlIndeed = "https://fr.indeed.com/emplois?q=traitement+automatique+des+langues&jt=internship"
# get the HTML code of the Indeed page
server = "https://fr.indeed.com"
target = urlIndeed
req = requests.get(url = target)
html = req.text
# get the URLs of the internship pages
urls = []
divJobcard = BeautifulSoup(html,'lxml')
jobcard = divJobcard.find('div', id = 'mosaic-provider-jobcards')
divA = BeautifulSoup(str(jobcard),'lxml')
# a = divA.find_all('a', rel="nofollow",target="_blank")
a = divA.find_all('a', class_="jcs-JobTitle" , target="_blank")
for hrefStage in a :
href = server + hrefStage.get('href')
urls.append(href)
def getUrl(urlhtml):
    '''
    Get the HTML source code of the site
    Parameters:
        urlhtml (str): URL of the internship page
    Returns:
        html (any): source code of the site
    '''
response = requests.get(url=urlhtml)
wb_data = response.text
html = etree.HTML(wb_data)
return html
def getTitle(urlStage) :
    '''
    Get the internship title
    Parameters:
        urlStage (str): URL of the internship page
    Returns:
        title[0] (str): the title
    '''
htmlStage = getUrl(urlStage)
title = htmlStage.xpath('//*[@id="viewJobSSRRoot"]/div[1]/div/div[3]/div/div/div[1]/div[1]/div[2]/div[1]/div[1]/h1/text()')
# //*[@id="viewJobSSRRoot"]/div[1]/div/div[3]/div/div/div[1]/div[1]/div[3]/div[1]/div[1]/h1
# //*[@id="viewJobSSRRoot"]/div[1]/div/div[3]/div/div/div[1]/div[1]/div[2]/div[1]/div[1]/h1
if len(title) == 0 :
title = htmlStage.xpath('//*[@id="viewJobSSRRoot"]/div[1]/div/div[3]/div/div/div[1]/div[1]/div[3]/div[1]/div[1]/h1/text()')
    return title[0] # we get a list with a single title; to obtain the title itself, use index 0
def getInst(urlStage):
    '''
    Get the internship institution
    Parameters:
        urlStage (str): URL of the internship page
    Returns:
        institue[0] (str): the institution
    '''
htmlStage = getUrl(urlStage)
institue = htmlStage.xpath('//*[@id="viewJobSSRRoot"]/div[1]/div/div[3]/div/div/div[1]/div[1]/div[2]/div[1]/div[2]/div/div/div/div[1]/div[2]/div//text()')
if len(institue) == 0 :
institue = htmlStage.xpath('//*[@id="viewJobSSRRoot"]/div[1]/div/div[3]/div/div/div[1]/div[1]/div[3]/div[1]/div[2]/div/div/div/div[1]/div[2]/div//text()')
# //*[@id="viewJobSSRRoot"]/div[1]/div/div[3]/div/div/div[1]/div[1]/div[3]/div[1]/div[2]/div/div/div/div[1]/div[2]/div/a
return institue[0]
def getPlace(urlStage):
    '''
    Get the internship location
    Parameters:
        urlStage (str): URL of the internship page
    Returns:
        place[0] (str): the location
    '''
htmlStage = getUrl(urlStage)
place = htmlStage.xpath('//*[@id="viewJobSSRRoot"]/div[1]/div/div[3]/div/div/div[1]/div[1]/div[2]/div[1]/div[2]/div/div/div/div[2]/div/text()')
if len(place) == 0 :
place = htmlStage.xpath('//*[@id="viewJobSSRRoot"]/div[1]/div/div[3]/div/div/div[1]/div[1]/div[3]/div[1]/div[2]/div/div/div/div[2]/div/text()')
return place[0]
def getDate(urlStage):
    '''
    Get the internship date
    Parameters:
        urlStage (str): URL of the internship page
    Returns:
        Date (str): the date
    '''
htmlStage = getUrl(urlStage)
dateChaine = htmlStage.xpath('//*[@id="viewJobSSRRoot"]/div[1]/div/div[3]/div/div/div[1]/div[1]/div[6]/div[2]//text()')
if len(dateChaine) == 0 :
dateChaine = htmlStage.xpath('//*[@id="viewJobSSRRoot"]/div[1]/div/div[3]/div/div/div[1]/div[1]/div[7]/div[2]//text()')
for chaine in dateChaine :
if "il y a" in chaine :
entre = str(chaine)
numDate = int(entre[7:9])
today = datetime.date.today()
Date = today - datetime.timedelta(days=numDate)
Date = Date.strftime("%Y-%m-%d")
return Date
if "instant" in chaine or "Aujourd'hui" in chaine :
Date = datetime.date.today()
Date = Date.strftime("%Y-%m-%d")
return Date
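# Illustration of the parsing above (not in the original code): a tag text such
# as "il y a 3 jours" yields today's date minus 3 days, while a text containing
# "instant" or "Aujourd'hui" yields today's date; both are returned formatted
# as YYYY-MM-DD.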
def getContent(urlStage):
    '''
    Get the full text content (header + description) of the internship page
    Parameters:
        urlStage (str): URL of the internship page
    Returns:
        content (list str): the text content of the page
    '''
htmlStage = getUrl(urlStage)
head = htmlStage.xpath('//*[@id="viewJobSSRRoot"]/div[1]/div/div[3]/div/div/div[1]/div[1]/div[2]//text()')
body = htmlStage.xpath('//*[@id="jobDescriptionText"]//text()')
content = head + body
    return content # returns a list of strings
# for each internship page, get the information and write it to a .txt file
for href in urls :
    # get the information
    # for testing: print(str(href))
title = getTitle(href)
date = getDate(href)
inst = getInst(href)
place = getPlace(href)
    content = getContent(href) # a list, not a str
    # and write them to a .txt file
i = urls.index(href)
filename = pathFile+"/Indeed" + str(i) + ".txt"
txt = open(filename, "w+", encoding = "UTF-8")
os.chmod(filename,stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO)
txt.write("Titre: " + title + "\n")
txt.write("Date: " + date + "\n")
txt.write("Organisme: " + inst + "\n")
txt.write("Lieu: " + place + "\n")
txt.write("\n\n")
    for line in content : # write line by line
txt.write(line)
txt.close()
fileWindow = pathWindow+"/Indeed" + str(i) + ".html"
html = open(fileWindow, "w+", encoding = "UTF-8")
os.chmod(fileWindow,stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO)
html.write("""<!DOCTYPE html>
<html lang="fr">
<head>
<meta charset="UTF-8">
<title> Internshiplz - Yuhe TANG - Qinyue LIU - M1 IDL </title>
<link rel="stylesheet" type="text/css" media="all" href="../FichierHtml_style.css" />
</head>
<body>""")
html.write("<p>Titre: " + title + "</p>")
html.write("<p>Date: " + date + "</p>")
html.write("<p>Organisme: " + inst + "</p>")
html.write("<p>Lieu: " + place + "</p><br/>")
    for line in content :
# Source repository: hlerebours/lambda-calculus, file: lambdax/test/test_lambda_calculus.py
""" For now, all tests related to `lambdax`. It'll be split later. """
from collections import OrderedDict
from functools import partial
import random
from pytest import raises
import lambdax
from lambdax import λ, X, x1, x2, x3, x4, x5, is_λ, comp, chaining, and_, or_, if_
from lambdax.test import assert_value
def test_still_functional_builtins():
assert_value(abs(-42), 42)
assert_value(pow(2, 3), 8)
def test_provided_magic_variables():
magic_variables = [(name, value)
for name, value in vars(lambdax).items()
if name[0].lower() == 'x' and (len(name) == 1 or name[-1].isdigit())]
assert len(magic_variables) == 20
assert all(value._λ_index + 1 == int(name[1:] or 1) # pylint: disable=protected-access
for name, value in magic_variables)
def test_identity():
identity = X
assert is_λ(identity)
assert_value(identity(int), int)
assert_value(identity(42), 42)
def test_attribute():
# delay the read of an attribute: here, get_my_attr ~= lambda x: x.my_attr
class Foo(object):
test = 42
def __init__(self):
self.test = 51
get_my_attr = X.test
assert is_λ(get_my_attr)
assert_value(get_my_attr(Foo()), 51)
assert_value(get_my_attr(Foo), 42)
def test_property():
# delay the read of an property: here, get_imaginary_part ~= lambda x: x.imag
get_imaginary_part = X.imag
assert is_λ(get_imaginary_part)
assert_value(get_imaginary_part(complex(42, 51)), 51)
def test_instantiation():
# delay the instantiation of a class: here, instantiate ~= lambda x: x()
instantiate = X()
assert is_λ(instantiate)
assert_value(instantiate(int), 0)
assert_value(instantiate(str), "")
def test_method_no_arg():
# delay the call to the method __neg__ with no argument
neg = -X
assert is_λ(neg)
assert_value(list(map(neg, range(4))), [0, -1, -2, -3])
def test_method_with_constant():
# delay the application of a method taking a constant;
# here, join ~= lambda x: x.join(['O', 'o'])
join = X.join(λ(['O', 'o'])) # λ(..) prevents from β-reducing too soon
assert is_λ(join)
assert_value(join('_'), 'O_o')
def test_method_with_variable():
# delay the application of a method taking a variable;
# here, join ~= lambda x, y: x.join(y)
join = x1.join(x2)
assert is_λ(join)
assert_value(join('_', ['O', 'o']), 'O_o')
def test_method_with_variable2():
# the same as before but with variables taken in another order;
# here, join ~= lambda x, y: y.join(x)
join = x2.join(x1)
assert is_λ(join)
assert_value(join(['O', 'o'], '_'), 'O_o')
def test_asymmetric_method():
# delay the application of a method; here, half ~= lambda x: x // 2
# note that 2.__floordiv__ exists; this test checks that we don't call it by mistake:
# half(21) != 2.__floordiv__(21) (== 2 // 21 == 0)
half = X // 2
assert is_λ(half)
assert_value(half(21), 10)
def test_multiply():
# delay the application of a usual infix operator
my_lambda = X * 4
assert is_λ(my_lambda)
assert_value(my_lambda(3), 12)
def test_power():
# delay the application of another usual infix operator
my_lambda = X ** 3
assert is_λ(my_lambda)
assert_value(my_lambda(2), 8)
def test_getitem():
my_lambda = X["abc"]
assert is_λ(my_lambda)
assert_value(my_lambda({"abc": "Foo"}), "Foo")
def test_getslice():
# it's just a particular case of `getitem`, really (with a "slice" as argument)
my_lambda = X[1:3]
assert_value(my_lambda("abcdefg"), "bc")
def test_abs():
my_lambda = abs(X)
assert is_λ(my_lambda)
assert_value(my_lambda(-12), 12)
assert_value(my_lambda(13), 13)
def test_bit_and():
my_lambda = X & 2
assert is_λ(my_lambda)
assert_value(my_lambda(0), 0)
assert_value(my_lambda(1), 0)
assert_value(my_lambda(2), 2)
assert_value(my_lambda(3), 2)
assert_value(my_lambda(4), 0)
assert_value(my_lambda(6), 2)
def test_bit_shift():
my_lambda = X << 2
assert is_λ(my_lambda)
assert_value(my_lambda(0), 0)
assert_value(my_lambda(1), 4)
assert_value(my_lambda(3), 12)
def test_bit_flip():
my_lambda = ~X
assert is_λ(my_lambda)
assert_value(my_lambda(0), -1)
assert_value(my_lambda(1), -2)
assert_value(my_lambda(3), -4)
def test_eq():
my_lambda = X + 3 == 7
assert is_λ(my_lambda)
assert my_lambda(4) is True
assert my_lambda(-4) is False
assert my_lambda(True) is False
def test_lt():
my_lambda = X < 42
assert is_λ(my_lambda)
assert my_lambda(-10) is True
assert my_lambda(42) is False
assert my_lambda(51) is False
def test_ge():
my_lambda = X >= 10
assert is_λ(my_lambda)
assert my_lambda(12) is True
assert my_lambda(10) is True
assert my_lambda(-50) is False
def test_bool():
my_lambda = λ(bool)(X)
assert is_λ(my_lambda)
# actually calls `__bool__`
assert my_lambda(0) is False
assert my_lambda(4) is True
# actually calls `__len__`
assert my_lambda("") is False
assert my_lambda("string") is True
assert my_lambda([]) is False
assert my_lambda([0]) is True
def test_getattr():
get_imag = λ(getattr)(X, 'imag')
assert is_λ(get_imag)
assert_value(get_imag(complex(14, 42)), 42)
with raises(AttributeError):
get_imag('foo')
get_attr = λ(getattr)(42, X)
assert is_λ(get_attr)
assert_value(get_attr('__str__')(), '42')
with raises(AttributeError):
get_attr('foo')
my_getattr = λ(getattr)(x1, x2)
assert is_λ(my_getattr)
assert_value(my_getattr(14, '__str__')(), '14')
with raises(AttributeError):
my_getattr(14, 'foo')
my_getattr_with_default = λ(getattr)(x1, x2, x3)
assert is_λ(my_getattr_with_default)
assert_value(my_getattr_with_default(15, '__str__', int)(), '15')
assert_value(my_getattr_with_default(16, 'foo', int)(), 0)
assert_value(my_getattr_with_default(17, 'foo', 1.4), 1.4)
def test_logic_and():
my_lambda = and_(X, 4)
assert is_λ(my_lambda)
assert_value(my_lambda(0), 0)
assert_value(my_lambda(1), 4)
assert_value(my_lambda(-3), 4)
assert_value(my_lambda(""), "")
assert_value(my_lambda("abc"), 4)
assert_value(my_lambda([]), [])
assert_value(my_lambda([0]), 4)
def test_logic_or():
my_lambda = or_(X, 4)
assert is_λ(my_lambda)
assert_value(my_lambda(0), 4)
assert_value(my_lambda(1), 1)
assert_value(my_lambda(-3), -3)
assert_value(my_lambda(""), 4)
assert_value(my_lambda("abc"), "abc")
assert_value(my_lambda([]), 4)
assert_value(my_lambda([0]), [0])
def test_logic_laziness():
to_fill = []
empty = []
lazy_or = or_(X, lambdax.iadd(to_fill, ['empty']))
lazy_and = and_(X, lambdax.iadd(to_fill, X))
assert is_λ(lazy_or) and is_λ(lazy_and)
assert_value(lazy_or(4), 4)
assert lazy_and(empty) is empty
assert_value(to_fill, [])
assert lazy_and([2, 3]) is to_fill
assert lazy_or(empty) is to_fill
assert_value(to_fill, [2, 3, 'empty'])
def test_ternary_logic():
_bach = if_(X % 2 == 0, X / 2, X * 3 + 1)
assert_value(_bach(2), 1)
assert_value(_bach(42), 21)
assert_value(_bach(21), 64)
def test_ternary_laziness():
to_fill = []
# test laziness
append_bool = if_(x1, x2.append(λ(1)), x2.append(λ(0)))
assert_value(to_fill, [])
append_bool(False, to_fill)
assert_value(to_fill, [0])
append_bool(True, to_fill)
assert_value(to_fill, [0, 1])
def test_hard_use_case():
my_lambda = λ(len)(X["abc"][5:])
assert is_λ(my_lambda)
assert_value(my_lambda({"abc": "24-character-long-string"}), 24 - 5)
def test_setattr_forbidden():
my_var = X
with raises(AttributeError):
my_var.my_attr = 42
my_const = λ(3)
with raises(AttributeError):
my_const.toto = 42
my_expr = my_var * my_const
with raises(AttributeError):
my_expr.toto = 42
assert_value(my_expr(4), 12) # it still works :)
def test_two_variables():
my_lambda = x1 + x2
assert is_λ(my_lambda)
assert_value(my_lambda(1, 5), 6)
def test_two_variables_harder():
my_lambda = (x1 + 4) * x2 + 7
assert is_λ(my_lambda)
assert_value(my_lambda(3, 5), 42)
def test_with_named_param():
class Class:
@staticmethod
def meth(arg, named=42):
return arg, named
my_lambda = x1.meth(x2)
assert is_λ(my_lambda)
assert_value(my_lambda(Class(), 1), (1, 42))
my_lambda = x1.meth(arg=x2)
assert is_λ(my_lambda)
assert_value(my_lambda(Class(), 2), (2, 42))
my_lambda = x1.meth(3, x2)
assert is_λ(my_lambda)
assert_value(my_lambda(Class(), 51), (3, 51))
my_lambda = x1.meth(4, named=x2)
assert is_λ(my_lambda)
assert_value(my_lambda(Class(), 52), (4, 52))
def test_many_variables():
my_lambda = x1 ** 5 + x2 ** 4 + x3 ** 3 + x4 ** 2 + x5
assert is_λ(my_lambda)
assert_value(my_lambda(1, 2, 3, 4, 5), 1 + 2 ** 4 + 3 ** 3 + 4 ** 2 + 5)
def test_multiple_var_usage():
my_lambda = x1 ** 2 + x2 + x1 * 4
assert is_λ(my_lambda)
assert_value(my_lambda(3, 7), 28)
my_lambda += x2 ** 3 # it really is the same `x2` than before
assert_value(my_lambda(-2, 3), 26)
def test_wrong_var_choices():
def test_expr(expr_2args, missing_x):
try:
expr_2args(1, 2)
assert False
except TypeError as exc:
assert "Missing x%d" % missing_x in str(exc)
test_expr(x2 ** 2 + x3, missing_x=1)
test_expr(x3 ** 2 + x1, missing_x=2)
def test_distributivity():
add6 = X + 3 * 2
mul2_add3 = X * 2 + 3
assert is_λ(add6)
assert is_λ(mul2_add3)
assert_value(add6(7), 13)
assert_value(mul2_add3(7), 17)
def test_associativity():
by_syntax = (X + 3) * 2
by_definition = X + 3
by_definition *= 2
assert is_λ(by_syntax)
assert is_λ(by_definition)
assert_value(by_syntax(7), 20)
assert_value(by_definition(7), 20)
def test_augment_abstraction():
add3 = X + 3
add7 = add3 + 4
assert is_λ(add3)
assert is_λ(add7)
assert_value(add3(10), 13)
assert_value(add7(10), 17)
assert_value(add3(20), 23)
assert_value(add7(20), 27)
add3 *= 2
assert_value(add3(30), 66) # damn!
assert_value(add7(30), 37) # that one remains unchanged
def test_composition():
mul3 = X * 3
add7 = X + 7
mul3_add7 = comp(add7, mul3)
assert is_λ(mul3_add7)
assert_value(mul3_add7(2), 13)
add7_mul3 = chaining(add7, mul3)
assert is_λ(add7_mul3)
assert_value(add7_mul3(2), 27)
def test_compose_with_non_abstraction():
# this makes sense
suffixed_length = comp(len, X + "def")
assert is_λ(suffixed_length)
assert_value(suffixed_length("abc"), 6)
# that doesn't make sense
with raises(ValueError):
comp(X + "def", "abc")
def test_compose_different_card():
two_var = x1 * 3 + x2 * 7
one_var = X * 2
composed = comp(one_var, two_var)
assert is_λ(composed)
assert_value(composed(2, 4), (2 * 3 + 4 * 7) * 2)
composed = comp(two_var, one_var)
assert is_λ(composed)
# it makes no sense to have "g ∘ f" where `g` doesn't take exactly one
# parameter (the return value of `f`)
with raises(TypeError):
composed(2, 4)
with raises(TypeError):
composed(3)
# ... except if the return of `f` has the same arity as `g`
assert_value(composed((1,)), two_var(1, 1))
def test_tricky_compose():
times1 = X
times2 = X * 2
times3 = X * 3
multiply_all = λ(map)(X, range(5))
res = list(map(partial(comp, multiply_all), (times1, times2, times3)))
assert_value(res, [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15]
])
def test_mixing_is_not_composing():
# /!\ please don't mix expressions... particularly when it looks like a composition
lambda_a = X
lambda_b = X.__add__
my_lambda = lambda_b(lambda_a)
assert is_λ(my_lambda)
assert_value(my_lambda(3), 6)
# this is possible, but dangerous because of the shared X
    lambda_a
rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_174(self):
inp = '''0.1'''
fmt = '''(G5.2E4)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_175(self):
inp = '''-0.1'''
fmt = '''(G5.2E4)'''
result = [-1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_176(self):
inp = '''0.01'''
fmt = '''(G5.2E4)'''
result = [1.0000000000000000e-02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_177(self):
inp = '''-0.01'''
fmt = '''(G5.2E4)'''
result = [-1.0000000000000000e-02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_178(self):
inp = '''0.001'''
fmt = '''(G5.2E4)'''
result = [1.0000000000000000e-03]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_179(self):
inp = '''-0.001'''
fmt = '''(G5.2E4)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_180(self):
inp = '''0.0001'''
fmt = '''(G5.2E4)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_181(self):
inp = '''-0.0001'''
fmt = '''(G5.2E4)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_182(self):
inp = '''-1.96e-16'''
fmt = '''(G5.2E4)'''
result = [-1.9600000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_183(self):
inp = '''3.14159'''
fmt = '''(G5.2E4)'''
result = [3.1410000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_184(self):
inp = '''- 1.0'''
fmt = '''(G5.2E4)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_185(self):
inp = '''1d12'''
fmt = '''(G5.2E4)'''
result = [1.0000000000000000e+10]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_186(self):
inp = '''1D12'''
fmt = '''(G5.2E4)'''
result = [1.0000000000000000e+10]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_187(self):
inp = '''-1 d12'''
fmt = '''(G5.2E4)'''
result = [-1.0000000000000000e-02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_188(self):
inp = '''.'''
fmt = '''(G5.2E4)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_189(self):
inp = '''.1'''
fmt = '''(G5.2E4)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_190(self):
inp = '''0.1E+200'''
fmt = '''(G5.2E4)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_191(self):
inp = '''3.'''
fmt = '''(G10.2E4)'''
result = [3.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_192(self):
inp = '''-3.'''
fmt = '''(G10.2E4)'''
result = [-3.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_193(self):
inp = '''10.'''
fmt = '''(G10.2E4)'''
result = [1.0000000000000000e+01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_194(self):
inp = '''-10.'''
fmt = '''(G10.2E4)'''
result = [-1.0000000000000000e+01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_195(self):
inp = '''100.'''
fmt = '''(G10.2E4)'''
result = [1.0000000000000000e+02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_196(self):
inp = '''-100.'''
fmt = '''(G10.2E4)'''
result = [-1.0000000000000000e+02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_197(self):
inp = '''1000.'''
fmt = '''(G10.2E4)'''
result = [1.0000000000000000e+03]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_198(self):
inp = '''-1000.'''
fmt = '''(G10.2E4)'''
result = [-1.0000000000000000e+03]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_199(self):
inp = '''10000.'''
fmt = '''(G10.2E4)'''
result = [1.0000000000000000e+04]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_200(self):
inp = '''-10000.'''
fmt = '''(G10.2E4)'''
result = [-1.0000000000000000e+04]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_201(self):
inp = '''100000.'''
fmt = '''(G10.2E4)'''
result = [1.0000000000000000e+05]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_202(self):
inp = '''-100000.'''
fmt = '''(G10.2E4)'''
result = [-1.0000000000000000e+05]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_203(self):
inp = '''123456789.'''
fmt = '''(G10.2E4)'''
result = [1.2345678900000000e+08]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_204(self):
inp = '''0.1'''
fmt = '''(G10.2E4)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_205(self):
inp = '''-0.1'''
fmt = '''(G10.2E4)'''
result = [-1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_206(self):
inp = '''0.01'''
fmt = '''(G10.2E4)'''
result = [1.0000000000000000e-02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_207(self):
inp = '''-0.01'''
fmt = '''(G10.2E4)'''
result = [-1.0000000000000000e-02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_208(self):
inp = '''0.001'''
fmt = '''(G10.2E4)'''
result = [1.0000000000000000e-03]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_209(self):
inp = '''-0.001'''
fmt = '''(G10.2E4)'''
result = [-1.0000000000000000e-03]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_210(self):
inp = '''0.0001'''
fmt = '''(G10.2E4)'''
result = [1.0000000000000000e-04]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_211(self):
inp = '''-0.0001'''
fmt = '''(G10.2E4)'''
result = [-1.0000000000000000e-04]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_212(self):
inp = '''-1.96e-16'''
fmt = '''(G10.2E4)'''
result = [-1.9600000000000000e-16]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_213(self):
inp = '''3.14159'''
fmt = '''(G10.2E4)'''
result = [3.1415899999999999e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_214(self):
inp = '''- 1.0'''
fmt = '''(G10.2E4)'''
result = [-1.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_215(self):
inp = '''1d12'''
fmt = '''(G10.2E4)'''
result = [1.0000000000000000e+10]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_216(self):
inp = '''1D12'''
fmt = '''(G10.2E4)'''
result = [1.0000000000000000e+10]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_217(self):
inp = '''-1 d12'''
fmt = '''(G10.2E4)'''
result = [-1.0000000000000000e+10]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_218(self):
inp = '''.'''
fmt = '''(G10.2E4)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_219(self):
inp = '''.1'''
fmt = '''(G10.2E4)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_220(self):
inp = '''0.1E+200'''
fmt = '''(G10.2E4)'''
result = [1.0000000000000001e+199]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_221(self):
inp = '''3.'''
fmt = '''(G3.3E4)'''
result = [3.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_222(self):
inp = '''-3.'''
fmt = '''(G3.3E4)'''
result = [-3.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_223(self):
inp = '''10.'''
fmt = '''(G3.3E4)'''
result = [1.0000000000000000e+01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_224(self):
inp = '''-10.'''
fmt = '''(G3.3E4)'''
result = [-1.0000000000000000e-02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_225(self):
inp = '''100.'''
fmt = '''(G3.3E4)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_226(self):
inp = '''-100.'''
fmt = '''(G3.3E4)'''
result = [-1.0000000000000000e-02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_227(self):
inp = '''1000.'''
fmt = '''(G3.3E4)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_228(self):
inp = '''-1000.'''
fmt = '''(G3.3E4)'''
result = [-1.0000000000000000e-02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_229(self):
inp = '''10000.'''
fmt = '''(G3.3E4)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_230(self):
inp = '''-10000.'''
fmt = '''(G3.3E4)'''
result = [-1.0000000000000000e-02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_231(self):
inp = '''100000.'''
fmt = '''(G3.3E4)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
#!/usr/bin/python
# -*- coding: utf-8 -*
# log function
"""
:description:
common log related module
"""
from __future__ import print_function
__all__ = [
'debug', 'info', 'warn', 'error', 'critical',
'init_log_instance', 'set_log_level',
'ROTATION', 'INFINITE',
're_init_log_instance', 'get_inited_logger_name', 'parse',
'backtrace_info', 'backtrace_debug', 'backtrace_error',
'backtrace_critical'
]
import os
import re
import sys
import logging
import time
import threading
from logging import handlers
import u_base
from u_base import u_exception
from u_base import u_platform
ROTATION = 0
INFINITE = 1
# log rotation count
ROTATION_COUNTS = 30
# log level
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
MIN_LEVEL = logging.DEBUG
# global logging instance name
G_INITED_LOGGER = []
# log function
debug = logging.debug
info = logging.info
warn = logging.warning
error = logging.error
critical = logging.critical
class _Singleton(object):
"""
internal use for logging.
"""
_LOCK = threading.Lock()
def __init__(self, cls):
self.__instance = None
self.__cls = cls
def __call__(self, *args, **kwargs):
    # use a context manager so the lock is released even if construction raises
    with self._LOCK:
        if self.__instance is None:
            self.__instance = self.__cls(*args, **kwargs)
    return self.__instance
class _MsgFilter(logging.Filter):
"""
Message filter by log level; extends logging.Filter
"""
def __init__(self, msg_level=logging.WARNING):
"""
construct function
:param msg_level: the level of log level
"""
super().__init__()
self.msg_level = msg_level
def filter(self, record):
"""
overridden filter: drop records at or above msg_level
:param record: the log record to check
:return: True to log the record, False to drop it
"""
if record.levelno >= self.msg_level:
return False
else:
return True
def _line(depth=0):
"""
get current code line number
:param depth: the depth of the frame at the top of the call stack.
:return: line number
"""
return sys._getframe(depth + 1).f_lineno
def _file(depth=0):
"""
get current call function name
:param depth: the depth of the frame at the top of the call stack.
:return: call function name
"""
return os.path.basename(sys._getframe(depth + 1).f_code.co_filename)
def _process_thread_id():
"""
get current process id and thread id
:return: a str of {process_id}:{thread_id}
"""
return str(os.getpid()) + ':' + str(threading.current_thread().ident)
def _log_file_func_info(msg, back_trace_len=0):
"""
log file and function info
:param msg: message
:param back_trace_len: back trace length
:return: the message added file function info
"""
temp_msg = ' * [%s] [%s:%s] ' % (
_process_thread_id(), _file(2 + back_trace_len),
_line(2 + back_trace_len)
)
msg = '%s%s' % (temp_msg, msg)
if isinstance(msg, str):
return msg
else:
return msg.decode('utf8')
def set_log_level(level):
"""
change log level during runtime
:param level: log level
:return: none
"""
logger_instance = _LoggerInstance()
logger_instance.get_logger().setLevel(level)
# log level filter
class MaxLevelFilter(logging.Filter):
"""Filters (lets through) all messages with level < LEVEL"""
def __init__(self, level):
super().__init__()
self.level = level
def filter(self, record):
# "<" instead of "<=": since logger.setLevel is inclusive, this should be exclusive
return record.levelno < self.level
@_Singleton
class _LoggerInstance(object):
"""
logger Instance object, for singleton
"""
_logger_instance = None
_max_size = 0
_log_file = ''
_log_type = ROTATION
_print_console = False
def __init__(self):
pass
def get_logger(self):
"""
get logger instance
:return: logger instance
"""
if self._logger_instance is None:
raise u_exception.LoggerException(
'The logger has not been initialized Yet. '
'Call init_log_instance first'
)
return self._logger_instance
def set_logger(self, logger):
"""
set the logger instance
:param logger: logging instance
:return: none
"""
if self._logger_instance is not None:
raise u_exception.LoggerException(
"""WARNING!!! The logger instance has been initialized already\
.Please do NOT set twice, or call reset_log_instance replace""")
self._logger_instance = logger
def reset_logger(self, logger):
"""
reset logging instance
:param logger: logger instance
:return: none
"""
del self._logger_instance
self._logger_instance = logger
logging.root = logger
def is_initialized(self):
"""
judge the log instance is Initialized or not
:return: True or False
"""
if self._logger_instance is None:
return False
else:
return True
def config_file_logger(self, log_level, log_file, log_type,
max_size, print_console=True, generate_wf_file=False):
"""
config logging instance
:param log_level: the log level
:param log_file: log file path
:param log_type: log type
:param max_size: str max size
:param print_console: Decide whether or not print to console
:param generate_wf_file: whether to also write WARNING and FATAL logs to a separate .wf file
:return: none
"""
# if the log file does not exist, try to create it
if not os.path.exists(log_file):
try:
os.mknod(log_file)
except IOError:
# create exception
raise u_exception.LoggerException(
'log file does not exist and could not be created'
)
# config object property
self._log_file = log_file
self._log_type = log_type
self._logger_instance.setLevel(log_level)
self._max_size = max_size
self._print_console = print_console
# '%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s'
# log format
formatter = logging.Formatter(
'%(levelname)s:\t %(asctime)s * '
'[%(process)d:%(thread)x] [%(filename)s:%(lineno)s]\t %(message)s'
)
# print to console
if print_console:
info('print_console enabled, will print to stdout')
# the default basicConfig may have already registered a StreamHandler on the root logger, which would duplicate output; remove it first
for handler in logging.getLogger().handlers:
if handler.name is None and isinstance(handler, logging.StreamHandler):
logging.getLogger().removeHandler(handler)
# DEBUG and INFO go to stdout
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(formatter)
stdout_handler.setLevel(MIN_LEVEL)
stdout_handler.addFilter(MaxLevelFilter(WARNING))
# WARNING and above go to stderr
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setFormatter(formatter)
stderr_handler.setLevel(max(log_level, WARNING))
self._logger_instance.addHandler(stdout_handler)
self._logger_instance.addHandler(stderr_handler)
# set RotatingFileHandler
rf_handler = None
if log_type == ROTATION:
rf_handler = handlers.RotatingFileHandler(self._log_file, 'a', max_size, ROTATION_COUNTS, encoding='utf-8')
else:
rf_handler = logging.FileHandler(self._log_file, 'a', encoding='utf-8')
rf_handler.setFormatter(formatter)
rf_handler.setLevel(log_level)
# optionally write WARNING and FATAL logs to a separate .wf file
if generate_wf_file:
# add the warning/fatal handler
file_wf = str(self._log_file) + '.wf'
warn_handler = logging.FileHandler(file_wf, 'a', encoding='utf-8')
warn_handler.setLevel(logging.WARNING)
warn_handler.setFormatter(formatter)
self._logger_instance.addHandler(warn_handler)
rf_handler.addFilter(_MsgFilter(logging.WARNING))
self._logger_instance.addHandler(rf_handler)
def init_log_instance(name, level=logging.INFO, file='u_log', log_type=ROTATION,
max_size=1073741824, print_console=False, generate_wf=False):
"""
Initialize your logging
:param name: Unique logger name
:param level: 4 default levels: log.DEBUG log.INFO log.ERROR log.CRITICAL
:param file: log file path. Will try to create it if it does not exist
:param log_type:
Two type: log.ROTATION and log.INFINITE
log.ROTATION will let logfile switch to a new one (30 files at most).
When logger reaches the 30th logfile, will overwrite from the
oldest to the most recent.
log.INFINITE will write on the logfile infinitely
:param max_size: max log size with byte
:param print_console: print to stdout or not?
:param generate_wf: print log msg with level >= WARNING to file (${logfile}.wf)
:return: none
*E.g.*
::
from u_base import u_log as log
log.init_log_instance(
'test',
log.DEBUG,
'/home/work/test/test.log',
log.ROTATION,
1024,
False
)
log.info('test xxx')
log.critical('test critical')
"""
logging_instance = _LoggerInstance()
if not logging_instance.is_initialized():
# initialize logging instance
logging_instance.set_logger(logging.getLogger())
# create log file
if os.path.exists(file) is False:
# create file on linux platform
if u_platform.is_linux():
os.mknod(file)
elif u_platform.is_windows():
with open(file, 'w+') as log_file_handle:
log_file_handle.write('---Windows Log File Creation ---\n')
else:
raise u_exception.LoggerException("not support platform\n")
elif os.path.isfile(file) is False:
raise u_exception.LoggerException('The log file exists, but it is not a regular file\n')
# set log config
logging_instance.config_file_logger(level, file, log_type, max_size, print_console, generate_wf)
info('-' * 20 + 'Log Initialized Successfully' + '-' * 20)
# set global log name
global G_INITED_LOGGER
G_INITED_LOGGER.append(name)
else:
print('[{0}:{1}] init_log_instance has already been initialized'.format(_file(1), _line(1)))
return
def re_init_log_instance(name, level=logging.INFO, file='u_log',
log_type=ROTATION, max_size=1073741824, print_console=False, generate_wf=False):
"""
Reinitialize the logging system; parameters are the same as init_log_instance.
re_init_log_instance resets all logging parameters.
Make sure you use a different logger name from the old one!
"""
global G_INITED_LOGGER
if name in G_INITED_LOGGER:
msg = 'logger name:%s has been already initialized!!!' % name
raise ValueError(msg)
logging_instance = _LoggerInstance()
logging_instance.reset_logger(logging.getLogger(name))
# create log file
if os.path.exists(file) is False:
# create file on linux platform
if u_platform.is_linux():
os.mknod(file)
elif u_platform.is_windows():
with open(file, 'w+') as log_file_handle:
log_file_handle.write('---Windows Log File Creation ---\n')
else:
raise u_exception.LoggerException("not support platform\n")
elif os.path.isfile(file) is False:
raise u_exception.LoggerException('The log file exists, but it is not a regular file\n')
# set log config
G_INITED_LOGGER.append(name)
logging_instance.config_file_logger(level, file, log_type, max_size, print_console, generate_wf)
info('-' * 20 + 'Log Reinitialized Successfully' + '-' * 20)
return
def get_inited_logger_name():
"""
get initialized logger name
"""
global G_INITED_LOGGER
return G_INITED_LOGGER
def _fail_handle(msg, e):
if not isinstance(msg, str):
msg = msg.decode('utf8')
print('{0}\nerror:{1}'.format(msg, e))
def backtrace_info(msg, back_trace_len=0):
"""
info with backtrace support
"""
try:
msg = _log_file_func_info(msg, back_trace_len)
logging_instance = _LoggerInstance()
logging_instance.get_logger().info(msg)
except u_exception.LoggerException:
return
except Exception as e:
_fail_handle(msg, e)
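# A minimal usage sketch (hypothetical helper, assuming init_log_instance has already been
# called): a thin wrapper adds one extra stack frame, so it passes back_trace_len=1 to make
# the logged file/line information point at the wrapper's caller instead of the wrapper itself.
def _example_log_event(msg):
    """Hypothetical helper; not part of the original module."""
    backtrace_info('[event] %s' % msg, back_trace_len=1)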
def backtrace_debug(msg, back_trace_len=0):
"""
debug with backtrace support
"""
try:
msg = _log_file_func_info(msg, back_trace_len)
logging_instance = _LoggerInstance()
logging_instance.get_logger().debug(msg)
except u_exception.LoggerException:
return
except Exception as e:
_fail_handle(msg, e)
def backtrace_warn(msg, back_trace_len=0):
"""
warning msg with backtrace support
"""
try:
msg = _log_file_func_info(msg, back_trace_len)
logging_instance = _LoggerInstance()
logging_instance.get_logger().warning(msg)
except u_exception.LoggerException:
return
except Exception as e:
_fail_handle(msg, e)
def backtrace_error(msg, back_trace_len=0):
"""
error msg with backtrace support
"""
try:
msg = _log_file_func_info(msg, back_trace_len)
logging_instance = _LoggerInstance()
logging_instance.get_logger().error(msg)
except u_exception.LoggerException:
return
except Exception as e:
_fail_handle(msg, e)
def backtrace_critical(msg, back_trace_len=0):
"""
logging.CRITICAL with backtrace support
"""
try:
msg = _log_file_func_info(msg, back_trace_len)
logging_instance = _LoggerInstance()
logging_instance.get_logger().critical(msg)
except u_exception.LoggerException:
return
except Exception as e:
_fail_handle(msg, e)
def parse(log_line):
"""
return a dict if the line is valid.
Otherwise, return None
::
dict_info:= {
'loglevel': 'DEBUG',
| |
import time, datetime
import numpy as np
import shutil
import sys
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
from learning.utils_learn import *
from learning.dataloader import SegList, SegListMS, get_loader, get_info
import logging
from learning.validate import validate
import data_transforms as transforms
from dataloaders.utils import decode_segmap
from torch.utils.tensorboard import SummaryWriter
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def mtask_forone_grad(val_loader, model, criterion, task_name, args, test_vis=False):
grad_sum = 0
cnt = 0
model.eval()
score = AverageMeter()
print('tasks used for gradient calculation:', task_name)
for i, (input, target) in enumerate(val_loader):
if torch.cuda.is_available():
input = input.cuda()
for keys, tar in target.items():
target[keys] = tar.cuda()
# input.requires_grad_()
input_var = torch.autograd.Variable(input, requires_grad=True)
# input.retain_grad()
output = model(input_var)
first_loss = None
loss_dict = {}
for c_name, criterion_fun in criterion.items():
if first_loss is None:
first_loss = c_name
# print('l output target', output)
# print('ratget', target)
loss_dict[c_name] = criterion_fun(output, target)
# print('caname', c_name, loss_dict[c_name])
else:
loss_dict[c_name] = criterion_fun(output[c_name], target[c_name])
grad_total_loss = None
for each in task_name:
if grad_total_loss is None:
grad_total_loss = loss_dict[each]
else:
grad_total_loss = grad_total_loss + loss_dict[each]
# grad_total_loss = loss_dict['segmentsemantic'] + loss_dict['depth_zbuffer']
grad_total_loss.backward()
# print('deug val in grad in bugger grad', input_var.grad) # Interesting, here we also able to get the grad
if test_vis:
from learning.utils_learn import accuracy
score.update(accuracy(output['segmentsemantic'], target['segmentsemantic'].long()), input.size(0))
# TODO: the commented-out block below fails to compute the input gradient even when requires_grad is set; the reason is still unknown.
#
# first = True
# for c_name, criterion_fun in criterion.items():
# # print('if in',c_name, task_name)
#
# if c_name in task_name:
# print('get one')
# # loss_calculate = criterion[c_name](output[c_name], target[c_name])
# loss_calculate = criterion_fun(output[c_name], target[c_name])
#
#
# # loss_fn = lambda x, y: torch.nn.functional.cross_entropy(x.float(), y.long().squeeze(dim=1), ignore_index=0,
# # reduction='mean')
# # loss_calculate = loss_fn(output[c_name], target[c_name].float())
#
#
# # o2 = criterion[c_name](output[c_name], target[c_name])
# # import pdb; pdb.set_trace()
# # loss_calculate = torch.mean(output[c_name] - target[c_name].float())
# if first:
# total_loss = loss_calculate
# first = False
#
# else:
# total_loss = total_loss + loss_calculate #TODO: vikram told me cannot be += here, because grad will override
#
#
# input.retain_grad()
# total_loss.backward()
#
# import pdb; pdb.set_trace()
# print(input_var)
# print(input_var.grad)
data_grad = input_var.grad
# print('data grad', data_grad)
np_data_grad = data_grad.cpu().numpy()
L2_grad_norm = np.linalg.norm(np_data_grad)
grad_sum += L2_grad_norm
# increment the batch # counter
cnt += 1
if args.debug:
if cnt>200:
break
if test_vis:
print('Clean Acc for Seg: {}'.format(score.avg))
print('Vulnerability in Grad Norm')
print("average grad for task {} :".format(task_name), grad_sum * 1.0 /cnt)
return grad_sum * 1.0 /cnt
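# A minimal, self-contained sketch (hypothetical model/criterion, not this project's API) of the
# quantity mtask_forone_grad averages: the L2 norm of the loss gradient w.r.t. the input.
# It relies on torch, which is already imported at the top of this file.
def _example_input_grad_norm(model, criterion, x, y):
    x = x.clone().detach().requires_grad_(True)
    loss = criterion(model(x), y)
    loss.backward()
    return x.grad.norm().item()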
from learning.attack import PGD_attack_mtask, PGD_attack_mtask_L2, PGD_attack_mtask_city
from learning.utils_learn import accuracy
def mtask_forone_advacc(val_loader, model, criterion, task_name, args, info, epoch=0, writer=None,
comet=None, test_flag=False, test_vis=False, norm='Linf'):
"""
NOTE: test_flag is used when evaluating multiple models; in that case the loss dict is returned so results can be plotted and analysed.
"""
assert len(task_name) > 0
avg_losses = {}
num_classes = args.classes
hist = np.zeros((num_classes, num_classes))
for c_name, criterion_fun in criterion.items():
avg_losses[c_name] = AverageMeter()
seg_accuracy = AverageMeter()
seg_clean_accuracy = AverageMeter()
model.eval()  # eval mode is required for correct results (batchnorm/dropout behave differently in training mode)
print("using norm type", norm)
for i, (input, target, mask) in enumerate(val_loader):
if test_vis:
clean_output = model(Variable(input.cuda(), requires_grad=False))
seg_clean_accuracy.update(accuracy(clean_output['segmentsemantic'], target['segmentsemantic'].long().cuda()),
input.size(0))
if args.steps == 0 or args.step_size == 0:
args.epsilon = 0
if norm == 'Linf':
if args.dataset == 'taskonomy':
adv_img = PGD_attack_mtask(input, target, mask, model, criterion, task_name, args.epsilon, args.steps, args.dataset,
args.step_size, info, args, using_noise=True)
elif args.dataset == 'cityscape':
adv_img = PGD_attack_mtask_city(input, target, mask, model, criterion, task_name, args.epsilon, args.steps,
args.dataset,
args.step_size, info, args, using_noise=True)
elif norm == 'l2':
adv_img = PGD_attack_mtask_L2(input, target, mask, model, criterion, task_name, args.epsilon, args.steps,
args.dataset,
args.step_size)
# image_var = Variable(adv_img.data, requires_grad=False)
image_var = adv_img.data
# image_var = input
if torch.cuda.is_available():
image_var = image_var.cuda()
for keys, m in mask.items():
mask[keys] = m.cuda()
for keys, tar in target.items():
target[keys] = tar.cuda()
# print("diff", torch.sum(torch.abs(raw_input-image_var)))
with torch.no_grad():
output = model(image_var)
sum_loss = None
loss_dict = {}
for c_name, criterion_fun in criterion.items():
this_loss = criterion_fun(output[c_name].float(), target[c_name],
mask[c_name])
if sum_loss is None:
sum_loss = this_loss
else:
sum_loss = sum_loss + this_loss
loss_dict[c_name] = this_loss
avg_losses[c_name].update(loss_dict[c_name].data.item(), input.size(0))
if 'segmentsemantic' in criterion.keys():
# this is accuracy for segmentation
seg_accuracy.update(accuracy(output['segmentsemantic'], target['segmentsemantic'].long()), input.size(0))
#TODO: also mIOU here
class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
target_seg = target['segmentsemantic'].cpu().data.numpy() if torch.cuda.is_available() else target['segmentsemantic'].data.numpy()
class_prediction = class_prediction.cpu().data.numpy() if torch.cuda.is_available() else class_prediction.data.numpy()
hist += fast_hist(class_prediction.flatten(), target_seg.flatten(), num_classes)
if i % 500 == 0:
class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
# print(target['segmentsemantic'].shape)
decoded_target = decode_segmap(
target['segmentsemantic'][0][0].cpu().data.numpy() if torch.cuda.is_available() else
target['segmentsemantic'][0][0].data.numpy(),
args.dataset)
decoded_target = np.moveaxis(decoded_target, 2, 0)
decoded_class_prediction = decode_segmap(
class_prediction[0].cpu().data.numpy() if torch.cuda.is_available() else class_prediction[
0].data.numpy(), args.dataset)
decoded_class_prediction = np.moveaxis(decoded_class_prediction, 2, 0)
if not test_flag:
writer.add_image('Val/image clean ', back_transform(input, info)[0])
writer.add_image('Val/image adv ', back_transform(adv_img, info)[0])
writer.add_image('Val/image gt for adv ', decoded_target)
writer.add_image('Val/image adv prediction ', decoded_class_prediction)
# if comet is not None: comet.log_image(back_transform(input, info)[0].cpu(), name='Val/image clean ', image_channels='first')
# if comet is not None: comet.log_image(back_transform(adv_img, info)[0].cpu(), name='Val/image adv ', image_channels='first')
# if comet is not None: comet.log_image(decoded_target, name='Val/image gt for adv ', image_channels='first')
# if comet is not None: comet.log_image(decoded_class_prediction, name='Val/image adv prediction ', image_channels='first')
if 'segmentsemantic' in criterion.keys():
# this is accuracy for segmentation
seg_accuracy.update(accuracy(output['segmentsemantic'], target['segmentsemantic'].long()), input.size(0))
#TODO: also mIOU here
class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
target_seg = target['segmentsemantic'].cpu().data.numpy() if torch.cuda.is_available() else target['segmentsemantic'].data.numpy()
class_prediction = class_prediction.cpu().data.numpy() if torch.cuda.is_available() else class_prediction.data.numpy()
hist += fast_hist(class_prediction.flatten(), target_seg.flatten(), num_classes)
if args.debug:
if i>1:
break
if test_vis:
print("clean seg accuracy: {}".format(seg_clean_accuracy.avg))
str_attack_result = ''
str_not_attacked_task_result = ''
for keys, loss_term in criterion.items():
if keys in task_name:
str_attack_result += 'Attacked Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(keys, loss=avg_losses[keys])
else:
str_not_attacked_task_result += 'Not att Task Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(keys, loss=avg_losses[keys])
# Tensorboard logger
if not test_flag:
for keys, _ in criterion.items():
if keys in task_name:
writer.add_scalar('Val Adv Attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg, epoch)
if comet is not None: comet.log_metric('Val Adv Attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)
else:
writer.add_scalar('Val Adv not attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)
if comet is not None: comet.log_metric('Val Adv not attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)
if 'segmentsemantic' in criterion.keys():
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
mIoU = round(np.nanmean(ious), 2)
str_attack_result += '\n Segment Score ({score.avg:.3f}) \t'.format(score=seg_accuracy)
str_attack_result += ' Segment ===> mIoU {}\n'.format(mIoU)
if comet is not None: comet.log_metric('segmentsemantic Attacked IOU', mIoU)
if comet is not None: comet.log_metric('segmentsemantic Attacked Score', seg_accuracy.avg)
print('clean task')
print(str_not_attacked_task_result)
if test_flag:
dict_losses = {}
for key, loss_term in criterion.items():
dict_losses[key] = avg_losses[key].avg
# print(str_attack_result, "\nnew", avg_losses[keys].avg, "\n")
if 'segmentsemantic' in criterion.keys():
dict_losses['segmentsemantic'] = {'iou' : mIoU,
'loss' : avg_losses['segmentsemantic'].avg,
'seg_acc': seg_accuracy.avg}
print("These losses are returned", dict_losses)
#Compute the dictionary of losses that we want. Desired: {'segmentsemantic:[mIoU, cel],'keypoints2d':acc,'}
return dict_losses
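# For reference, a generic L-inf PGD sketch (a simplified illustration only; the attack actually
# used above is PGD_attack_mtask from learning.attack, which also handles masks, multiple task
# losses and dataset-specific normalization).
def _example_pgd_linf(model, criterion, x, y, epsilon, steps, step_size):
    x_adv = x.clone().detach() + torch.empty_like(x).uniform_(-epsilon, epsilon)
    for _ in range(steps):
        x_adv.requires_grad_(True)
        loss = criterion(model(x_adv), y)
        grad = torch.autograd.grad(loss, x_adv)[0]
        # ascend the loss, then project back into the epsilon-ball around x
        x_adv = x_adv.detach() + step_size * grad.sign()
        x_adv = x + torch.clamp(x_adv - x, -epsilon, epsilon)
    return x_adv.detach()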
def mtask_test_all(val_loader, model, criterion, task_name, all_task_name_list, args, info, writer=None, epoch=0,
test_flag=False, test_vis=False):
"""
task_name: not sorted here, so the sequence of tasks can be defined rigorously
all_task_name_list: put the task under attack first.
NOTE: test_flag is used when evaluating multiple models; in that case results are returned so they can be plotted and analysed.
"""
assert len(task_name) > 0
avg_losses = {}
num_classes = args.classes
hist = np.zeros((num_classes, num_classes))
num_of_tasks = len(all_task_name_list)
for c_name, criterion_fun in criterion.items():
avg_losses[c_name] = AverageMeter()
seg_accuracy = AverageMeter()
seg_clean_accuracy = AverageMeter()
matrix_cos_all = np.zeros((num_of_tasks, num_of_tasks))
matrix_cos = np.zeros((num_of_tasks, num_of_tasks))
grad_norm_list_all = np.zeros((num_of_tasks))
grad_norm_list = np.zeros((num_of_tasks))
grad_norm_joint_all = 0
model.eval()  # eval mode is required for correct results (batchnorm/dropout behave differently in training mode)
for i, (input, target, mask) in enumerate(val_loader):
if test_vis:
clean_output = model(Variable(input.cuda(), requires_grad=False))
seg_clean_accuracy.update(
accuracy(clean_output['segmentsemantic'], target['segmentsemantic'].long().cuda()),
input.size(0))
adv_img = PGD_attack_mtask(input, target, mask, model, criterion, task_name, args.epsilon, args.steps,
args.dataset,
args.step_size, info, args, using_noise=True)
# image_var = Variable(adv_img.data, requires_grad=False)
image_var = adv_img.data
# print("diff", torch.sum(torch.abs(raw_input-image_var)))
grad_list = []
if torch.cuda.is_available():
for keys, tar in mask.items():
mask[keys] = tar.cuda()
input = input.cuda()
for keys, tar in target.items():
target[keys] = tar.cuda()
total_grad = None
for jj, each in enumerate(all_task_name_list):
input_var = torch.autograd.Variable(input, requires_grad=True)
output = model(input_var)
# total_loss = criterion['Loss'](output, target)
loss_task = criterion[each](output[each], target[each], mask[each])
loss_task.backward()
grad = input_var.grad.cpu().numpy()
grad_norm_list[jj] = np.linalg.norm(grad)
grad_normalized | |
"""Contains the grid scenario class."""
from flow.scenarios.base_scenario import Scenario
from flow.core.params import InitialConfig
from flow.core.params import TrafficLightParams
from collections import defaultdict
ADDITIONAL_NET_PARAMS = {
# dictionary of grid array data
"grid_array": {
# number of horizontal rows of edges
"row_num": 3,
# number of vertical columns of edges
"col_num": 2,
# length of inner edges in the grid network
"inner_length": None,
# length of edges that vehicles start on
"short_length": None,
# length of final edge in route
"long_length": None,
# number of cars starting at the edges heading to the top
"cars_top": 20,
# number of cars starting at the edges heading to the bottom
"cars_bot": 20,
# number of cars starting at the edges heading to the left
"cars_left": 20,
# number of cars starting at the edges heading to the right
"cars_right": 20,
},
# number of lanes in the horizontal edges
"horizontal_lanes": 1,
# number of lanes in the vertical edges
"vertical_lanes": 1,
# speed limit for all edges, may be represented as a float value, or a
# dictionary with separate values for vertical and horizontal lanes
"speed_limit": {
"vertical": 35,
"horizontal": 35
},
}
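# A hypothetical, fully specified version of the parameters above, for illustration only
# (the None length placeholders must be replaced with actual edge lengths, in meters,
# before the scenario can be built):
EXAMPLE_NET_PARAMS = {
    "grid_array": {
        "row_num": 3, "col_num": 2,
        "inner_length": 300, "short_length": 300, "long_length": 500,
        "cars_top": 20, "cars_bot": 20, "cars_left": 20, "cars_right": 20,
    },
    "horizontal_lanes": 1,
    "vertical_lanes": 1,
    "speed_limit": {"vertical": 35, "horizontal": 35},
}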
class SimpleGridScenario(Scenario):
"""Grid scenario class.
The grid scenario consists of m vertical lanes and n horizontal lanes,
with a total of nxm intersections where the vertical and horizontal
edges meet.
Requires from net_params:
* **grid_array** : dictionary of grid array data, with the following keys
* **row_num** : number of horizontal rows of edges
* **col_num** : number of vertical columns of edges
* **inner_length** : length of inner edges in the grid network
* **short_length** : length of edges that vehicles start on
* **long_length** : length of final edge in route
* **cars_top** : number of cars starting at the edges heading to the top
* **cars_bot** : number of cars starting at the edges heading to the
bottom
* **cars_left** : number of cars starting at the edges heading to the
left
* **cars_right** : number of cars starting at the edges heading to the
right
* **horizontal_lanes** : number of lanes in the horizontal edges
* **vertical_lanes** : number of lanes in the vertical edges
* **speed_limit** : speed limit for all edges. This may be represented as a
float value, or a dictionary with separate values for vertical and
horizontal lanes.
In order for right-of-way dynamics to take place at the intersections,
set *no_internal_links* in net_params to False.
See flow/scenarios/base_scenario.py for description of params.
"""
def __init__(self,
name,
vehicles,
net_params,
initial_config=InitialConfig(),
traffic_lights=TrafficLightParams()):
"""Initialize an nxm grid scenario."""
optional = ["tl_logic"]
for p in ADDITIONAL_NET_PARAMS.keys():
if p not in net_params.additional_params and p not in optional:
raise KeyError('Network parameter "{}" not supplied'.format(p))
for p in ADDITIONAL_NET_PARAMS["grid_array"].keys():
if p not in net_params.additional_params["grid_array"]:
raise KeyError(
'Grid array parameter "{}" not supplied'.format(p))
# this is a (mx1)x(nx1)x2 array
# the third dimension is vertical length, horizontal length
self.grid_array = net_params.additional_params["grid_array"]
vertical_lanes = net_params.additional_params["vertical_lanes"]
horizontal_lanes = net_params.additional_params["horizontal_lanes"]
self.horizontal_junction_len = 2.9 + 3.3 * vertical_lanes
self.vertical_junction_len = 2.9 + 3.3 * horizontal_lanes
self.row_num = self.grid_array["row_num"]
self.col_num = self.grid_array["col_num"]
self.num_edges = (self.col_num+1) * self.row_num * 2 \
+ (self.row_num+1) * self.col_num * 2 + self.row_num * self.col_num
self.inner_length = self.grid_array["inner_length"]
self.short_length = self.grid_array["short_length"]
self.long_length = self.grid_array["long_length"]
# this is a dictionary containing inner length, long outer length,
# short outer length, and number of rows and columns
self.grid_array = net_params.additional_params["grid_array"]
self.node_mapping = defaultdict(list)
self.name = "BobLoblawsLawBlog" # DO NOT CHANGE
super().__init__(name, vehicles, net_params, initial_config,
traffic_lights)
def specify_nodes(self, net_params):
"""See parent class."""
nodes = []
nodes += self._build_inner_nodes()
nodes += self._build_outer_nodes()
return nodes
def specify_tll(self, net_params):
"""See parent class."""
return self._build_inner_nodes()
def specify_edges(self, net_params):
"""See parent class."""
edges = []
edges += self._build_inner_edges()
edges += self._build_outer_edges()
# Sort node_mapping in counterclockwise order
self._order_nodes()
return edges
def specify_routes(self, net_params):
"""See parent class."""
rts = {}
row_num = self.grid_array["row_num"]
col_num = self.grid_array["col_num"]
for i in range(row_num):
route_arr_bot = []
route_arr_top = []
for j in range(col_num + 1):
route_arr_bot += ["bot" + str(i) + '_' + str(j)]
route_arr_top += ["top" + str(i) + '_' + str(col_num - j)]
rts.update({"bot" + str(i) + '_' + '0': route_arr_bot})
rts.update({"top" + str(i) + '_' + str(col_num): route_arr_top})
for i in range(col_num):
route_arr_left = []
route_arr_right = []
for j in range(row_num + 1):
route_arr_right += ["right" + str(j) + '_' + str(i)]
route_arr_left += ["left" + str(row_num - j) + '_' + str(i)]
rts.update({"left" + str(row_num) + '_' + str(i): route_arr_left})
rts.update({"right" + '0' + '_' + str(i): route_arr_right})
return rts
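# For illustration (derived from the loops above): with row_num=1 and col_num=2 the returned
# dict contains, e.g., "bot0_0": ["bot0_0", "bot0_1", "bot0_2"] and
# "top0_2": ["top0_2", "top0_1", "top0_0"], i.e. each route starts on an outer edge and
# traverses its row (or column) straight across.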
def specify_types(self, net_params):
"""See parent class."""
add_params = net_params.additional_params
horizontal_lanes = add_params["horizontal_lanes"]
vertical_lanes = add_params["vertical_lanes"]
if isinstance(add_params["speed_limit"], int) or \
isinstance(add_params["speed_limit"], float):
speed_limit = {
"horizontal": add_params["speed_limit"],
"vertical": add_params["speed_limit"]
}
else:
speed_limit = add_params["speed_limit"]
types = [{
"id": "horizontal",
"numLanes": horizontal_lanes,
"speed": speed_limit["horizontal"]
}, {
"id": "vertical",
"numLanes": vertical_lanes,
"speed": speed_limit["vertical"]
}]
return types
# ===============================
# ============ UTILS ============
# ===============================
def _build_inner_nodes(self):
"""Build out the inner nodes of the system.
The nodes are numbered from bottom left and increasing first across the
columns and then across the rows. For example, in a 3x3 grid, we will
have four inner nodes with the bottom left being 0, the bottom right
being 1, the top left being 2, the top right being 3. The coordinate of
the bottom left inner node is (0, 0).
Yields
------
list <dict>
List of inner nodes
"""
lanes = max(self.net_params.additional_params["horizontal_lanes"],
self.net_params.additional_params["vertical_lanes"])
tls = self.net_params.additional_params.get("traffic_lights", True)
node_type = "traffic_light" if tls else "priority"
row_num = self.grid_array["row_num"]
col_num = self.grid_array["col_num"]
inner_length = self.grid_array["inner_length"]
nodes = []
# sweep up across columns
for i in range(row_num):
# sweep across rows
for j in range(col_num):
index = i * col_num + j
x_center = j * inner_length
y_center = i * inner_length
nodes.append({
"id": "center" + str(index),
"x": x_center,
"y": y_center,
"type": node_type,
"radius": (2.9 + 3.3 * lanes)/2,
})
return nodes
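# Worked example of the numbering above: with row_num=2, col_num=2 and inner_length=L the
# inner nodes are center0 at (0, 0), center1 at (L, 0), center2 at (0, L) and center3 at (L, L).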
def _build_outer_nodes(self):
"""Build out the column nodes.
There are two in each column below the bottom row, and two in each
column above the top row. They are numbered with regards to the column
they are in. The bottom are labeled "bot_col_short" and "bot_col_long".
Top are named similarly. We then repeat the same process for the outer
row nodes
Yields
------
list <dict>
List of column, row nodes
"""
col_num = self.grid_array["col_num"]
row_num = self.grid_array["row_num"]
inner_length = self.grid_array["inner_length"]
short_length = self.grid_array["short_length"]
long_length = self.grid_array["long_length"]
nodes = []
for i in range(col_num):
# build the bottom nodes
nodes += [{
"id": "bot_col_short" + str(i),
"x": i * inner_length,
"y": -short_length,
"type": "priority"
}, {
"id": "bot_col_long" + str(i),
"x": i * inner_length,
"y": -long_length,
"type": "priority"
}]
# build the top nodes
nodes += [{
"id": "top_col_short" + str(i),
"x": i * inner_length,
"y": (row_num - 1) * inner_length + short_length,
"type": "priority"
}, {
"id": "top_col_long" + str(i),
"x": i * inner_length,
"y": (row_num - 1) * inner_length + long_length,
"type": "priority"
}]
for i in range(row_num):
# build the left nodes
nodes += [{
"id": "left_row_short" + str(i),
"x": -short_length,
"y": i * inner_length,
"type": "priority"
}, {
"id": "left_row_long" + str(i),
"x": -long_length,
"y": i * inner_length,
"type": "priority"
}]
# build the right nodes
nodes += [{
"id": "right_row_short" + str(i),
"x": (col_num - 1) * inner_length + short_length,
"y": i * inner_length,
"type": "priority"
}, {
"id": "right_row_long" + str(i),
"x": (col_num - 1) * inner_length + long_length,
"y": i * inner_length,
"type": "priority"
}]
return nodes
def _build_inner_edges(self):
"""Build the inner edges.
First we build all of the column edges. For the upper edge, it would be
called right_i_j or left_i_j where i is the row number and j is the
column to the right of it.
For the vertical edges the notation would be bot_i_j or top_i_j where
i is the row above it, and | |
#This file contains the code to build a Deep Factorization Machine model
#
import tensorflow as tf
import numpy as np
from keras.layers import Embedding, Reshape, Activation, Lambda, Input, Dropout, Dense,SpatialDropout1D
from keras.regularizers import l2
from keras.initializers import RandomNormal
from keras.models import Model
from keras.utils.generic_utils import Progbar
from keras.layers.merge import Add,Multiply,Concatenate,Dot
from keras.constraints import non_neg
from .scaler import Scaler
import keras.backend as K
import itertools
def flatten(list_of_lists):
flattened = []
for sublist in list_of_lists:
flattened.append("_".join(sublist))
return flattened
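# For clarity, a small example of what flatten produces:
# flatten([["user", "id"], ["item"]]) returns ["user_id", "item"]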
def NCEobj(probs):
'''
transforms the objective so that the model can be evaluated via noise contrastive estimation
noise_probs = k*q(w)
model_probs = p_theta (w|c)
NOISE_PROB_MULTIPLIER is a constant used to scale the noise probabilities so they are "close" to 1,
so the model doesn't get stuck with astronomically low or high noise probs
'''
NOISE_PROB_MULTIPLIER = 1.
import tensorflow as tf
#model_probs,noise_probs,noise_multiplier = probs
model_probs,noise_probs = probs
#print model_probs,noise_probs
return tf.div( tf.exp(model_probs),tf.add(tf.exp(model_probs),noise_probs*NOISE_PROB_MULTIPLIER) )
def nce_output_shape(input_shape):
return (1,)
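# A small numeric illustration (plain numpy, outside the Keras graph) of the transform NCEobj
# applies: p = exp(model_logit) / (exp(model_logit) + scaled_noise_prob).
def _example_nce_prob(model_logit, scaled_noise_prob):
    return np.exp(model_logit) / (np.exp(model_logit) + scaled_noise_prob)
# e.g. _example_nce_prob(0.0, 1.0) == 0.5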
class DeepFM():
def __init__(self, model_features, feature_dimensions, feature_names=None,
realval=None, obj='ns', mask_zero=False,
deepin_feature=None, deepin_inputs=[], deepin_layers =[]):
"""
Initializes a Deep Factorization Machine Model
:param model_features: a list of lists of columns for each feature / embedding in the model
:param feature_dimensions: A list where each entry represents the number of possible values
a discrete feature has if it is a categorical. Otherwise, if the feature is real-valued, it
indicates the dimensionality of the feature (how many cols)
(just a number)
If zero, only biases will be used, and there will be no embeddings.
If you only have one feature, this MUST be zero (because there are no interactions)
:param feature_names: the names of the features to be used in the training sample
:param realval: whether or not features are real-valued:
(either True or False which indicates all are Real-valued or categories/indices, respectively,
or a list as long as feature_dimensions of Booleans indicating status of each feature.
only relevant for features that are not deep-in
:param obj: the objective function used for evaluating the model (ns=negative sampling, nce=noise contrastive estimation)
:param deepin_feature: a bool list, one entry per feature, that specifies whether the
feature requires deep feature extraction
:param deepin_layers: a list of keras layers, corresponding to each "deep" feature. this will be directly input into the FM
as if it were a factor. It is assumed that deepin_layers output a real-valued matrix of dimension = feature_dimensions
specified in the feature_dimensions list.
:param deepin_inputs: a list of keras layers, corresponding to each raw input feature for the feature extraction.
this will be directly input into the keras model as an Input tensor.
:param mask_zero: a toggle to mask ALL zero values for categoricals as zero vectors
"""
if realval is None:
self.realval = [False]*len(feature_dimensions) #default to all categoricals
else:
self.realval = realval
assert (type(self.realval)==list) and len(self.realval) == len(feature_dimensions), "realval must either be a boolean list with length = #features, or None"
self.model_features = model_features
self.feature_dimensions = feature_dimensions
if feature_names is None:
self.feature_names = flatten(model_features)
else:
self.feature_names = feature_names
assert len(self.feature_names) == len(self.feature_dimensions), "lengths do not match"
assert obj=='ns' or obj=='nce',"obj. function must be negative sampling (ns) or noise contrastive estimation (nce)"
self.obj = obj
self.mask_zero=mask_zero
#####
#Deep-in feature indicators
if deepin_feature == None:
self.deepin_feature = [False]*len(feature_dimensions) #default to all categoricals
else:
assert len(deepin_feature) == len(feature_dimensions), "must provide boolean list w/ length=#features"
self.deepin_feature = deepin_feature
#construct list of deep-in inputs
self.deepin_inputs = [deepin_inputs.pop(0) if self.deepin_feature[i] else None for i in range(len(feature_dimensions))]
assert(len(deepin_inputs) == 0), "provide deep input list of length = #deep features or # features"
#construct list of extracted deep-in features
self.deepin_layers = [deepin_layers.pop(0) if self.deepin_feature[i] else None for i in range(len(feature_dimensions)) ]
assert (len(deepin_layers) == 0), "provide deep feature layer list of length = #deep features or # features"
def check_build_params(self,l2_bias, l2_factors, l2_deep,bias_only,embeddings_only,deep_weight_groups,
deep_out_bias, deep_out_activation,
dropout_input,
dropout_layer):
'''
confirm that all passed params are of correct format and attach to model object
'''
assert type(l2_bias)==float and type(l2_factors)==float and type(l2_deep)==float, \
"L2 regularization terms must all be floats"
assert l2_bias >= 0. and l2_factors >= 0. and l2_deep >= 0., \
"L2 regularization terms must be non-negative"
self.l2_bias = l2_bias
self.l2_factors = l2_factors
self.l2_deep = l2_deep
if bias_only is None:
self.bias_only = [False]*len(self.feature_dimensions)
else:
assert type(bias_only)==list and len(bias_only) == len(self.feature_dimensions), \
"bias_only must be a boolean list with length = #features, or None"
self.bias_only = bias_only
if embeddings_only is None:
self.embeddings_only = [True]*len(self.feature_dimensions)
else:
assert type(embeddings_only)==list and len(embeddings_only) == len(self.feature_dimensions),\
"embeddings_only must either be a boolean list with length = #features, or None"
self.embeddings_only = embeddings_only
if deep_weight_groups is None:
self.deep_weight_groups= [ itertools.combinations(self.feature_names, 2) ]
else:
for g in deep_weight_groups:
if not isinstance(g, list):
raise RuntimeError("deep_weight_groups must be a list of lists. Where each element is a tuple for an interaction")
self.deep_weight_groups=deep_weight_groups
assert type(deep_out_bias) == bool, 'deep_out_bias must be a boolean'
self.deep_out_bias = deep_out_bias
self.deep_out_activation = deep_out_activation
assert dropout_layer>=0. and dropout_layer <=1. and dropout_input >=0. and dropout_input <=1, \
"Dropout args should be in [0,1]"
self.dropout_input = dropout_input
self.dropout_layer = dropout_layer
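# Example (hypothetical feature names) of the expected deep_weight_groups format: a list of
# groups, each group being a list of (feature_i, feature_j) interaction tuples, e.g.
#   deep_weight_groups = [[("user", "item"), ("user", "context")], [("item", "context")]]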
def build_discrete_feature_layers(self,feature_index):
'''
create keras bias and embedding layers (where relevant depending on bias_only, embeddings_only)
for a discrete categorical feature, where each integer represents a new category
args:
feature_index: the position of the feature in question in our list of features
'''
feature_dim = self.feature_dimensions[feature_index]
feature_cols = len(self.model_features[feature_index])
feature = Input(batch_shape=(None, feature_cols), name=self.feature_names[feature_index])
if (self.embedding_dimensions > 0) and (not self.bias_only[feature_index]):
ftemp = Embedding(input_dim=feature_dim,
output_dim=self.embedding_dimensions,
embeddings_regularizer=l2(self.l2_factors),
input_length=feature_cols,
embeddings_initializer='normal',
mask_zero = self.mask_zero,
name="embedding_{}".format(self.feature_names[feature_index]))(feature)
if self.dropout_input > 0:
ftemp_filtered = SpatialDropout1D(self.dropout_input,
name='dropout_embedding_{}'.format(self.feature_names[feature_index]))(ftemp)
else:
ftemp_filtered = ftemp
if feature_cols > 1:
ftemp_filtered = Lambda(lambda x: K.sum(x, axis=1, keepdims=True),
#output_shape=(self.embedding_dimensions,),
name="avg_embedding_{}".format(self.feature_names[feature_index]))(ftemp_filtered)
if self.mask_zero:
ftemp_filtered = Lambda(lambda x: x,
#output_shape=(self.embedding_dimensions,),
name='unmasker_{}'.format(self.feature_names[feature_index]))(ftemp_filtered)
factor = ftemp_filtered
#factor = Reshape((self.embedding_dimensions,),
# name="embedding_{}_reshaped".format(self.feature_names[feature_index]))(ftemp_filtered)
else:
factor=None
#bias term for categ. feature
if not self.embeddings_only[feature_index]:
btemp = Embedding(input_dim=feature_dim,
output_dim=1,
input_length=feature_cols,
mask_zero = self.mask_zero,
embeddings_regularizer=l2(self.l2_bias),
embeddings_initializer='normal',
name="bias_{}".format(self.feature_names[feature_index]))(feature)
if self.dropout_input > 0:
btemp_filtered = SpatialDropout1D(self.dropout_input,
name='dropout_biased_{}'.format(self.feature_names[feature_index]))(btemp)
else:
btemp_filtered = btemp
if feature_cols > 1:
btemp_filtered = Lambda(lambda x: K.sum(x, axis=1, keepdims=True), name="avg_bias_{}".format(self.feature_names[feature_index]))(btemp_filtered)
if self.mask_zero ==True:
btemp_filtered = Lambda(lambda x: x, name='unmasker_bias_{}'.format(self.feature_names[feature_index]))(btemp_filtered)
bias = Reshape((1,),
name="bias_{}_reshaped".format(self.feature_names[feature_index]))(btemp_filtered)
else:
bias=None
return feature,factor,bias
def build_realval_feature_layers(self,feature_index):
'''
create keras bias and embedding layers (where relevant depending on bias_only, embeddings_only)
realvalued variable. so each column in this feature is passed is interpreted as a number and passed through a linear fully connected layer
'''
feature_dim = self.feature_dimensions[feature_index]
feature = Input(batch_shape=(None, feature_dim), name=self.feature_names[feature_index])
if self.dropout_input > 0:
feature_filtered = Dropout(self.dropout_input,name='dropout_{}'.format(self.feature_names[feature_index]))(feature)
else:
feature_filtered = feature
if (self.embedding_dimensions > 0) and (not self.bias_only[feature_index]):
factor = Dense(units=self.embedding_dimensions,
use_bias = False,
kernel_regularizer=l2(self.l2_factors),
kernel_initializer='normal',
name="embedding_{}".format(self.feature_names[feature_index]))(feature_filtered)
else:
factor=None
if not self.embeddings_only[feature_index]:
bias = Dense(units=1,
use_bias=False,
kernel_regularizer=l2(self.l2_bias),
kernel_initializer='normal',
name="bias_{}".format(self.feature_names[feature_index]))(feature_filtered)
else:
bias=None
return feature, factor, bias
def build_variables(self, i, inputs, biases, factors, flattened_factors):
if inputs[i] is None: # The input hasn't been created, so we must do so:
#create bias/embeddings for each feature
if self.deepin_feature[i]:
feature, factor, bias = None, None, None
feature = self.deepin_inputs[i]
factor = self.deepin_layers[i]
if not self.embeddings_only[i]:
bias = Dense(units=1,
use_bias=False,
kernel_initializer='normal',
name="bias_{}".format(self.feature_names[i]))(factor)
elif self.realval[i]:
feature,factor,bias = self.build_realval_feature_layers(i)
else:
feature, factor, bias = self.build_discrete_feature_layers(i)
# Save layers:
inputs[i] = feature
biases[i] = bias
else: # We've created the input, so no need to do anything:
factor = factors[i]
# Make sure factor is (batch_size, num_words, embeddings) and not simply (batch_size, embeddings):
if factor is not None:
if (len(factor.shape) < 3):
if factors[i] is None:
factors[i] = Reshape((1, self.embedding_dimensions),
name="embedding_{}_reshaped".format(self.feature_names[i]))(factor)
flattened_factors[i] = factor
else:
factors[i] = factor
if flattened_factors[i] is None:
flattened_factors[i] = Reshape((self.embedding_dimensions,),
name="embedding_{}_flattened".format(self.feature_names[i]))(factor)
return flattened_factors[i]
def two_way_interactions(self, collapsed_type, deep_out, deep_kernel_constraint):
# Calculate interactions with a dot product
inputs = [None] * len(self.feature_names)
biases = [None] * len(self.feature_names)
factors = [None] * len(self.feature_names)
flattened_factors = [None] * len(self.feature_names)
interactions = []
for i, groups in enumerate(self.deep_weight_groups):
dot_products = []
for grp, (feature_i, feature_j) in enumerate(groups):
#factor_i = factors[self.feature_names.index(feature_i)]
index_i = self.feature_names.index(feature_i)
factor_i = self.build_variables(index_i, inputs, biases, factors, flattened_factors) # Does not create the variable if it already exists
if isinstance(feature_j, str) or isinstance(feature_j, unicode):
#factor_j = factors[ self.feature_names.index(feature_j) ]
index_j = self.feature_names.index(feature_j)
#: Inspect the arguments to the function
#: This allows the memoization to be the same
#: whether the function was called with
#: 1, b=2 being equivalent to a=1, b=2, etc.
new_args = []
arg_num = 0
# If the function uses VAR_KEYWORD type of parameters,
# we need to pass these further
kw_keys_remaining = list(kwargs.keys())
arg_names = get_arg_names(f)
args_len = len(arg_names)
for i in range(args_len):
arg_default = get_arg_default(f, i)
if i == 0 and arg_names[i] in ("self", "cls"):
#: use the id func of the class instance
#: this supports instance methods for
#: the memoized functions, giving more
#: flexibility to developers
arg = get_id(args[0])
arg_num += 1
elif arg_names[i] in kwargs:
arg = kwargs[arg_names[i]]
kw_keys_remaining.pop(kw_keys_remaining.index(arg_names[i]))
elif arg_num < len(args):
arg = args[arg_num]
arg_num += 1
elif arg_default:
arg = arg_default
arg_num += 1
else:
arg = None
arg_num += 1
#: Attempt to convert all arguments to a
#: hash/id or a representation?
#: Not sure if this is necessary, since
#: using objects as keys gets tricky quickly.
# if hasattr(arg, '__class__'):
# try:
# arg = hash(arg)
# except:
# arg = get_id(arg)
#: Or what about a special __cacherepr__ function
#: on an object, this allows objects to act normal
#: upon inspection, yet they can define a representation
#: that can be used to make the object unique in the
#: cache key. Given that a case comes across that
#: an object "must" be used as a cache key
# if hasattr(arg, '__cacherepr__'):
# arg = arg.__cacherepr__
new_args.append(arg)
new_args.extend(args[len(arg_names):])
return (
tuple(new_args),
OrderedDict(
sorted(
(k, v) for k, v in kwargs.items() if k in kw_keys_remaining
)
),
)
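# --- Illustrative sketch (standalone, not part of the method above) ---
# A simplified illustration of why kwargs are folded back into positional order
# before hashing: f(1, b=2) and f(a=1, b=2) must yield the same cache key.
# This uses inspect.signature instead of the project-internal helpers
# (get_arg_names / get_arg_default), so it is only an approximation of the logic above.
import inspect

def normalized_call(f, *args, **kwargs):
    # bind the call to the function's signature and fill in defaults
    bound = inspect.signature(f).bind(*args, **kwargs)
    bound.apply_defaults()
    return tuple(bound.arguments.items())

def add(a, b=0):
    return a + b

assert normalized_call(add, 1, b=2) == normalized_call(add, a=1, b=2)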
def _bypass_cache(self, unless, f, *args, **kwargs):
"""Determines whether or not to bypass the cache by calling unless().
Supports both unless() that takes in arguments and unless()
that doesn't.
"""
bypass_cache = False
if callable(unless):
argspec = inspect.getfullargspec(unless)
has_args = len(argspec.args) > 0 or argspec.varargs or argspec.varkw
# If unless() takes args, pass them in.
if has_args:
if unless(f, *args, **kwargs) is True:
bypass_cache = True
elif unless() is True:
bypass_cache = True
return bypass_cache
def memoize(
self,
timeout=None,
make_name=None,
unless=None,
forced_update=None,
response_filter=None,
hash_method=hashlib.md5,
cache_none=False,
):
"""Use this to cache the result of a function, taking its arguments
into account in the cache key.
Information on
`Memoization <http://en.wikipedia.org/wiki/Memoization>`_.
Example::
@cache.memoize(timeout=50)
def big_foo(a, b):
return a + b + random.randrange(0, 1000)
::
>>> big_foo(5, 2)
753
>>> big_foo(5, 3)
234
>>> big_foo(5, 2)
753
The returned decorated function now has three function attributes
assigned to it.
**uncached**
The original undecorated function. readable only
**cache_timeout**
The cache timeout value for this function.
For a custom value to take effect, this must be
set before the function is called.
readable and writable
**make_cache_key**
A function used in generating the cache_key used.
readable and writable
:param timeout: Default None. If set to an integer, will cache for that
amount of time. Unit of time is in seconds.
:param make_name: Default None. If set this is a function that accepts
a single argument, the function name, and returns a
new string to be used as the function name.
If not set then the function name is used.
:param unless: Default None. Cache will *always* execute the caching
facilities unless this callable is true.
This will bypass the caching entirely.
:param forced_update: Default None. If this callable is true,
cache value will be updated regardless cache
is expired or not. Useful for background
renewal of cached functions.
:param response_filter: Default None. If not None, the callable is
invoked after the cached function evaluation,
and is given one argument, the response
content. If the callable returns False, the
content will not be cached. Useful to prevent
caching of code 500 responses.
:param hash_method: Default hashlib.md5. The hash method used to
generate the keys for cached results.
:param cache_none: Default False. If set to True, add a key exists
check when cache.get returns None. This will likely
lead to wrongly returned None values in concurrent
situations and is not recommended to use.
"""
if not timeout:
timeout = self.cache_options["default_timeout"]
def memoize(f):
@functools.wraps(f)
def decorated_function(*args, **kwargs):
#: bypass cache
if self._bypass_cache(unless, f, *args, **kwargs):
return f(*args, **kwargs)
try:
cache_key = decorated_function.make_cache_key(
f, *args, **kwargs
)
if (
callable(forced_update)
and (
forced_update(*args, **kwargs)
if wants_args(forced_update)
else forced_update()
)
is True
):
rv = None
found = False
else:
rv = self.cache.get(cache_key)
found = True
# If the value returned by cache.get() is None, it
# might be because the key is not found in the cache
# or because the cached value is actually None
if rv is None:
# If we're sure we don't need to cache None values
# (cache_none=False), don't bother checking for
# key existence, as it can lead to false positives
# if a concurrent call already cached the
# key between steps. This would cause us to
# return None when we shouldn't
if not cache_none:
found = False
else:
found = self.cache.has(cache_key)
except Exception:
if self.config['CACHE_DEBUG']:
raise
logger.exception("Exception possibly due to cache backend.")
return f(*args, **kwargs)
if not found:
rv = f(*args, **kwargs)
if response_filter is None or response_filter(rv):
try:
self.cache.set(
cache_key,
rv,
timeout=decorated_function.cache_timeout,
)
except Exception:
if self.config['CACHE_DEBUG']:
raise
logger.exception(
"Exception possibly due to cache backend."
)
return rv
decorated_function.uncached = f
decorated_function.cache_timeout = timeout
decorated_function.make_cache_key = self._memoize_make_cache_key(
make_name=make_name,
timeout=decorated_function,
forced_update=forced_update,
hash_method=hash_method,
)
decorated_function.delete_memoized = lambda: self.delete_memoized(f)
return decorated_function
return memoize
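# --- Illustrative usage sketch ---
# How the memoize decorator defined above is typically applied. The Cache
# constructor and its config keys are an assumption (a simple in-memory backend);
# the real project may require additional configuration (e.g. an eviction strategy).
import random
from falcon_caching import Cache

demo_cache = Cache(config={'CACHE_TYPE': 'simple'})  # assumed minimal config

@demo_cache.memoize(timeout=60)
def slow_sum(a, b):
    return a + b + random.randrange(0, 1000)

first = slow_sum(2, 3)
assert slow_sum(2, 3) == first              # second call is served from the cache
demo_cache.delete_memoized(slow_sum, 2, 3)  # invalidate just this argument combination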
def delete_memoized(self, f, *args, **kwargs):
"""Deletes the specified functions caches, based by given parameters.
If parameters are given, only the functions that were memoized
with them will be erased. Otherwise all versions of the caches
will be forgotten.
Example::
@cache.memoize(50)
def random_func():
return random.randrange(1, 50)
@cache.memoize()
def param_func(a, b):
return a+b+random.randrange(1, 50)
::
>>> random_func()
43
>>> random_func()
43
>>> cache.delete_memoized(random_func)
>>> random_func()
16
>>> param_func(1, 2)
32
>>> param_func(1, 2)
32
>>> param_func(2, 2)
47
>>> cache.delete_memoized(param_func, 1, 2)
>>> param_func(1, 2)
13
>>> param_func(2, 2)
47
Delete memoized is also smart about instance methods vs class methods.
When passing an instance method, it will only clear the cache related
to that instance of that object. (object uniqueness can be overridden
by defining the __repr__ method, such as user id).
When passing a classmethod, it will clear all caches related across
all instances of that class.
Example::
class Adder(object):
@cache.memoize()
def add(self, b):
return b + random.random()
::
>>> adder1 = Adder()
>>> adder2 = Adder()
>>> adder1.add(3)
3.23214234
>>> adder2.add(3)
3.60898509
>>> cache.delete_memoized(adder1.add)
>>> adder1.add(3)
3.01348673
>>> adder2.add(3)
3.60898509
>>> cache.delete_memoized(Adder.add)
>>> adder1.add(3)
3.53235667
>>> adder2.add(3)
3.72341788
:param fname: The memoized function.
:param \*args: A list of positional parameters used with
memoized function.
:param \**kwargs: A dict of named parameters used with
memoized function.
.. note::
Falcon-Caching uses inspect to order kwargs into positional args when
the function is memoized. If you pass a function reference into
``fname``, Falcon-Caching will be able to place the args/kwargs in
the proper order, and delete the positional cache.
However, if ``delete_memoized`` is just called with the name of the
function, be sure to pass in potential arguments in the same order
as defined in your function as args only, otherwise Falcon-Caching
will not be able to compute the same cache key and delete all
memoized versions of it.
.. note::
Falcon-Caching maintains an internal random version hash for
the function. Using delete_memoized will only swap out
the version hash, causing the memoize function to recompute
results and put them into another key.
This leaves any computed caches for this memoized function within
the caching backend.
It is recommended to use a very high timeout with memoize if using
this function, so that when the version hash is swapped, the old
cached results would eventually be reclaimed by the caching
backend.
"""
if not
# import monkeytype
import pyorient
from pyorient.ogm import declarative
from pyorient.ogm.property import *
# with monkeytype.trace():
Node = declarative.declarative_node()
Relationships = declarative.declarative_relationship()
class Core(Node):
# element_type = 'asset'
# element_plural = 'assets'
type = String(nullable=False)
id_ = String(nullable=False)
created_by_ref = String()
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
class Asset(Node):
# element_type = '`asset`'
# element_plural = 'assets'
type = String(nullable=False)
id_ = String(nullable=False)
created_by_ref = String()
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
name = String(nullable=False)
description = String()
category = String()
kind_of_asset = String()
category_ext = EmbeddedList()
compromised = Boolean(default=False)
owner_aware = Boolean(default=False)
technical_characteristics = EmbeddedList()
class KillChainPhase(Node):
"""For more detailed information on this object's properties, see
`the STIX 2.1 specification <https://docs.oasis-open.org/cti/stix/v2.1/cs01/stix-v2.1-cs01.html#_i4tjv75ce50h>`__.
"""
kill_chain_name = String(nullable=False)
phase_name = String(nullable=True)
class MarkingDefinition(Node):
# element_type = '`marking-definition`'
# element_plural = 'markings'
type = String(nullable=False)
spec_version = String(nullable=False, default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
definition_type = String()
definition = EmbeddedMap()
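# --- Illustrative wiring sketch (assumed connection details) ---
# How the declarative Node / Relationships registries defined in this module are
# typically bound to an OrientDB database with pyorient's OGM. The URL and
# credentials below are placeholders, not values taken from this project.
from pyorient.ogm import Graph, Config

demo_graph = Graph(Config.from_url('localhost/stix', 'root', 'root_password',
                                   initial_drop=False))
demo_graph.create_all(Node.registry)            # create vertex classes for the models above
demo_graph.create_all(Relationships.registry)   # create edge classes, if any are declared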
class AttackPattern(Node):
# element_type = '`attack-pattern`'
# element_plural = 'attack_patterns'
type = String(nullable=False)
spec_version = String(nullable=False, default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
name = String(nullable=False)
description = String()
aliases = EmbeddedList()
kill_chain_phases = EmbeddedList()
class Campaign(Node):
# element_type = '`campaign`'
# element_plural = 'campaigns'
type = String(nullable=False)
spec_version = String(nullable=False, default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
name = String(nullable=False)
description = String()
aliases = EmbeddedList()
first_seen = DateTime()
last_seen = DateTime()
objective = String()
class CourseOfAction(Node):
# element_type = '`course-of-action`'
# element_plural = 'actions'
type = String(nullable=False)
spec_version = String(nullable=False, default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
name = String(nullable=False)
description = String()
action = String()
class ExternalReference(Node):
source_name = String()
description = String()
url = String()
hashes = EmbeddedMap()
external_id = String()
class GranularMarking(Node):
lang = String()
marking_ref = EmbeddedList()
selectors = EmbeddedList()
class Grouping(Node):
# element_type = '`grouping`'
# element_plural = 'groupings'
type = String(nullable=False)
spec_version = String(nullable=False, default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
name = String(nullable=False)
description = String()
context = String()
object_refs = EmbeddedList()
class Identity(Node):
# element_type = '`identity`'
# element_plural = 'identities'
type = String(nullable=False)
spec_version = String(nullable=False, default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
name = String(nullable=False)
description = String()
identity_class = String()
sectors = EmbeddedList()
contact_information = String()
class Indicator(Node):
# element_type = '`indicator`'
# element_plural = 'indicators'
type = String(nullable=False)
spec_version = String(nullable=False, default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
name = String(nullable=False)
description = String()
indicator_types = EmbeddedList()
pattern = String(nullable=False)
pattern_type = String()
pattern_version = String()
valid_from = DateTime()
valid_until = DateTime()
kill_chain_phases = EmbeddedList()
class Infrastructure(Node):
# element_type = '`infrastructure`'
# element_plural = 'infrastructures'
type = String(nullable=False)
spec_version = String(nullable=False, default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
name = String(nullable=False)
description = String()
infrastructure_types = EmbeddedList()
aliases = EmbeddedList()
kill_chain_phases = EmbeddedList(linked_to=KillChainPhase)
first_seen = DateTime()
last_seen = DateTime()
class IntrusionSet(Node):
# element_type = '`intrusion-set`'
# element_plural = 'intrusion_sets'
type = String(nullable=False)
spec_version = String(default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
name = String(nullable=False)
description = String()
aliases = EmbeddedList()
first_seen = DateTime()
last_seen = DateTime()
goals = EmbeddedList()
resource_level = String()
primary_motivation = String()
secondary_motivations = EmbeddedList()
class Location(Node):
# element_type = '`location`'
# element_plural = 'locations'
type = String(nullable=False)
spec_version = String(default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
name = String(nullable=False)
description = String()
latitude = Float()
longitude = Float()
precision = Float()
region = String()
country = String()
administrative_area = String()
city = String()
street_address = String()
postal_code = String()
class Malware(Node):
# element_type = '`malware`'
# element_plural = 'malwares'
type = String(nullable=False)
spec_version = String(default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
name = String()
description = String()
malware_types = EmbeddedList()
is_family = Boolean(default=False)
aliases = EmbeddedList()
kill_chain_phases = EmbeddedList()
first_seen = DateTime()
last_seen = DateTime()
operating_system_refs = EmbeddedList()
architecture_execution_envs = EmbeddedList()
implementation_languages = EmbeddedList()
capabilities = EmbeddedList()
sample_refs = EmbeddedList()
class MalwareAnalysis(Node):
# element_type = '`malware`'
# element_plural = 'malwares'
type = String(nullable=False)
spec_version = String(default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
product = String()
version = String()
host_vm_ref = String()
operating_system_ref = String()
installed_software_ref = EmbeddedList()
configuration_version = String()
modules = EmbeddedList()
analysis_engine_version = String()
analysis_definition_version = String()
submitted = DateTime()
analysis_started = DateTime()
analysis_ended = DateTime()
result_name = String()
result = String()
analysis_sco_refs = EmbeddedList()
sample_ref = String()
class Note(Node):
# element_type = '`note`'
# element_plural = 'notes'
type = String(nullable=False)
spec_version = String(default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
abstract = String()
content = String()
authors = EmbeddedList()
object_refs = EmbeddedList()
class ObservedData(Node):
# element_type = '`observed-data`'
# element_plural = 'observed_data'
type = String(nullable=False)
spec_version = String(default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
first_observed = DateTime()
last_observed = DateTime()
number_observed = Integer(nullable=False, default=1)
objects_ = EmbeddedMap()
object_refs = EmbeddedList()
class Opinion(Node):
# element_type = '`opinion`'
# element_plural = 'opinions'
type = String(nullable=False)
spec_version = String(default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
explanation = String()
authors = EmbeddedList()
opinion = String()
object_refs = EmbeddedList()
class Report(Node):
# element_type = '`report`'
# element_plural = 'reports'
type = String(nullable=False)
spec_version = String(default="2.1")
id_ = String(nullable=False)
created = DateTime(nullable=False)
modified = DateTime(nullable=False)
created_by_ref = String()
revoked = Boolean(default=False, nullable=False)
labels = EmbeddedList()
confidence = Integer()
lang = String()
external_references = EmbeddedList()
object_marking_refs = EmbeddedList()
granular_markings = EmbeddedList()
name = String(nullable=False)
description = String()
report_types = EmbeddedList()
published = DateTime()
object_refs = EmbeddedList()
class ThreatActor(Node):
# element_type = '`threat-actor`'
# element_plural = 'actors'
type = String(nullable=False)
spec_version = String(default="2.1")
id_ = String(nullable=False)
raise InvalidConfigFileException(
"{}.{} has to be a list of key-value pairs".format(
SAMPLE_MODS_KEY, IMPLIED_KEY
)
)
_LOGGER.debug("Sample attribute implications: {}".format(implications))
for implication in implications:
if not all([key in implication for key in IMPLIED_COND_KEYS]):
raise InvalidConfigFileException(
"{}.{} section is invalid: {}".format(
SAMPLE_MODS_KEY, IMPLIED_KEY, implication
)
)
implier_attrs = list(implication[IMPLIED_IF_KEY].keys())
implied_attrs = list(implication[IMPLIED_THEN_KEY].keys())
for sample in self.samples:
_LOGGER.debug(
"Setting Sample attributes implied by '{}'".format(implier_attrs)
)
for implier_attr in implier_attrs:
implier_val = implication[IMPLIED_IF_KEY][implier_attr]
if implier_attr not in sample:
_LOGGER.debug(
"Sample lacks implier attr ({}), "
"skipping:".format(implier_attr)
)
break
sample_val = sample[implier_attr]
if sample_val not in implier_val:
_LOGGER.debug(
"Sample attr value does not match any of implier "
"requirements ({} not in {}), skipping".format(
sample_val, implier_val
)
)
break
else:
# only executed if the inner loop did NOT break
for implied_attr in implied_attrs:
imp_val = implication[IMPLIED_THEN_KEY][implied_attr]
_LOGGER.debug(
"Setting implied attr: '{}={}'".format(
implied_attr, imp_val
)
)
sample.__setitem__(implied_attr, imp_val)
def attr_derive(self, attrs=None):
"""
Set derived attributes for all Samples tied to this Project instance
"""
if not self._modifier_exists(DERIVED_KEY):
return
da = self[CONFIG_KEY][SAMPLE_MODS_KEY][DERIVED_KEY][DERIVED_ATTRS_KEY]
ds = self[CONFIG_KEY][SAMPLE_MODS_KEY][DERIVED_KEY][DERIVED_SOURCES_KEY]
derivations = attrs or (da if isinstance(da, list) else [da])
_LOGGER.debug("Derivations to be done: {}".format(derivations))
for sample in self.samples:
for attr in derivations:
if not hasattr(sample, attr):
_LOGGER.debug("sample lacks '{}' attribute".format(attr))
continue
elif attr in sample._derived_cols_done:
_LOGGER.debug("'{}' has been derived".format(attr))
continue
_LOGGER.debug(
"Deriving '{}' attribute for '{}'".format(attr, sample.sample_name)
)
# Set {atr}_key, so the original source can also be retrieved
setattr(sample, ATTR_KEY_PREFIX + attr, getattr(sample, attr))
derived_attr = sample.derive_attribute(ds, attr)
if derived_attr:
_LOGGER.debug("Setting '{}' to '{}'".format(attr, derived_attr))
setattr(sample, attr, derived_attr)
else:
_LOGGER.debug(
"Not setting null/empty value for data source"
" '{}': {}".format(attr, type(derived_attr))
)
sample._derived_cols_done.append(attr)
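# --- Illustrative sketch of attribute derivation (standalone) ---
# In a PEP config, "derive" maps an attribute's current value to a source
# template that is then filled in from the sample's own attributes, e.g.:
#
#   sample_modifiers:
#     derive:
#       attributes: [file_path]
#       sources:
#         local: "/data/{organism}/{sample_name}.fastq.gz"
#
# For a single sample the effect is essentially a str.format over the sample
# attributes (the real implementation lives in Sample.derive_attribute):
demo_sources = {"local": "/data/{organism}/{sample_name}.fastq.gz"}
demo_sample = {"sample_name": "frog_1", "organism": "frog", "file_path": "local"}
demo_derived = demo_sources[demo_sample["file_path"]].format(**demo_sample)
assert demo_derived == "/data/frog/frog_1.fastq.gz"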
def activate_amendments(self, amendments):
"""
Update settings based on amendment-specific values.
This method will update Project attributes, adding new values
associated with the amendments indicated, and in case of collision with
an existing key/attribute the amendments' values will be favored.
:param Iterable[str] amendments: A string with amendment
names to be activated
:return peppy.Project: Updated Project instance
:raise TypeError: if argument to amendment parameter is null
:raise NotImplementedError: if this call is made on a project not
created from a config file
"""
amendments = [amendments] if isinstance(amendments, str) else amendments
if amendments is None:
raise TypeError(
"The amendment argument can not be null. To deactivate an "
"amendment use the deactivate_amendments method."
)
if not self[CONFIG_FILE_KEY]:
raise NotImplementedError(
"amendment activation isn't supported on a project not "
"created from a config file"
)
prev = [(k, v) for k, v in self.items() if not k.startswith("_")]
conf_file = self[CONFIG_FILE_KEY]
self.__init__(cfg=conf_file, amendments=amendments)
for k, v in prev:
if k.startswith("_"):
continue
if k not in self or (self.is_null(k) and v is not None):
_LOGGER.debug("Restoring {}: {}".format(k, v))
self[k] = v
self[ACTIVE_AMENDMENTS_KEY] = amendments
return self
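# --- Illustrative usage sketch, from a user's perspective (assumed names) ---
# Typical round trip for amendments: activate an amendment declared under
# project_modifiers.amendments in the config, then restore the base settings.
# "project_config.yaml" and "newdata" are placeholders.
from peppy import Project

demo_prj = Project("project_config.yaml")
demo_prj.activate_amendments("newdata")   # overlay the amendment's values
print(demo_prj.amendments)                # ['newdata']
demo_prj.deactivate_amendments()          # back to the base configuration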
def deactivate_amendments(self):
"""
Bring the original project settings back.
:return peppy.Project: Updated Project instance
:raise NotImplementedError: if this call is made on a project not
created from a config file
"""
if ACTIVE_AMENDMENTS_KEY not in self or self[ACTIVE_AMENDMENTS_KEY] is None:
_LOGGER.warning("No amendments have been activated.")
return self
if not self[CONFIG_FILE_KEY]:
raise NotImplementedError(
"amendments deactivation isn't supported on a project that "
"lacks a config file."
)
self._reinit()
return self
def add_samples(self, samples):
"""
Add list of Sample objects
:param peppy.Sample | Iterable[peppy.Sample] samples: samples to add
"""
samples = [samples] if isinstance(samples, Sample) else samples
for sample in samples:
if isinstance(sample, Sample):
self._samples.append(sample)
self[SAMPLE_EDIT_FLAG_KEY] = True
else:
_LOGGER.warning("not a peppy.Sample object, not adding")
def infer_name(self):
"""
Infer project name from config file path.
First assume the name is the folder in which the config file resides,
unless that folder is named "metadata", in which case the project name
is the parent of that folder.
:return str: inferred name for project.
:raise InvalidConfigFileException: if the project lacks both a name and
a configuration file (no basis, then, for inference)
:raise InvalidConfigFileException: if specified Project name is invalid
"""
if CONFIG_KEY not in self:
return
if hasattr(self[CONFIG_KEY], "name"):
if " " in self[CONFIG_KEY].name:
raise InvalidConfigFileException(
"Specified Project name ({}) contains whitespace".format(
self[CONFIG_KEY].name
)
)
return self[CONFIG_KEY].name.replace(" ", "_")
if not self[CONFIG_FILE_KEY]:
raise NotImplementedError(
"Project name inference isn't supported "
"on a project that lacks a config file."
)
config_folder = os.path.dirname(self[CONFIG_FILE_KEY])
project_name = os.path.basename(config_folder)
if project_name == METADATA_KEY:
project_name = os.path.basename(os.path.dirname(config_folder))
return project_name.replace(" ", "_")
def get_description(self):
"""
Infer project description from config file.
The provided description has to be of class coercible to string
:return str: inferred name for project.
:raise InvalidConfigFileException: if description is not of class
coercible to string
"""
if CONFIG_KEY not in self:
return
if hasattr(self[CONFIG_KEY], DESC_KEY):
desc_str = str(self[CONFIG_KEY][DESC_KEY])
if not isinstance(desc_str, str):
try:
desc_str = str(desc_str)
except Exception as e:
raise InvalidConfigFileException(
"Could not convert the specified Project description "
"({}) to string. Caught exception: {}".format(
desc_str, getattr(e, "message", repr(e))
)
)
return desc_str
def __str__(self):
""" Representation in interpreter. """
if len(self) == 0:
return "{}"
msg = "Project"
if NAME_KEY in self:
msg += " '{}'".format(self[NAME_KEY])
if CONFIG_FILE_KEY in self:
msg += " ({})".format(self[CONFIG_FILE_KEY])
if DESC_KEY in self and self[DESC_KEY] is not None:
msg += "\n{}: {}".format(DESC_KEY, self[DESC_KEY])
try:
num_samples = len(self._samples)
except (AttributeError, TypeError):
_LOGGER.debug("No samples established on project")
num_samples = 0
if num_samples > 0:
msg = "{}\n{} samples".format(msg, num_samples)
sample_names = list(self[SAMPLE_DF_KEY][self.sample_name_colname])
repr_names = sample_names[:MAX_PROJECT_SAMPLES_REPR]
context = (
" (showing first {})".format(MAX_PROJECT_SAMPLES_REPR)
if num_samples > MAX_PROJECT_SAMPLES_REPR
else ""
)
msg = "{}{}: {}".format(msg, context, ", ".join(repr_names))
else:
msg = "{} {}".format(msg, "0 samples")
if CONFIG_KEY not in self:
return msg
msg = "{}\nSections: {}".format(
msg, ", ".join([s for s in self[CONFIG_KEY].keys()])
)
if (
PROJ_MODS_KEY in self[CONFIG_KEY]
and AMENDMENTS_KEY in self[CONFIG_KEY][PROJ_MODS_KEY]
):
msg = "{}\nAmendments: {}".format(
msg, ", ".join(self[CONFIG_KEY][PROJ_MODS_KEY][AMENDMENTS_KEY].keys())
)
if self.amendments:
msg = "{}\nActivated amendments: {}".format(
msg, ", ".join(self[ACTIVE_AMENDMENTS_KEY])
)
return msg
@property
def amendments(self):
"""
Return currently active list of amendments or None if none was activated
:return Iterable[str]: a list of currently active amendment names
"""
return self[ACTIVE_AMENDMENTS_KEY] if ACTIVE_AMENDMENTS_KEY in self else None
@property
def list_amendments(self):
"""
Return a list of available amendments or None if not declared
:return Iterable[str]: a list of available amendment names
"""
try:
return self[CONFIG_KEY][PROJ_MODS_KEY][AMENDMENTS_KEY].keys()
except Exception as e:
_LOGGER.debug(
"Could not retrieve available amendments: {}".format(
getattr(e, "message", repr(e))
)
)
return None
@property
def config(self):
"""
Get the config mapping
:return Mapping: config. May be formatted to comply with the most
recent version specifications
"""
return self[CONFIG_KEY]
@property
def config_file(self):
"""
Get the config file path
:return str: path to the config file
"""
return self[CONFIG_FILE_KEY]
@property
def samples(self):
"""
Generic/base Sample instance for each of this Project's samples.
:return Iterable[Sample]: Sample instance for each
of this Project's samples
"""
if self._samples:
return self._samples
if SAMPLE_DF_KEY not in self or self[SAMPLE_DF_KEY] is None:
_LOGGER.debug("No samples are defined")
return []
@property
def sample_name_colname(self):
"""
Name of the column in the sample table that holds the effective sample name.
It is "sample_name" by default, but when that column is missing it can be
replaced by the sample table index selected at object instantiation.
:return str: name of the column that consists of sample identifiers
"""
return SAMPLE_NAME_ATTR if SAMPLE_NAME_ATTR == self.st_index else self.st_index
@property
def sample_table(self):
"""
Get sample table. If any sample edits were performed,
it will be re-generated
:return pandas.DataFrame: a data frame with current samples attributes
"""
if self[SAMPLE_EDIT_FLAG_KEY]:
_LOGGER.debug("Generating new sample_table DataFrame")
self[SAMPLE_EDIT_FLAG_KEY] = False
new_df = self._get_table_from_samples(index=self.st_index)
self._sample_table = new_df
return new_df
_LOGGER.debug("Returning stashed sample_table DataFrame")
return self._sample_table
@property
def subsample_table(self):
"""
Get subsample table
:return pandas.DataFrame: a data frame with subsample attributes
"""
sdf = self[SUBSAMPLE_DF_KEY]
if sdf is None:
return
index = self.sst_index
sdf = make_list(sdf, pd.DataFrame)
for sst in sdf:
if not all([i in sst.columns for i in index]):
_LOGGER.info(
"Could not set {} index. At least one of the"
" requested columns does not exist: {}".format(
CFG_SUBSAMPLE_TABLE_KEY, index
)
| |
# flfm/shell/rules.py
"""
Rules and Permissions
~~~~~~~~~~~~~~~~~~~~~
Objects pertaining to the rules and permissions controlling FLFM.
"""
import collections.abc
import copy
import os
import re
from functools import wraps
from werkzeug.datastructures import MultiDict
from flask import current_app, g, flash, abort
from flask_login import current_user
from .paths import ShellDirectory
def read_rules_file(rule_file):
"""Generate (key, value) tuples from the rules file.
:param rule_file: The ``rules`` file to read from.
:type rule_file: str
"""
lines = rule_file.readlines()
good_lines = list(filter(lambda line: len(line) > 2, lines))
sorted_lines = sorted(good_lines,
key=lambda line: re.match(r"^(\w+)\=", line).group(1))
for line in sorted_lines:
pair = re.search(r"(\w*)\=([/\.\w]*)", line)
yield (pair.group(1), pair.group(2))
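# --- Illustrative sketch of the expected rules-file format (assumed paths) ---
# Each non-trivial line is a "Key=value" pair; read_rules_file yields the pairs
# as (key, value) tuples sorted by key. The content below is only an example
# of a typical setup, not taken from a real deployment.
import io

example_rules = io.StringIO(
    "Allowed=/srv/files\n"
    "AllowUploads=/srv/files/incoming\n"
    "Disallowed=/srv/files/private\n"
)
print(list(read_rules_file(example_rules)))
# [('AllowUploads', '/srv/files/incoming'), ('Allowed', '/srv/files'),
#  ('Disallowed', '/srv/files/private')]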
def enforce_mapped(mapped_dirs, requested_path, for_upload=False):
"""Enforce the rules from the rules file on requested_path.
:param mapped_dirs: A collection of mapped directories.
:type mapped_dirs: An instance of :class:`MappedDirectories`.
:param requested_path: The path of the directory to check permissions of.
:type requested_path: str
:param for_upload: Whether or not to enforce for an upload. **Default: False**
:type for_upload: bool
"""
requested_md = mapped_dirs.get_mapped_dir(requested_path)
for mapped_dir in mapped_dirs:
if for_upload:
if requested_md == mapped_dir:
if not mapped_dir.dir_allowuploads:
break
return
else:
if requested_md == mapped_dir:
if not mapped_dir.dir_allowed:
break
return
# Can't find a damn thing? Abort!
abort(403)
def needs_rules(needing_method):
"""A decorator to wrap around ``routes`` requiring rules.
"""
@wraps(needing_method)
def load_rules(*args, **kwargs):
rules_file = current_app.config['RULES_FILE']
if not hasattr(g, 'fm_rules'):
the_rules = Rules(rules_file)
if current_user.is_authenticated:
users_rules = VirtualRules.make_virtual(the_rules)
users_rules.allowed(current_user.home_folder)
users_rules.allow_uploads(current_user.home_folder)
# Add shares
if current_user.shares_received.count() > 0:
for share in current_user.shares_received.all():
users_rules.allowed(share.owner.home_folder)
the_rules = users_rules
g.fm_rules = the_rules
# RULES ARE PRIMED AND READY!
return needing_method(*args, **kwargs)
return load_rules
class Rules:
"""Class representing the ``rules`` file.
:param rule_file: Path to the ``rules`` file
:type rule_file: str
"""
def __init__(self, rule_file):
try:
if rule_file is not None:
with open(rule_file, 'r') as f:
self._rules = MultiDict(read_rules_file(f))
else:
self._rules = MultiDict()
except FileNotFoundError:
self._rules = MultiDict()
@property
def rules(self):
"""A *werkzeug* **MultiDict** of rules.
"""
return self._rules
@property
def num_rules(self):
"""The number of rules in the ``rules`` file.
"""
rule_keys = ('Allowed', 'AllowUpload', 'AllowUploads', 'Disallowed',
'DisAllowed')
count_o_rules = 0
for key, count_us in self._rules.lists():
if key in rule_keys:
count_o_rules += len(count_us)
return count_o_rules
def __len__(self):
return self.num_rules
# C'mon pylint VirtualRules derives from Rules
# Derived classes get them juicy protecteds
# pylint: disable=protected-access
class VirtualRules(Rules):
"""Mutable version of :class:`Rules`.
Construction from a file in this derivation is handled by ``template`` param.
To copy from a :class:`Rules` use :meth:`make_virtual`.
:param template: Identical to the ``rule_file`` param in :class:`Rules`.
:type template: str
"""
def __init__(self, template=None):
Rules.__init__(self, template)
def _remove_item(self, key, value):
value_list = self._rules.poplist(key)
if not value_list:
return
for val in value_list:
if val == value:
continue
self._rules.add(key, val)
@classmethod
def make_virtual(cls, rules_class):
"""Converts an immutable :class:`Rules` into a mutable :class:`VirtualRules`.
:param rules_class: What to convert.
:type rules_class: Instance of :class:`Rules`
"""
now_virtual = cls(None)
now_virtual._rules = copy.copy(rules_class._rules)
return now_virtual
def allowed(self, directory, remove=False):
"""Add or remove an *Allowed* rule for ``directory``.
:param directory: The directory to create this rule for.
:type directory: str
:param remove: Remove this rule for ``directory``. **Default:** *False*.
:type remove: bool
"""
if remove:
self._remove_item('Allowed', directory)
return
self._rules.add('Allowed', directory)
def allow_uploads(self, directory, remove=False):
"""Add or remove an *Allowed* rule for ``directory``.
:param directory: The directory to create this rule for.
:type directory: str
:param remove: Remove this rule for ``directory``. **Default:** *False*.
:type remove: bool
"""
if remove:
self._remove_item('AllowUploads', directory)
return
self._rules.add('AllowUploads', directory)
def disallowed(self, directory, remove=False):
"""Add or remove an *Allowed* rule for ``directory``.
:param directory: The directory to create this rule for.
:type directory: str
:param remove: Remove this rule for ``directory``. **Default:** *False*.
:type remove: bool
"""
if remove:
self._remove_item('Disallowed', directory)
return
self._rules.add('Disallowed', directory)
class MappedDirectory:
"""Represents a directory that is in the rules file, having rules.
:param dir_path: Path of the directory.
:type dir_path: str
:param dir_allowed: Whether or not to allow access.
:type dir_allowed: bool
:param dir_allowuploads: Whether or not to allow uploads.
:type dir_allowuploads: bool
"""
def __init__(self, dir_path, dir_allowed, dir_allowuploads):
self._dir_path = dir_path
self._dir_allowed = dir_allowed
self._dir_allowuploads = dir_allowuploads
@classmethod
def create_from_mapping(cls, mapping, path_key):
"""Instantiate a :class:`MappedDirectory` from a path corresponding to an
entry within :class:`MappedDirectories`.
:param mapping: The container to operate on.
:type mapping: An instance of :class:`MappedDirectories`
:param path_key: The path of the directory; It will be within **mapping**.
:type path_key: str
"""
try:
allowed, allowuploads = mapping.get(path_key)
except TypeError:
allowed, allowuploads = False, False
return cls(path_key, allowed, allowuploads)
@property
def dir_path(self):
"""The path of this :class:`MappedDirectory`.
"""
return self._dir_path
@property
def dir_allowed(self):
"""Whether or not FLFM is allowed in this :class:`MappedDirectory`.
"""
return self._dir_allowed
@property
def dir_allowuploads(self):
"""Whether or not uploads are allowed in this :class:`MappedDirectory`.
"""
return self._dir_allowuploads
def __repr__(self):
return '<MappedDirectory \'{}\': {}>'.format(self.dir_path, self.__dict__)
def __eq__(self, other):
total_equates = 3
equates = 0
if self.dir_path == other.dir_path:
equates += 1
if self.dir_allowed == other.dir_allowed:
equates += 1
if self.dir_allowuploads == other.dir_allowuploads:
equates += 1
return equates == total_equates
def is_in_tree(self, check_path):
"""Is a path denoted in ``check_path`` a subdirectory or in tree??
:param check_path: The path to check against.
:type check_path: str
:returns: bool
"""
common_path = os.path.commonpath([self.dir_path, check_path])
if common_path.count('\\') > 0:
common_path = common_path.replace('\\', '/')
if common_path == self.dir_path:
return True
return False
def as_shell(self):
"""Convert this :class:`MappedDirectory` into a
:class:`~flfm.shell.paths.ShellPath`.
:returns: A :class:`~flfm.shell.paths.ShellPath` representing this directory.
"""
return ShellDirectory.from_str_loc(self.dir_path)
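# --- Illustrative sketch of is_in_tree (standalone, placeholder paths) ---
# A directory rule covers the mapped directory itself and everything beneath it;
# unrelated paths fall outside the tree.
demo_md = MappedDirectory('/srv/files', dir_allowed=True, dir_allowuploads=False)
assert demo_md.is_in_tree('/srv/files/music/album')   # descendant of the mapped dir
assert not demo_md.is_in_tree('/srv/other')           # different tree entirely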
# `D` is an inherited property
# pylint: disable=invalid-name
# MappedDirectories' __iter__ must use yield
# pylint: disable=stop-iteration-return
class MappedDirectories(collections.abc.Mapping):
"""A mapping, `collections.abc.Mapping <https://docs.python.org/3/library/collections.abc.html#collections.abc.Mapping>`_,
of :class:`MappedDirectory`'s.
Internally, the mapped directories are a dictionary of *Path*, *tuple*.
:param some_dict: A dictionary to populate this :class:`MappedDirectories`.
:type some_dict: dict
"""
def __init__(self, some_dict):
self.D = some_dict
@classmethod
def from_rules(cls, rules):
"""Create from a :class:`Rules`.
:param rules: The rules to create this mapping from.
:type rules: A :class:`Rules` instance.
"""
rule_dict = dict()
if rules.num_rules > 0:
# Tuple entries are as such:
# (ALLOWED??, UPLOAD_ALLOWED??)
for k, v in rules.rules.items(True):
if 'Allowed' in k:
current = rule_dict.get(v, None)
if current is None:
rule_dict[v] = (True, False)
else:
rule_dict[v] = (True, current[1])
elif 'Disallowed' in k or 'DisAllowed' in k:
# what is the point of other properties in a disallow??
# just overwrite
rule_dict[v] = (False, False)
elif 'AllowUploads' in k or 'AllowUpload' in k:
current = rule_dict.get(v, None)
if current is None:
# Mark as allowed also since not in dict
rule_dict[v] = (True, True)
else:
rule_dict[v] = (current[0], True)
else:
continue
return cls(rule_dict)
@classmethod
def from_shell_path(cls, shell_path):
"""Create from a :class:`~flfm.shell.paths.ShellPath`
:param shell_path: Create this mapping, without permission, from this.
:type shell_path: A :class:`~flfm.shell.paths.ShellPath` instance.
"""
the_dict = dict()
default_tuple = (False, False)
current_dir_path = shell_path.str_path
the_dict[current_dir_path] = default_tuple
for subdir in shell_path.directories:
the_dict[subdir.path] = default_tuple
return cls(the_dict)
def __getitem__(self, key):
return self.D.get(key)
def __setitem__(self, key, item):
if isinstance(item, MappedDirectory):
new_item = (item.dir_allowed, item.dir_allowuploads)
self.D[key] = new_item
return
self.D[key] = item
def __len__(self):
return len(self.D)
def __iter__(self):
num_yielded = 0
iterator = iter(self.D)
while True:
if num_yielded >= len(self):
break
# guard the next()
try:
mapped_dir = next(iterator)
except StopIteration:
break
num_yielded += 1
yield MappedDirectory(mapped_dir, self.D[mapped_dir][0],
self.D[mapped_dir][1])
def __contains__(self, value):
if isinstance(value, MappedDirectory):
return value.dir_path in self.D
return super().__contains__(value)
def __eq__(self, other):
if not isinstance(other, MappedDirectories):
return False
return self.D == other.D
def get_mapped_dir(self, dir_path):
"""Select a specific mapped directory from within this container.
:param dir_path: The path to select.
:type dir_path: str
:returns: A :class:`MappedDirectory`.
.. note::
If ``_dir_path`` does not exist, the returned :class:`MappedDirectory`
will have no permissions assigned. They will all be ``False``.
"""
return MappedDirectory.create_from_mapping(self, dir_path)
def apply_rule_map(self, rule_map):
"""Merge the rules of this :class:`MappedDirectories` and another.
:param rule_map: Another rule/directories map.
:type rule_map: Another :class:`MappedDirectories`
:returns: ``self``, this instance but updated.
"""
def length_paths(other_map):
for md in other_map:
yield len(md.dir_path)
def difference_length(my_length, all_lengths):
for length in all_lengths:
yield abs(length-my_length)
# the length of each path in the rule mapping
rule_map_lens = list(length_paths(rule_map))
for my_dir in self:
# apply rule directly on top
# iterate, because it's been explicitly set
if my_dir in rule_map:
self[my_dir.dir_path] = rule_map.get_mapped_dir(my_dir.dir_path)
continue
for rule_dir in rule_map:
- 0.5)
edges_idx = np.round(edges_pix + 0.5) - 0.5
edges_idx = np.unique(edges_idx)
edges_ref = self.pix_to_coord(edges_idx)
groups = Table()
groups[f"{self.name}_min"] = edges_ref[:-1]
groups[f"{self.name}_max"] = edges_ref[1:]
groups["idx_min"] = (edges_idx[:-1] + 0.5).astype(int)
groups["idx_max"] = (edges_idx[1:] - 0.5).astype(int)
if len(groups) == 0:
raise ValueError("No overlap between reference and target edges.")
groups["bin_type"] = "normal "
edge_idx_start, edge_ref_start = edges_idx[0], edges_ref[0]
if edge_idx_start > 0:
underflow = {
"bin_type": "underflow",
"idx_min": 0,
"idx_max": edge_idx_start,
f"{self.name}_min": self.pix_to_coord(-0.5),
f"{self.name}_max": edge_ref_start,
}
groups.insert_row(0, vals=underflow)
edge_idx_end, edge_ref_end = edges_idx[-1], edges_ref[-1]
if edge_idx_end < (self.nbin - 0.5):
overflow = {
"bin_type": "overflow",
"idx_min": edge_idx_end + 1,
"idx_max": self.nbin - 1,
f"{self.name}_min": edge_ref_end,
f"{self.name}_max": self.pix_to_coord(self.nbin - 0.5),
}
groups.add_row(vals=overflow)
group_idx = Column(np.arange(len(groups)))
groups.add_column(group_idx, name="group_idx", index=0)
return groups
def upsample(self, factor):
"""Upsample map axis by a given factor.
When up-sampling for each node specified in the axis, the corresponding
number of sub-nodes are introduced and preserving the initial nodes. For
node type "edges" this results in nbin * factor new bins. For node type
"center" this results in (nbin - 1) * factor + 1 new bins.
Parameters
----------
factor : int
Upsampling factor.
Returns
-------
axis : `MapAxis`
Upsampled map axis.
"""
if self.node_type == "edges":
pix = self.coord_to_pix(self.edges)
nbin = int(self.nbin * factor) + 1
pix_new = np.linspace(pix.min(), pix.max(), nbin)
edges = self.pix_to_coord(pix_new)
return self.from_edges(edges, name=self.name, interp=self.interp)
else:
pix = self.coord_to_pix(self.center)
nbin = int((self.nbin - 1) * factor) + 1
pix_new = np.linspace(pix.min(), pix.max(), nbin)
nodes = self.pix_to_coord(pix_new)
return self.from_nodes(nodes, name=self.name, interp=self.interp)
def downsample(self, factor):
"""Downsample map axis by a given factor.
When down-sampling, each n-th bin (given by the factor) is selected from
the axis while preserving the axis limits. For node type "edges" this
requires nbin to be divisible by the factor; for node type "center" this
requires nbin - 1 to be divisible by the factor.
Parameters
----------
factor : int
Downsampling factor.
Returns
-------
axis : `MapAxis`
Downsampled map axis.
"""
if self.node_type == "edges":
nbin = self.nbin / factor
if np.mod(nbin, 1) > 0:
raise ValueError(
f"Number of {self.name} bins is not divisible by {factor}"
)
edges = self.edges[::factor]
return self.from_edges(edges, name=self.name, interp=self.interp)
else:
nbin = (self.nbin - 1) / factor
if np.mod(nbin, 1) > 0:
raise ValueError(
f"Number of {self.name} bins - 1 is not divisible by {factor}"
)
nodes = self.center[::factor]
return self.from_nodes(nodes, name=self.name, interp=self.interp)
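# --- Illustrative sketch of upsample/downsample (standalone, assumed axis) ---
# With node_type "edges", upsampling by a factor multiplies the number of bins,
# while downsampling requires the bin count to be divisible by the factor.
import astropy.units as u
from gammapy.maps import MapAxis

demo_axis = MapAxis.from_edges([1, 2, 4, 8, 16] * u.TeV, name="energy", interp="log")
print(demo_axis.nbin)                # 4
print(demo_axis.upsample(2).nbin)    # 8
print(demo_axis.downsample(2).nbin)  # 2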
def to_header(self, format="ogip", idx=0):
"""Create FITS header
Parameters
----------
format : {"ogip"}
Format specification
idx : int
Column index of the axis.
Returns
-------
header : `~astropy.io.fits.Header`
Header to extend.
"""
header = fits.Header()
if format in ["ogip", "ogip-sherpa"]:
header["EXTNAME"] = "EBOUNDS", "Name of this binary table extension"
header["TELESCOP"] = "DUMMY", "Mission/satellite name"
header["INSTRUME"] = "DUMMY", "Instrument/detector"
header["FILTER"] = "None", "Filter information"
header["CHANTYPE"] = "PHA", "Type of channels (PHA, PI etc)"
header["DETCHANS"] = self.nbin, "Total number of detector PHA channels"
header["HDUCLASS"] = "OGIP", "Organisation devising file format"
header["HDUCLAS1"] = "RESPONSE", "File relates to response of instrument"
header["HDUCLAS2"] = "EBOUNDS", "This is an EBOUNDS extension"
header["HDUVERS"] = "1.2.0", "Version of file format"
elif format in ["gadf", "fgst-ccube", "fgst-template"]:
key = f"<KEY>
name = self.name.upper()
if self.name == "energy" and self.node_type == "edges":
header[key] = "E_MIN,E_MAX"
elif self.name == "energy" and self.node_type == "center":
header[key] = "ENERGY"
elif self.node_type == "edges":
header[key] = f"{name}_MIN,{name}_MAX"
elif self.node_type == "center":
header[key] = name
else:
raise ValueError(f"Invalid node type {self.node_type!r}")
key_interp = f"INTERP{idx}"
header[key_interp] = self.interp
else:
raise ValueError(f"Unknown format {format}")
return header
def to_table(self, format="ogip"):
"""Convert `~astropy.units.Quantity` to OGIP ``EBOUNDS`` extension.
See https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002/cal_gen_92_002.html#tth_sEc3.2
The 'ogip-sherpa' format is equivalent to 'ogip' but uses keV energy units.
Parameters
----------
format : {"ogip", "ogip-sherpa", "gadf-dl3", "gtpsf"}
Format specification
Returns
-------
table : `~astropy.table.Table`
Table HDU
"""
table = Table()
edges = self.edges
if format in ["ogip", "ogip-sherpa"]:
self.assert_name("energy")
if format == "ogip-sherpa":
edges = edges.to("keV")
table["CHANNEL"] = np.arange(self.nbin, dtype=np.int16)
table["E_MIN"] = edges[:-1]
table["E_MAX"] = edges[1:]
elif format in ["ogip-arf", "ogip-arf-sherpa"]:
self.assert_name("energy_true")
if format == "ogip-arf-sherpa":
edges = edges.to("keV")
table["ENERG_LO"] = edges[:-1]
table["ENERG_HI"] = edges[1:]
elif format == "gadf-sed":
if self.is_energy_axis:
table["e_ref"] = self.center
table["e_min"] = self.edges_min
table["e_max"] = self.edges_max
elif format == "gadf-dl3":
from gammapy.irf.io import IRF_DL3_AXES_SPECIFICATION
if self.name == "energy":
column_prefix = "ENERG"
else:
for column_prefix, spec in IRF_DL3_AXES_SPECIFICATION.items():
if spec["name"] == self.name:
break
if self.node_type == "edges":
edges_hi, edges_lo = edges[:-1], edges[1:]
else:
edges_hi, edges_lo = self.center, self.center
table[f"{column_prefix}_LO"] = edges_hi[np.newaxis]
table[f"{column_prefix}_HI"] = edges_lo[np.newaxis]
elif format == "gtpsf":
if self.name == "energy_true":
table["Energy"] = self.center.to("MeV")
elif self.name == "rad":
table["Theta"] = self.center.to("deg")
else:
raise ValueError(
"Can only convert true energy or rad axis to"
f"'gtpsf' format, got {self.name}"
)
else:
raise ValueError(f"{format} is not a valid format")
return table
def to_table_hdu(self, format="ogip"):
"""Convert `~astropy.units.Quantity` to OGIP ``EBOUNDS`` extension.
See https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002/cal_gen_92_002.html#tth_sEc3.2
The 'ogip-sherpa' format is equivalent to 'ogip' but uses keV energy units.
Parameters
----------
format : {"ogip", "ogip-sherpa", "gtpsf"}
Format specification
Returns
-------
hdu : `~astropy.io.fits.BinTableHDU`
Table HDU
"""
table = self.to_table(format=format)
if format == "gtpsf":
name = "THETA"
else:
name = None
hdu = fits.BinTableHDU(table, name=name)
if format in ["ogip", "ogip-sherpa"]:
hdu.header.update(self.to_header(format=format))
return hdu
@classmethod
def from_table(cls, table, format="ogip", idx=0, column_prefix=""):
"""Instantiate MapAxis from table HDU
Parameters
----------
table : `~astropy.table.Table`
Table
format : {"ogip", "ogip-arf", "fgst-ccube", "fgst-template", "gadf", "gadf-dl3"}
Format specification
idx : int
Column index of the axis.
column_prefix : str
Column name prefix of the axis, used for creating the axis.
Returns
-------
axis : `MapAxis`
Map Axis
"""
if format in ["ogip", "fgst-ccube"]:
energy_min = table["E_MIN"].quantity
energy_max = table["E_MAX"].quantity
energy_edges = (
np.append(energy_min.value, energy_max.value[-1]) * energy_min.unit
)
axis = cls.from_edges(energy_edges, name="energy", interp="log")
elif format == "ogip-arf":
energy_min = table["ENERG_LO"].quantity
energy_max = table["ENERG_HI"].quantity
energy_edges = (
np.append(energy_min.value, energy_max.value[-1]) * energy_min.unit
)
axis = cls.from_edges(energy_edges, name="energy_true", interp="log")
elif format in ["fgst-template", "fgst-bexpcube"]:
allowed_names = ["Energy", "ENERGY", "energy"]
for colname in table.colnames:
if colname in allowed_names:
tag = colname
break
nodes = table[tag].data
axis = cls.from_nodes(
nodes=nodes, name="energy_true", unit="MeV", interp="log"
)
elif format == "gadf":
axcols = table.meta.get("AXCOLS{}".format(idx + 1))
colnames = axcols.split(",")
node_type = "edges" if len(colnames) == 2 else "center"
# TODO: check why this extra case is needed
if colnames[0] == "E_MIN":
name = "energy"
else:
name = colnames[0].replace("_MIN", "").lower()
# this is need for backward compatibility
if name == "theta":
name = "rad"
interp = table.meta.get("INTERP{}".format(idx + 1), "lin")
if node_type == "center":
nodes = np.unique(table[colnames[0]].quantity)
else:
edges_min = np.unique(table[colnames[0]].quantity)
edges_max = np.unique(table[colnames[1]].quantity)
nodes = edges_from_lo_hi(edges_min, edges_max)
axis = MapAxis(nodes=nodes, node_type=node_type, interp=interp, name=name)
elif format == "gadf-dl3":
from gammapy.irf.io import IRF_DL3_AXES_SPECIFICATION
spec = IRF_DL3_AXES_SPECIFICATION[column_prefix]
name, interp = spec["name"], spec["interp"]
# background models are stored in reconstructed energy
hduclass = table.meta.get("HDUCLAS2")
if hduclass in {"BKG", "RAD_MAX"} and column_prefix == "ENERG":
name = "energy"
edges_lo = table[f"{column_prefix}_LO"].quantity[0]
edges_hi = table[f"{column_prefix}_HI"].quantity[0]
if np.allclose(edges_hi, edges_lo):
axis = MapAxis.from_nodes(edges_hi, interp=interp, name=name)
else:
edges = edges_from_lo_hi(edges_lo, edges_hi)
axis = MapAxis.from_edges(edges, interp=interp, name=name)
elif format == "gtpsf":
try:
energy = table["Energy"].data * u.MeV
axis = MapAxis.from_nodes(energy, name="energy_true", interp="log")
except KeyError:
rad = table["Theta"].data * u.deg
axis = MapAxis.from_nodes(rad, name="rad")
elif format == "gadf-sed-energy":
if "e_min" in table.colnames and "e_max" in table.colnames:
e_min = flat_if_equal(table["e_min"].quantity)
e_max = flat_if_equal(table["e_max"].quantity)
edges = edges_from_lo_hi(e_min, e_max)
axis = MapAxis.from_energy_edges(edges)
elif "e_ref" in table.colnames:
e_ref = flat_if_equal(table["e_ref"].quantity)
axis = MapAxis.from_nodes(e_ref, name="energy", interp="log")
else:
raise ValueError(
"Either 'e_ref', 'e_min' or 'e_max' column " "names are required"
)
elif format == "gadf-sed-norm":
# TODO: guess interp here
nodes = flat_if_equal(table["norm_scan"][0])
axis = MapAxis.from_nodes(nodes, name="norm")
elif format == "gadf-sed-counts":
if "datasets" in table.colnames:
labels = np.unique(table["datasets"])
axis = LabelMapAxis(labels=labels, name="dataset")
else:
shape = table["counts"].shape
edges = np.arange(shape[-1] + 1) - 0.5
axis = MapAxis.from_edges(edges, name="dataset")
elif format == "profile":
if "datasets" in table.colnames:
labels = np.unique(table["datasets"])
axis = LabelMapAxis(labels=labels, name="dataset")
else:
x_ref = table["x_ref"].quantity
axis = MapAxis.from_nodes(x_ref, name="projected-distance")
else:
raise ValueError(f"Format '{format}' not supported")
return axis
@classmethod
def from_table_hdu(cls, hdu, format="ogip", idx=0):
"""Instantiate MapAxis from table HDU
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
Table HDU
# drillbit/coverage/coverage.py
#!/usr/bin/env python
# Generates a Coverage Matrix given the following inputs:
# - Drillbit Test Coverage (Android, iOS)
# - Existing API points (Android, iOS)
# - TDoc2
import os
import sys
import re
import platform
import optparse
import logging
import codecs
import zipfile
import shutil
import yaml
import subprocess
from itertools import chain
coverageDir = os.path.dirname(os.path.abspath(__file__))
drillbitDir = os.path.dirname(coverageDir)
mobileDir = os.path.dirname(drillbitDir)
supportCommonDir = os.path.join(mobileDir, "support", "common")
supportAndroidDir = os.path.join(mobileDir, "support", "android")
sys.path.append(supportCommonDir)
sys.path.append(supportAndroidDir)
try:
import json
except ImportError, e:
import simplejson as json
from mako.template import Template
from mako import exceptions
import bindings
import mappings
logging.basicConfig(
format = '[%(asctime)s] [%(levelname)s] %(message)s',
level = logging.INFO)
log = logging.getLogger("coverage")
def upperFirst(str):
return str[0:1].upper() + str[1:]
def lowerFirst(str):
return str[0:1].lower() + str[1:]
# lazily initialize a hierarchy of map of maps, and returns the final map
def lazyInitMap(root, *mapKeys):
top = root
for mapKey in mapKeys:
if mapKey not in top:
top[mapKey] = {}
top = top[mapKey]
return top
# iterate N levels of a map-of-map-of...N maps
def mapDeepIter(deepMap, nLevels, *mapKeys):
if nLevels == 0:
yield tuple(mapKeys)
for key in deepMap.keys():
obj = deepMap[key]
if isinstance(obj, dict):
mapDeepIter(obj, nLevels - 1)
# load up JSON blacklists for API sets
def loadBlacklist(apiname, platform):
blacklistPath = os.path.join(coverageDir, "blacklist", platform.lower(), "%s.json" % apiname)
if not os.path.isfile(blacklistPath):
return { "functions": [], "properties": [] }
print blacklistPath
return json.load(open(blacklistPath))
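# --- Illustrative sketch of lazyInitMap (standalone, placeholder keys) ---
# lazyInitMap walks/creates a chain of nested dicts and returns the innermost one,
# so callers can fill in counters without checking for missing keys first.
demo_counts = {}
demo_leaf = lazyInitMap(demo_counts, "android", "drillbit", "Titanium.UI")
demo_leaf["yes"] = 3
# demo_counts == {"android": {"drillbit": {"Titanium.UI": {"yes": 3}}}}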
class CoverageData(object):
CATEGORY_TDOC = "tdoc"
CATEGORY_BINDING = "binding"
CATEGORY_DRILLBIT = "drillbit"
TOP_LEVEL = "[Top Level]"
TOTAL = "total"
TOTAL_YES = "totalYes"
ALL_CATEGORIES = [CATEGORY_TDOC, CATEGORY_BINDING, CATEGORY_DRILLBIT]
categoryDesc = {
CATEGORY_TDOC: "API Docs (TDoc)",
CATEGORY_BINDING: "API Bindings",
CATEGORY_DRILLBIT: "Drillbit Tests"
}
PLATFORM_ANDROID = "android"
PLATFORM_IOS = "ios"
ALL_PLATFORMS = [PLATFORM_ANDROID, PLATFORM_IOS]
platformDesc = {
PLATFORM_ANDROID: "Android",
PLATFORM_IOS: "iOS"
}
STATUS_YES = "yes"
STATUS_NO = "no"
STATUS_NA = "na"
statusDesc = {
STATUS_YES: "Yes",
STATUS_NO: "No",
STATUS_NA: "N/A"
}
def __init__(self):
self.modules = {}
self.proxies = {}
self.topLevel = {}
self.disabledCategories = []
self.category = None
self.apiCount = { self.TOTAL: 0 }
for category in self.ALL_CATEGORIES:
self.apiCount[category] = { self.TOTAL : 0, self.TOTAL_YES: 0 }
for platform in self.ALL_PLATFORMS:
self.apiCount[category][platform] = {
self.STATUS_YES: 0,
self.STATUS_NO: 0,
self.STATUS_NA: 0,
self.TOTAL: 0
}
def getCategoryDesc(self, category):
return self.categoryDesc[category]
def disableCategory(self, category, platform):
self.disabledCategories.append((category, platform))
def setCategory(self, category):
self.category = category
def lazyGet(self, map, key):
if not key.startswith("Titanium") and map != self.topLevel:
key = "Titanium.%s" % key
key = mappings.mapType(key)
if key not in map:
map[key] = {
"properties": {}, "functions": {}
}
return map[key]
def lazyGetFunctions(self, map, key):
return self.lazyGet(map, key)["functions"]
def lazyGetProperties(self, map, key):
return self.lazyGet(map, key)["properties"]
def addCreateFunction(self, module, proxy, platforms):
self.addFunction("create" + proxy, module, platforms, isModule=True)
def countAPI(self, map, key, platforms):
if key not in map:
map[key] = {}
for category in self.ALL_CATEGORIES:
map[key][category] = {}
for platform in self.ALL_PLATFORMS:
disabled = (category, platform) in self.disabledCategories
if platform not in platforms or disabled:
map[key][category][platform] = self.STATUS_NA
else:
map[key][category][platform] = self.STATUS_NO
for platform in platforms:
    # only record a "yes" for known platforms so the lookup below cannot KeyError
    if platform in self.ALL_PLATFORMS:
        map[key][self.category][platform] = self.STATUS_YES
def countAPIs(self):
for componentType in [self.modules, self.proxies, self.topLevel]:
for componentName in componentType.keys():
component = componentType[componentName]
for apiType in component.keys():
apiTypeMap = component[apiType]
for api in apiTypeMap.keys():
apiMap = apiTypeMap[api]
for category in apiMap.keys():
categoryMap = apiMap[category]
hasYes = False
for platform in categoryMap.keys():
status = categoryMap[platform]
self.apiCount[category][platform][status] += 1
if status != self.STATUS_NA:
self.apiCount[category][platform][self.TOTAL] += 1
if status == self.STATUS_YES:
hasYes = True
self.apiCount[category][self.TOTAL] += 1
if hasYes:
self.apiCount[category][self.TOTAL_YES] += 1
self.apiCount[self.TOTAL] += 1
def addFunction(self, fn, component, platforms, isModule=False, isTopLevel=False):
if isModule:
fns = self.lazyGetFunctions(self.modules, component)
elif isTopLevel:
fns = self.lazyGetFunctions(self.topLevel, component)
else:
fns = self.lazyGetFunctions(self.proxies, component)
self.countAPI(fns, fn, platforms)
def addProperty(self, property, component, platforms, isModule=False, getter=False, setter=False):
if isModule:
properties = self.lazyGetProperties(self.modules, component)
else:
properties = self.lazyGetProperties(self.proxies, component)
self.countAPI(properties, property, platforms)
upper = upperFirst(property)
if getter:
self.addFunction("get" + upper, component, platforms, isModule=isModule)
if setter:
self.addFunction("set" + upper, component, platforms, isModule=isModule)
def getPlatformAPICount(self, platform):
apiCount = 0
for category in self.apiCount:
if category == self.TOTAL: continue
apiCount = max(apiCount, self.apiCount[category][platform][self.TOTAL])
return apiCount
def getPlatformCategoryPercent(self, category, platform):
return self.formatPercent(
self.apiCount[category][platform][self.STATUS_YES],
self.getPlatformAPICount(platform))
def formatPercent(self, n1, n2):
if n2 == 0: return "100.00%"
return "%.2f" % (100 * (n1 / float(n2)))
def toJSON(self):
return CoverageEncoder().encode({
"modules": self.modules,
"proxies": self.proxies,
"topLevel": self.topLevel,
"apiCount": self.apiCount
})
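# Illustrative usage sketch for CoverageData: a category is selected, then each
# addProperty/addFunction call records per-platform coverage, and countAPIs() tallies
# the totals. The API names used here are hypothetical.
def _exampleCoverageData():
    data = CoverageData()
    data.setCategory(CoverageData.CATEGORY_BINDING)
    # getter=True/setter=True also records getText/setText style accessor functions
    data.addProperty("text", "UI.Label", [CoverageData.PLATFORM_ANDROID], getter=True, setter=True)
    data.addFunction("show", "UI.Label", [CoverageData.PLATFORM_ANDROID])
    data.countAPIs()
    return data.toJSON()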
class CoverageEncoder(json.JSONEncoder):
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return json.JSONEncoder.default(self, o)
class CoverageMatrix(object):
def __init__(self, seedData=None):
self.data = CoverageData()
self.delta = None
if seedData:
self.drillbitTests = seedData["drillbitTests"]
self.androidBindings = seedData["androidBindings"]
self.iosBindings = seedData["iosBindings"]
self.drillbitCoverage = seedData["drillbitCoverage"]
self.tdocTypes = seedData["tdocTypes"]
if "delta" in seedData:
self.delta = seedData["delta"]
def initSources(self, options):
self.initAndroidBindings(options.distAndroidDir)
self.initIOSBindings(options.distIOSDir)
self.initDrillbitCoverage(options.drillbitResultsDir)
self.initDrillbitTests()
self.initTDocData(options.tdocDir)
def initAndroidBindings(self, distAndroidDir=None):
log.info("Initializing Android bindings")
self.androidBindings = []
if distAndroidDir == None:
distAndroidDir = os.path.join(mobileDir, "dist", "android")
if not os.path.exists(distAndroidDir):
log.warn("Skipping Android bindings, %s not found" % distAndroidDir)
self.data.disableCategory(self.data.CATEGORY_BINDING, self.data.PLATFORM_ANDROID)
return
for jar in os.listdir(distAndroidDir):
if not jar.endswith('.jar'): continue
jarPath = os.path.join(distAndroidDir, jar)
moduleBindings = bindings.get_module_bindings(zipfile.ZipFile(jarPath))
if moduleBindings != None:
self.androidBindings.append(moduleBindings)
def initIOSBindings(self, distIOSDir=None):
log.info("Initializing iOS bindings")
self.iosBindings = []
if distIOSDir == None:
distIOSDir = os.path.join(mobileDir, "dist", "ios")
if not os.path.exists(distIOSDir):
log.warn("Skipping iOS bindings, %s not found" % distIOSDir)
self.data.disableCategory(self.data.CATEGORY_BINDING, self.data.PLATFORM_IOS)
return
for file in os.listdir(distIOSDir):
if not file.endswith('.json'): continue
abspath = os.path.join(distIOSDir, file)
data = open(abspath, "r").read()
if data != '': self.iosBindings.append(json.loads(data))
def initDrillbitCoverage(self, drillbitResultsDir=None):
log.info("Initializing Drillbit coverage")
self.drillbitCoverage = {}
if drillbitResultsDir == None:
drillbitBuildDir = os.path.join(mobileDir, "build", "drillbit")
if platform.system() == "Darwin":
drillbitContentsDir = os.path.join(drillbitBuildDir, "Drillbit.app", "Contents")
else:
drillbitContentDir = os.path.join(drillbitBuildDir, "Drillbit")
drillbitResultsDir = os.path.join(drillbitContentsDir, "Resources", "test_results")
if not os.path.exists(drillbitResultsDir):
log.warn("Skipping Drillbit Test Coverage, %s not found" % drillbitResultsDir)
for p in self.data.ALL_PLATFORMS:
self.data.disableCategory(self.data.CATEGORY_DRILLBIT, p)
return
for f in os.listdir(drillbitResultsDir):
if f.endswith("Coverage.json"):
data = json.loads(open(os.path.join(drillbitResultsDir, f), "r").read())
self.mergeDrillbitCoverage(data)
self.pruneDrillbitCoverage()
for p in self.data.ALL_PLATFORMS:
if p not in self.drillbitCoverage:
log.warn("Skipping Drillbit Test Coverage for %s platform, no data was found" % p)
self.data.disableCategory(self.data.CATEGORY_DRILLBIT, p)
def mergeDrillbitCoverage(self, data):
for platform in data.keys():
if platform not in self.drillbitCoverage:
self.drillbitCoverage[platform] = {}
for apiType in data[platform].keys():
if apiType not in self.drillbitCoverage[platform]:
self.drillbitCoverage[platform][apiType] = {}
for component in data[platform][apiType].keys():
if component not in self.drillbitCoverage[platform][apiType]:
self.drillbitCoverage[platform][apiType][component] = {}
for api in data[platform][apiType][component].keys():
if api not in self.drillbitCoverage[platform][apiType][component]:
self.drillbitCoverage[platform][apiType][component][api] = {}
for coverageType in data[platform][apiType][component][api].keys():
if coverageType == "_type":
self.drillbitCoverage[platform][apiType][component][api]["_type"] = data[platform][apiType][component][api]["_type"]
continue
start = 0
if coverageType in self.drillbitCoverage[platform][apiType][component][api]:
start = self.drillbitCoverage[platform][apiType][component][api][coverageType]
self.drillbitCoverage[platform][apiType][component][api][coverageType] = start + data[platform][apiType][component][api][coverageType]
def pruneDrillbitCoverage(self):
for platform in self.drillbitCoverage.keys():
platformMap = self.drillbitCoverage[platform]
for apiType in platformMap:
componentMap = platformMap[apiType]
for componentName in componentMap:
allBlacklist = loadBlacklist(componentName, "all")
platformBlacklist = loadBlacklist(componentName, platform)
if apiType in platformMap and componentName in componentMap:
component = componentMap[componentName]
for fn in chain(allBlacklist["functions"], platformBlacklist["functions"]):
log.warn("Removing %s.%s from drillbit coverage" % (componentName, fn))
if fn in component:
del component[fn]
for property in chain(allBlacklist["properties"], platformBlacklist["properties"]):
log.warn("Removing %s.%s from drillbit coverage" % (componentName, property))
if property in component:
del component[property]
if platform == "android":
for yahooMethod in ["b64_hmac_sha1", "oauthRequest",\
"percentEscape", "setOAuthParameters", "yql", "yqlO"]:
platformMap["modules"]["Titanium.Yahoo"][yahooMethod]["_type"] = "function"
def initDrillbitTests(self):
os.chdir(drillbitDir)
rhinoJar = os.path.join(mobileDir, "android", "runtime", "rhino", "lib", "js.jar")
self.drillbitTests = json.loads(subprocess.Popen(
["java", "-jar", rhinoJar, "drillbit.js"], stdout=subprocess.PIPE).communicate()[0])
def initTDocData(self, tdocDir=None):
log.info("Initializing TDoc data")
self.tdocTypes = []
if tdocDir == None:
tdocDir = os.path.join(mobileDir, "apidoc")
if not os.path.exists(tdocDir):
log.warn("Skipping TDoc data, %s not found" % tdocDir)
for platform in self.data.ALL_PLATFORMS:
self.data.disableCategory(self.data.CATEGORY_TDOC, platform)
return
for root, dirs, files in os.walk(tdocDir):
for file in files:
if file.endswith(".yml") and not file.endswith("template.yml"):
absolutePath = os.path.join(root, file)
self.tdocTypes.extend([t for t in yaml.load_all(codecs.open(absolutePath, 'r', 'utf8').read())])
def findAndroidBinding(self, className):
for binding in self.androidBindings:
if "proxies" in binding:
for proxyClass in binding["proxies"].keys():
if proxyClass == className: return binding["proxies"][proxyClass]
return None
def findAndroidModuleForPackage(self, packageName):
for binding in self.androidBindings:
if "proxies" in binding and "modules" in binding:
for proxyClass in binding["proxies"].keys():
if binding["proxies"][proxyClass]["isModule"] and binding["proxies"][proxyClass]["packageName"] == packageName:
return binding["proxies"][proxyClass]
return None
def genAndroidBindingData(self):
log.info("Generating coverage for Android bindings")
self.data.setCategory(self.data.CATEGORY_BINDING)
proxyDefault = "org.appcelerator.kroll.annotations.Kroll.DEFAULT"
platforms = [self.data.PLATFORM_ANDROID]
allowModuleTopLevelMethods = ["decodeURIComponent", "encodeURIComponent"]
for binding in self.androidBindings:
for proxyClass in binding["proxies"].keys():
proxy = binding["proxies"][proxyClass]
isModule = proxy['isModule']
proxyFullAPI = proxy["proxyAttrs"]["fullAPIName"]
allBlacklist = loadBlacklist(proxyFullAPI, "all")
androidBlacklist = loadBlacklist(proxyFullAPI, self.data.PLATFORM_ANDROID)
blacklistFns = [fn for fn in chain(allBlacklist["functions"], androidBlacklist["functions"])]
blacklistProps = [p for p in chain(allBlacklist["properties"], androidBlacklist["properties"])]
if "creatableInModule" in proxy["proxyAttrs"]:
moduleClass = proxy["proxyAttrs"]["creatableInModule"]
if moduleClass != proxyDefault:
moduleAPI = self.findAndroidBinding(moduleClass)["proxyAttrs"]["fullAPIName"]
self.data.addCreateFunction(moduleAPI, proxy["proxyAttrs"]["name"], platforms)
elif not isModule:
# Proxies w/o "creatableInModule" need a namespace fix
module = self.findAndroidModuleForPackage(proxy["packageName"])
if module != None:
proxyFullAPI = module["proxyAttrs"]["fullAPIName"] + "." + proxyFullAPI
if "propertyAccessors" in proxy["proxyAttrs"]:
for accessor in proxy["proxyAttrs"]["propertyAccessors"]:
self.data.addProperty(accessor, proxyFullAPI, platforms, isModule=isModule, getter=True, setter=True)
if "methods" in proxy:
for method in proxy["methods"].keys():
methodName = proxy["methods"][method]["apiName"]
topLevel = False
if "topLevelMethods" in proxy and method in proxy["topLevelMethods"]:
for topLevelName in proxy["topLevelMethods"][method]:
parent = self.data.TOP_LEVEL
if "." in topLevelName:
parts = topLevelName.split(".")
parent = ".".join(parts[0:-1])
name = parts[-1]
else:
name = topLevelName
topLevel = True
if name not in blacklistFns:
self.data.addFunction(name, parent, platforms, isTopLevel=True)
if not topLevel or methodName in allowModuleTopLevelMethods:
# For the sake of coverage, we only add a top level method once
# even though technically it may be bound in two places
if methodName not in blacklistFns:
self.data.addFunction(methodName, proxyFullAPI, platforms, isModule=isModule)
if "properties" in proxy:
for prop in proxy["properties"].keys():
property = proxy["properties"][prop]["name"]
# ignore getter/setter here for now
#getter = proxy["properties"][prop]["get"]
#setter = proxy["properties"][prop]["set"]
if property not in blacklistProps:
self.data.addProperty(property, proxyFullAPI, platforms, isModule=isModule)
if "dynamicProperties" in proxy:
for dynProp in proxy["dynamicProperties"].keys():
property = proxy["dynamicProperties"][dynProp]["name"]
getter = proxy["dynamicProperties"][dynProp]["get"]
setter = proxy["dynamicProperties"][dynProp]["set"]
if property not in blacklistProps:
self.data.addProperty(property, proxyFullAPI, platforms, isModule=isModule, getter=getter, setter=setter)
if "dynamicApis" in proxy:
if "properties" in proxy["dynamicApis"]:
for property in proxy["dynamicApis"]["properties"]:
self.data.addProperty(property, proxyFullAPI, platforms, isModule=isModule)
if "methods" in proxy["dynamicApis"]:
for method in proxy["dynamicApis"]["methods"]:
self.data.addFunction(method, proxyFullAPI, platforms, isModule=isModule)
if "constants" in proxy:
for constant in proxy["constants"].keys():
self.data.addProperty(constant, proxyFullAPI, platforms, isModule=isModule)
def genIOSBindingData(self):
iosSubmodules = ['iOS', 'iPhone', 'iPad', 'Socket', 'Properties']
iosInternals = ['Proxy', 'Module', 'Animation', 'Toolbar',
'Window', 'View', 'File', 'Stream', 'DataStream',
'Rect', 'TextWidget']
log.info("Generating coverage for iOS...")
platforms = [self.data.PLATFORM_IOS]
for binding in self.iosBindings:
for iosClass in binding.keys():
superclass = binding[iosClass]["super"] #TODO: load superclass props, etc.
isModule = (superclass == "TiModule")
# Easy detection for API space for modules
if isModule:
match = re.search('^(.*)Module$', iosClass)
if match: # class name follows the <Name>Module convention
moduleName = match.group(1)
if moduleName == "TopTi":
fullAPI = "Titanium"
else:
fullAPI = "Titanium." + match.group(1)
else:
# Trim Ti(.*)Proxy if necessary
actualClass = iosClass
if iosClass == "TiUIiOS3DMatrix":
actualClass = "TiUIiOS3DMatrixProxy"
match = re.search('^Ti(.*?)(Proxy)?$', iosClass)
canCreate = False
if match:
relevant = match.group(1)
if match.group(2) == 'Proxy':
canCreate = True
# These are the modules whose names don't split cleanly on camel case (API, UI, XML)
match = re.search('^((?:API)|(?:UI)|(?:XML))(.*)', relevant)
if match:
moduleName = match.group(1)
proxyName = match.group(2)
else:
split = re.split('([A-Z])', relevant, maxsplit=2)
if len(split) > 4:
moduleName = split[1] + split[2]
proxyName = split[3] + split[4]
else:
moduleName = None
proxyName = relevant
# 1. Are we a submodule?
if proxyName in iosSubmodules:
isModule = True
else:
# 2. Are we located in a submodule namespace?
for submodule in iosSubmodules:
pos = proxyName.find(submodule)
if pos != -1:
"""API for Home Connect bound to Home Assistant OAuth."""
import logging
from homeassistant.const import PERCENTAGE, TEMP_CELSIUS, TIME_SECONDS, VOLUME_MILLILITERS # pylint: disable=import-error, no-name-in-module
from homeassistant.helpers.dispatcher import dispatcher_send # pylint: disable=import-error, no-name-in-module
from .const import SIGNAL_UPDATE_ENTITIES
_LOGGER = logging.getLogger(__name__)
class Appliance:
"""Home Connect generic device."""
def __init__(self, hass, appliance):
"""Constructor"""
self.hass = hass
self.appliance = appliance
self.binary_sensors = []
self.sensors = []
self.switches = []
self.lights = []
self.binary_sensors.append({"device": self, "key": "BSH.Common.Setting.PowerState", "description": "Power", "device_class": "power"})
def initialize(self):
"""Initialize appliance."""
# update the appliance status: settings, program, temperature, spin speed, etc.
self.appliance.update_properties()
# listen to events sent from appliance
self.appliance.listen_events(callback=self.event_callback)
def event_callback(self, appliance):
"""Handle event."""
_LOGGER.debug("Update triggered on %s", appliance.name)
# Dump the entire status buffer
for key in self.appliance.status:
value = self.appliance.status[key]
# blank out uri values to keep the debug output readable
if "uri" in value:
value["uri"] = "none"
_LOGGER.debug("%s: %s", key, value)
# forward the event to home assistant entities
dispatcher_send(self.hass, SIGNAL_UPDATE_ENTITIES, appliance.haId)
def get_binary_sensors(self):
    """Return the list of binary sensor descriptions."""
    return self.binary_sensors
def get_sensors(self):
    """Return the list of sensor descriptions."""
    return self.sensors
def get_switches(self):
    """Return the list of switch descriptions."""
    return self.switches
def get_lights(self):
    """Return the list of light descriptions."""
    return self.lights
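# Illustrative sketch of how a platform module could consume the descriptions returned by
# the getters above and react to update events; `hass` and `washer` are assumed to be
# provided by the config entry setup, and this helper is only an example.
def _example_consume_sensors(hass, washer):
    """Log the available sensor descriptions and subscribe to update signals."""
    from homeassistant.helpers.dispatcher import dispatcher_connect  # pylint: disable=import-error

    for description in washer.get_sensors():
        _LOGGER.debug("Sensor available: %s (%s)", description["description"], description["key"])

    def _updated(ha_id):
        # Called whenever event_callback() dispatches SIGNAL_UPDATE_ENTITIES
        _LOGGER.debug("Appliance %s reported new state", ha_id)

    dispatcher_connect(hass, SIGNAL_UPDATE_ENTITIES, _updated)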
class Washer(Appliance):
"""Washer."""
def __init__(self, hass, appliance):
super().__init__(hass, appliance)
self.binary_sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.RemoteControlStartAllowed", "description": "Remote Control", "device_class": None},
{"device": self, "key": "BSH.Common.Status.DoorState", "description": "Door", "device_class": "door"},
]
)
self.sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.OperationState", "description": "Operation State", "unit": None, "icon": None, "device_class": "home_connect_operation"},
{"device": self, "key": "BSH.Common.Option.RemainingProgramTime", "description": "Remaining Time", "unit": TIME_SECONDS, "icon": "mdi:update", "device_class": None},
{"device": self, "key": "BSH.Common.Option.ProgramProgress", "description": "Progress", "unit": PERCENTAGE, "icon": "mdi:progress-clock", "device_class": None},
{"device": self, "key": "BSH.Common.Root.SelectedProgram", "description": "Program", "unit": None, "icon": "mdi:format-list-bulleted", "device_class": "home_connect_washer_program"},
{"device": self, "key": "LaundryCare.Washer.Option.Temperature", "description": "Temperature", "unit": None, "icon": "mdi:coolant-temperature", "device_class": "home_connect_washer_temperatur"},
{"device": self, "key": "LaundryCare.Washer.Option.SpinSpeed", "description": "Spin Speed", "unit": None, "icon": "mdi:rotate-right", "device_class": "home_connect_washer_spin_speed"},
]
)
self.switches.append({"device": self, "key": "BSH.Common.Start", "description": "Start"})
class Dryer(Appliance):
"""Dryer."""
def __init__(self, hass, appliance):
super().__init__(hass, appliance)
self.binary_sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.RemoteControlStartAllowed", "description": "Remote Control", "device_class": None},
{"device": self, "key": "BSH.Common.Status.DoorState", "description": "Door", "device_class": "door"},
]
)
self.sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.OperationState", "description": "Operation State", "unit": None, "icon": None, "device_class": "home_connect_operation"},
{"device": self, "key": "BSH.Common.Option.RemainingProgramTime", "description": "Remaining Time", "unit": TIME_SECONDS, "icon": "mdi:update", "device_class": None},
{"device": self, "key": "BSH.Common.Option.ProgramProgress", "description": "Progress", "unit": PERCENTAGE, "icon": "mdi:progress-clock", "device_class": None},
{"device": self, "key": "BSH.Common.Root.SelectedProgram", "description": "Program", "unit": None, "icon": "mdi:format-list-bulleted", "device_class": "home_connect_dryer_program"},
{"device": self, "key": "LaundryCare.Dryer.Option.DryingTarget", "description": "Drying Target", "unit": None, "icon": "mdi:water-percent", "device_class": "home_connect_drying_target"},
]
)
self.switches.append({"device": self, "key": "BSH.Common.Start", "description": "Start"})
class WasherDryer(Appliance):
"""Washer Dryer."""
def __init__(self, hass, appliance):
super().__init__(hass, appliance)
self.binary_sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.RemoteControlStartAllowed", "description": "Remote Control", "device_class": None},
{"device": self, "key": "BSH.Common.Status.DoorState", "description": "Door", "device_class": "door"},
]
)
self.sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.OperationState", "description": "Operation State", "unit": None, "icon": None, "device_class": "home_connect_operation"},
{"device": self, "key": "BSH.Common.Option.RemainingProgramTime", "description": "Remaining Time", "unit": TIME_SECONDS, "icon": "mdi:update", "device_class": None},
{"device": self, "key": "BSH.Common.Option.ProgramProgress", "description": "Progress", "unit": PERCENTAGE, "icon": "mdi:progress-clock", "device_class": None},
{"device": self, "key": "BSH.Common.Root.SelectedProgram", "description": "Program", "unit": None, "icon": "mdi:format-list-bulleted", "device_class": "home_connect_washer_program"},
{"device": self, "key": "LaundryCare.Washer.Option.Temperature", "description": "Temperature", "unit": None, "icon": "mdi:coolant-temperature", "device_class": "home_connect_washer_temperatur"},
{"device": self, "key": "LaundryCare.Washer.Option.SpinSpeed", "description": "Spin Speed", "unit": None, "icon": "mdi:rotate-right", "device_class": "home_connect_washer_spin_speed"},
{"device": self, "key": "LaundryCare.Dryer.Option.DryingTarget", "description": "Drying Target", "unit": None, "icon": "mdi:water-percent", "device_class": "home_connect_drying_target"},
]
)
self.switches.append({"device": self, "key": "BSH.Common.Start", "description": "Start"})
class Dishwasher(Appliance):
"""Dishwasher."""
def __init__(self, hass, appliance):
super().__init__(hass, appliance)
self.binary_sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.RemoteControlStartAllowed", "description": "Remote Control", "device_class": None},
{"device": self, "key": "BSH.Common.Status.DoorState", "description": "Door", "device_class": "door"},
]
)
self.sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.OperationState", "description": "Operation State", "unit": None, "icon": None, "device_class": "home_connect_operation"},
{"device": self, "key": "BSH.Common.Option.RemainingProgramTime", "description": "Remaining Time", "unit": TIME_SECONDS, "icon": "mdi:update", "device_class": None},
{"device": self, "key": "BSH.Common.Option.ProgramProgress", "description": "Progress", "unit": PERCENTAGE, "icon": "mdi:progress-clock", "device_class": None},
{"device": self, "key": "BSH.Common.Root.SelectedProgram", "description": "Program", "unit": None, "icon": "mdi:format-list-bulleted", "device_class": "home_connect_dishcare_program"},
]
)
self.switches.append({"device": self, "key": "BSH.Common.Start", "description": "Start"})
self.lights.append({"device": self, "key": "BSH.Common.Setting.AmbientLightEnabled", "description": "Ambient Light"})
class Refrigerator(Appliance):
"""Refrigerator."""
def __init__(self, hass, appliance):
super().__init__(hass, appliance)
self.binary_sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.DoorState", "description": "Door", "device_class": "door"},
]
)
self.sensors.extend(
[
{"device": self, "key": "Refrigeration.Common.Setting.BottleCooler.SetpointTemperature", "description": "Bottle Coller Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
{"device": self, "key": "Refrigeration.Common.Setting.ChillerLeft.SetpointTemperature", "description": "Chiller Left Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
{"device": self, "key": "Refrigeration.Common.Setting.ChillerCommon.SetpointTemperature", "description": "Chiller Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
{"device": self, "key": "Refrigeration.Common.Setting.ChillerRight.SetpointTemperature", "description": "Chiller Right Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
{"device": self, "key": "Refrigeration.FridgeFreezer.Setting.SetpointTemperatureRefrigerator", "description": "Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
]
)
self.switches.extend(
[
{"device": self, "key": "Refrigeration.FridgeFreezer.Setting.SuperModeRefrigerator", "description": "Super Mode Refrigerator"},
{"device": self, "key": "Refrigeration.Common.Setting.EcoMode", "description": "Eco Mode"},
{"device": self, "key": "Refrigeration.Common.Setting.SabbathMode", "description": "Sabbath Mode"},
{"device": self, "key": "Refrigeration.Common.Setting.VacationMode", "description": "Vacation Mode"},
{"device": self, "key": "Refrigeration.Common.Setting.FreshMode", "description": "Fresh Mode"},
]
)
class WineCooler(Appliance):
"""Wine Cooler."""
def __init__(self, hass, appliance):
super().__init__(hass, appliance)
self.binary_sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.DoorState", "description": "Door", "device_class": "door"},
]
)
self.sensors.extend(
[
{"device": self, "key": "Refrigeration.Common.Setting.WineCompartment.SetpointTemperature", "description": "Temperature 1", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
{"device": self, "key": "Refrigeration.Common.Setting.WineCompartment2.SetpointTemperature", "description": "Temperature 2", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
{"device": self, "key": "Refrigeration.Common.Setting.WineCompartment3.SetpointTemperature", "description": "Temperature 3", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
]
)
self.switches.append({"device": self, "key": "Refrigeration.Common.Setting.SabbathMode", "description": "Sabbath Mode"})
class Freezer(Appliance):
"""Freezer."""
def __init__(self, hass, appliance):
super().__init__(hass, appliance)
self.binary_sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.DoorState", "description": "Door", "device_class": "door"},
]
)
self.sensors.extend(
[
{"device": self, "key": "Refrigeration.FridgeFreezer.Setting.SetpointTemperatureFreezer", "description": "Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
]
)
self.switches.extend(
[
{"device": self, "key": "Refrigeration.FridgeFreezer.Setting.SuperModeFreezer", "description": "Super Mode Freezer"},
{"device": self, "key": "Refrigeration.Common.Setting.EcoMode", "description": "Eco Mode"},
{"device": self, "key": "Refrigeration.Common.Setting.SabbathMode", "description": "Sabbath Mode"},
]
)
class FridgeFreezer(Appliance):
"""FridgeFreezer."""
def __init__(self, hass, appliance):
super().__init__(hass, appliance)
self.binary_sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.DoorState", "description": "Door", "device_class": "door"},
]
)
self.sensors.extend(
[
{"device": self, "key": "Refrigeration.FridgeFreezer.Setting.SetpointTemperatureFreezer", "description": "Freezer Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
{"device": self, "key": "Refrigeration.FridgeFreezer.Setting.SetpointTemperatureRefrigerator", "description": "Refrigerator Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
{"device": self, "key": "Refrigeration.Common.Setting.BottleCooler.SetpointTemperature", "description": "Bottle Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
{"device": self, "key": "Refrigeration.Common.Setting.ChillerLeft.SetpointTemperature", "description": "Chiller Left Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
{"device": self, "key": "Refrigeration.Common.Setting.ChillerCommon.SetpointTemperature", "description": "Chiller Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
{"device": self, "key": "Refrigeration.Common.Setting.ChillerRight.SetpointTemperature", "description": "Chiller Right Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
]
)
self.switches.extend(
[
{"device": self, "key": "Refrigeration.FridgeFreezer.Setting.SuperModeRefrigerator", "description": "Super Mode Refrigerator"},
{"device": self, "key": "Refrigeration.FridgeFreezer.Setting.SuperModeFreezer", "description": "Super Mode Freezer"},
{"device": self, "key": "Refrigeration.Common.Setting.EcoMode", "description": "Eco Mode"},
{"device": self, "key": "Refrigeration.Common.Setting.SabbathMode", "description": "Sabbath Mode"},
{"device": self, "key": "Refrigeration.Common.Setting.VacationMode", "description": "Vacation Mode"},
{"device": self, "key": "Refrigeration.Common.Setting.FreshMode", "description": "Fresh Mode"},
]
)
class Oven(Appliance):
"""Oven."""
def __init__(self, hass, appliance):
super().__init__(hass, appliance)
self.binary_sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.RemoteControlStartAllowed", "description": "Remote Control", "device_class": None},
{"device": self, "key": "BSH.Common.Status.DoorState", "description": "Door", "device_class": "door"},
]
)
self.sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.OperationState", "description": "Operation State", "unit": None, "icon": None, "device_class": "home_connect_operation"},
{"device": self, "key": "BSH.Common.Option.RemainingProgramTime", "description": "Remaining Time", "unit": TIME_SECONDS, "icon": "mdi:update", "device_class": None},
{"device": self, "key": "BSH.Common.Option.Duration", "description": "Duration", "unit": TIME_SECONDS, "icon": "mdi:update", "device_class": None},
{"device": self, "key": "BSH.Common.Option.ElapsedProgramTime", "description": "Elapsed Program Time", "unit": TIME_SECONDS, "icon": "mdi:update", "device_class": None},
{"device": self, "key": "BSH.Common.Option.ProgramProgress", "description": "Progress", "unit": PERCENTAGE, "icon": "mdi:progress-clock", "device_class": None},
{"device": self, "key": "BSH.Common.Root.SelectedProgram", "description": "Program", "unit": None, "icon": "mdi:format-list-bulleted", "device_class": "home_connect_oven_program"},
{"device": self, "key": "Cooking.Oven.Status.CurrentCavityTemperature", "description": "Current Cavity Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
{"device": self, "key": "Cooking.Oven.Option.SetpointTemperature", "description": "Temperature", "unit": TEMP_CELSIUS, "icon": "mdi:thermometer", "device_class": None},
]
)
self.switches.append({"device": self, "key": "BSH.Common.Start", "description": "Start"})
class CoffeeMaker(Appliance):
"""Coffee Maker."""
def __init__(self, hass, appliance):
super().__init__(hass, appliance)
self.binary_sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.RemoteControlStartAllowed", "description": "Remote Control", "device_class": None},
{"device": self, "key": "BSH.Common.Status.DoorState", "description": "Door", "device_class": "door"},
]
)
self.sensors.extend(
[
{"device": self, "key": "BSH.Common.Status.OperationState", "description": "Operation State", "unit": None, "icon": None, "device_class": "home_connect_operation"},
{"device": self, "key": "BSH.Common.Root.SelectedProgram", "description": "Program", "unit": None, "icon": "mdi:format-list-bulleted", "device_class": "home_connect_coffee_maker_program"},
{"device": self, "key": "ConsumerProducts.CoffeeMaker.Option.CoffeeTemperature", "description": "Temperature", "unit": None, "icon": "mdi:thermometer", "device_class": "home_connect_coffee_maker_temperature"},
{"device": self, "key": "ConsumerProducts.CoffeeMaker.Option.FillQuantity", "description": "Fill Quantity", "unit": VOLUME_MILLILITERS, "icon": "mdi:water-outline", "device_class": "None"},
{"device": self, "key": "ConsumerProducts.CoffeeMaker.Option.BeanAmount", "description": "Bean | |
# Copyright (c) 2021, Zenqi
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from sidle.errors import PasswordError
from typing import (
Any,
List,
Optional,
Iterable,
AnyStr,
TextIO,
Tuple
)
from sidle.enc import SidleEncryption
from sidle.utils import (
convert_string,
convert_bytes,
password_with_asterisk
)
import ast
import os
class Sidle:
"""
Sidle is a secure data store protected by a password and
simple encryption. This class handles all Sidle operations,
such as storing Sidle data in the given file.
Parameters:
filename (str):
The filename to be used for storing data
password (Any):
The password protecting the file; required for every
Sidle operation
**options (Keyword Arguments):
Additional option for the operation.
"""
def __init__(
self,
filename: str,
password: Any,
**options
):
#: The filename to be used for storing data
self.filename = filename
#: The password for the file in order to do
#: Sidle operation
self.password = password
#: A sidle data instance for handling data storing.
self.sidle: SidleData = SidleData(
password=self.password
)
#: Additional option for the operation.
self.options = options
self.__check_for_filename()
def __getitem__(self, key: Any):
"""
Get the item from the filename given.
"""
raw = self.read_raw(self.filename, True)
if len(raw) == 0:
return None
data = self.sidle.load(raw)
return data.__getitem__(
key=key
)
def __setitem__(self, key: Any, value: Any):
"""
Set the item from the filename given
"""
self.insert(
key=key,
value=value
)
def __check_for_filename(self):
"""
Check whether the given filename already exists; if not,
create an empty file. A '.sd' extension is appended when
no extension is given.
"""
ext = os.path.splitext(self.filename)[1]
if ext == '':
    self.filename = self.filename + '.sd'
if not os.path.isfile(self.filename):
with open(self.filename, 'wb') as f:
pass
def read_raw(
self,
filename: str,
writable: Optional[bool] = False
) -> TextIO:
"""
Read the raw file and return a TextIO object
"""
if writable:
mode = 'rb+'
else:
mode = 'rb'
with open(filename, mode) as f:
return f.read()
def write_raw(self, filename: str, data: bytes) -> TextIO:
"""
Write a raw data to the file
"""
with open(filename, 'wb') as f:
f.write(data)
def read(self):
"""
Return the representation of the
SidleData.
"""
return repr(self.sidle)
def insert(
self,
key: str,
value: Any
):
"""
Insert the key and a value to the file.
"""
raw = self.read_raw(
filename=self.filename
)
if len(raw) == 0:
self.sidle.set(
key=key,
value=value
)
self.sidle.save(self.filename)
else:
data = self.sidle.load(raw)
data.set(
key=key,
value=value
)
data.save(
filename=self.filename
)
def __repr__(self):
return self._get_repr_value()
def _get_repr_value(self):
raw = self.read_raw(
filename=self.filename
)
if len(raw) == 0:
_data = None
else:
_data = self.sidle.load(raw)
name = type(self).__name__
filename = self.filename
password = password_with_asterisk(
password=self.password
)
if _data:
_len = len(_data)
keys = [
self.sidle._enc.decrypt(k)
for k in _data.keys()
]
else:
_len = 0
keys = None
return "%s(filename=%s, password=%s, length=%s, keys=%s)" % (
name,
filename,
password,
_len,
keys
)
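# Illustrative usage sketch for Sidle; the filename, password and key below are
# hypothetical example values (a '.sd' file is created on first use).
def _example_sidle_usage():
    store = Sidle(filename="example_store", password="example-password")
    store["username"] = "zenqi"   # encrypts the pair and persists it to example_store.sd
    return store["username"]      # reads the file back and returns the decrypted value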
class SidleData:
"""
A data structure for handling Sidle data
with encryption: `SidleEncryption.`
Parameters:
password (Any):
The password that can be used for encryption.
"""
_enc: SidleEncryption
_list: list
def __init__(
self,
password: Any,
data: List[Tuple[Any]] = None
):
self.password = password
if data:
self._list = data
else:
self._list = []
self._enc = SidleEncryption(
password=self.password
)
@property
def raw(self) -> bytes:
"""
Return the encrypted raw string of the data.
"""
return self._enc.encrypt(str(self._list))
def __getitem__(self, key: str, error: Optional[bool]=False):
"""
Get the value of the key given.
Parameter:
key (str):
The key for the value to get
error (Optional[bool]):
Set if the function throws `KeyError`
"""
encrypted_data = [
(self._enc.decrypt(k), self._enc.decrypt(v))
for k,v in self._list
]
if not error:
if isinstance(key, int):
return encrypted_data[key]
elif isinstance(key, slice):
return self.__class__(self.password, encrypted_data[key])
_: str = key.lower()
for k, v in encrypted_data:
if k.lower() == _:
return v
#: If error is True, raise a `KeyError`
#: exception when the key doesn't exist
if error:
raise KeyError(key)
def __setitem__(self, key: str, value: Any):
"""
Set the value of the item. This magic method
can be used on:
>>> sidle = SidleData('password')
>>> sidle['username'] = 'zenqi'
"""
if isinstance(key, (slice, int)):
if isinstance(key, int):
value = [value]
value = [
(k, v)
for k,v in value
]
if isinstance(key, int):
self._list[key] = value[0]
else:
self._list[key] = value
else:
self.set(
key=key,
value=value
)
def __delitem__(self, key: str, index: Optional[bool] = True):
"""
Delete the key and its value from the list
of header.
Parameter:
key (str):
The key to be deleted
index (Optional[bool]):
Set if the slicing can be index.
"""
encrypted_data = [
(self._enc.decrypt(k), self._enc.decrypt(v))
for k,v in self._list
]
if index and isinstance(key, (int, slice)):
del encrypted_data[key]
return
_: str = key.lower()
_new: list = []
for k, v in encrypted_data:
if k.lower() != _:
_new.append((k,v))
encrypted_data[:] = _new
self._list = encrypted_data
def __eq__(self, other):
def lowered(item):
return (item[0].lower(),) + item[1:]
return other.__class__ is self.__class__ and set(
map(lowered, other._list)
) == set(map(lowered, self._list))
def __iter__(self):
return iter(self._list)
def __len__(self):
return len(self._list)
def __copy__(self):
return self.copy()
def __contains__(self, key):
"""
Check if a key is present.
Example:
    >>> sidle = SidleData('password')
    >>> sidle['username'] = 'zenqi'
    >>> 'username' in sidle
    True
"""
try:
self.__getitem__(key)
except KeyError:
return False
return True
def get(
self,
key: str,
default: Optional[Any] = None,
type: Optional[Any] = None,
bytes: Optional[bool]=False
):
"""
Get the given key and return. If the type is defined
then return the given type.
Parameter:
key (str):
The key for the value to get.
default (Optional[any]):
Set the default value if the key doesn't exist
type (Optional[any]):
Set the type of the given value.
bytes (Optional[bool]):
Set if the value will be converted as bytes or no
Example:
    >>> sidle = SidleData('password')
    >>> sidle['username'] = 'zenqi'
    >>> sidle.get('username')
    'zenqi'
"""
try:
_value = self.__getitem__(key)
except KeyError:
return default
if bytes:
_value = convert_bytes(_value)
if type != None:
try:
return type(_value)
except ValueError:
return default
return _value
def set(
self,
key: str,
value: Any
):
_key = self._enc.encrypt(key)
_value = self._enc.encrypt(value)
if not self._list:
self._list.append(
(_key, _value)
)
return
_iter = iter(self._list)
_k = key.lower()
for i, (o_key, o_value) in enumerate(_iter):
if self._enc.decrypt(o_key).lower() == _k:
self._list[i] = (_key, _value)
break
else:
self._list.append((_key, _value))
return
self._list[i + 1:] = [x for x in _iter if self._enc.decrypt(x[0]).lower() != _k]
def add(self,
key: AnyStr,
value: Any
):
"""
Append a new (key, value) tuple to the list of entries.
Unlike `set`, the pair is stored exactly as given, without
encryption.
Parameters:
    key (str):
        The key to be added
    value (Any):
        The value for the key to be added
"""
self._list.append((key, value))
def remove(self, key: str):
"""
Remove the key and its value from the list
of stored entries.
Parameter:
key (str):
The key to be removed.
"""
return self.__delitem__(key, False)
def copy(self):
return self.__class__(self.password, list(self._list))
def clear(self):
"""
Clear all data from the list.
"""
del self._list[:]
def pop(self, key: Optional[str] = None, default: Optional[Any] = None):
"""
Remove the key and return | |
<filename>mws/mws.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Basic interface to Amazon MWS
# Based on http://code.google.com/p/amazon-mws-python
#
import hashlib
import hmac
import base64
import re
import six
try:
from xml.etree.ElementTree import ParseError as XMLError
except ImportError:
from xml.parsers.expat import ExpatError as XMLError
from urllib.parse import quote
from time import strftime, gmtime
from requests import request
from requests.exceptions import HTTPError
from mws import utils
__all__ = [
'Feeds',
'Inventory',
'MWSError',
'Reports',
'Orders',
'Products',
'Recommendations',
'Sellers',
'Finances',
]
# See https://images-na.ssl-images-amazon.com/images/G/01/mwsportal/doc/en_US/bde/MWSDeveloperGuide._V357736853_.pdf page 8
# for a list of the end points and marketplace IDs
MARKETPLACES = {
"CA" : "https://mws.amazonservices.ca", #A2EUQ1WTGCTBG2
"US" : "https://mws.amazonservices.com", #ATVPDKIKX0DER",
"DE" : "https://mws-eu.amazonservices.com", #A1PA6795UKMFR9
"ES" : "https://mws-eu.amazonservices.com", #A1RKKUPIHCS9HS
"FR" : "https://mws-eu.amazonservices.com", #A13V1IB3VIYZZH
"IN" : "https://mws.amazonservices.in", #A21TJRUUN4KGV
"IT" : "https://mws-eu.amazonservices.com", #APJ6JRA9NG5V4
"UK" : "https://mws-eu.amazonservices.com", #A1F83G8C2ARO7P
"JP" : "https://mws.amazonservices.jp", #A1VC38T7YXB528
"CN" : "https://mws.amazonservices.com.cn", #AAHKV2X7AFYLW
"MX" : "https://mws.amazonservices.com.mx", #A1AM78C64UM0Y8
}
class MWSError(Exception):
"""
Main MWS Exception class
"""
# Allows quick access to the response object.
# Do not rely on this attribute, always check if its not None.
response = None
def calc_md5(string):
    """Calculates the base64-encoded MD5 checksum for the given string or bytes
    """
    if isinstance(string, str):
        string = string.encode('utf-8')
    md = hashlib.md5()
    md.update(string)
    return base64.b64encode(md.digest()).decode('utf-8')
def remove_empty(d):
"""
Helper function that removes all keys from a dictionary (d),
that have an empty value.
"""
return dict((key, value) for key, value in six.iteritems(d) if value)
def remove_namespace(xml):
regex = re.compile(' xmlns(:ns2)?="[^"]+"|(ns2:)|(xml:)')
return regex.sub('', xml.decode('utf-8'))
class DictWrapper(object):
def __init__(self, xml, rootkey=None):
self.original = xml
self._rootkey = rootkey
self._mydict = utils.xml2dict().fromstring(remove_namespace(xml))
self._response_dict = self._mydict.get([*self._mydict][0], self._mydict)
@property
def parsed(self):
if self._rootkey:
return self._response_dict.get(self._rootkey)
else:
return self._response_dict
class DataWrapper(object):
"""
Text wrapper in charge of validating the hash sent by Amazon.
"""
def __init__(self, data, header):
self.original = data
if 'content-md5' in header:
hash_ = calc_md5(self.original)
if header['content-md5'] != hash_:
raise MWSError("Wrong Contentlength, maybe amazon error...")
@property
def parsed(self):
return self.original
class MWS(object):
""" Base Amazon API class """
# This is used to post/get to the different uris used by amazon per api
# ie. /Orders/2011-01-01
# All subclasses must define their own URI only if needed
URI = "/"
# The API version varies in most amazon APIs
VERSION = "2009-01-01"
# There seem to be some xml namespace issues. therefore every api subclass
# is recommended to define its namespace, so that it can be referenced
# like so AmazonAPISubclass.NS.
# For more information see http://stackoverflow.com/a/8719461/389453
NS = ''
# Some APIs are available only to either a "Merchant" or "Seller"
# the type of account needs to be sent in every call to the amazon MWS.
# This constant defines the exact name of the parameter Amazon expects
# for the specific API being used.
# All subclasses need to define this if they require another account type
# like "Merchant" in which case you define it like so.
# ACCOUNT_TYPE = "Merchant"
# Which is the name of the parameter for that specific account type.
ACCOUNT_TYPE = "SellerId"
def __init__(self, access_key, secret_key, account_id, region='US', domain='', uri="", version="", auth_token=""):
self.access_key = access_key
self.secret_key = secret_key
self.account_id = account_id
self.auth_token = auth_token
self.version = version or self.VERSION
self.uri = uri or self.URI
if domain:
self.domain = domain
elif region in MARKETPLACES:
self.domain = MARKETPLACES[region]
else:
error_msg = "Incorrect region supplied ('%(region)s'). Must be one of the following: %(marketplaces)s" % {
"marketplaces" : ', '.join(MARKETPLACES.keys()),
"region" : region,
}
raise MWSError(error_msg)
def make_request(self, extra_data, method="GET", **kwargs):
"""Make request to Amazon MWS API with these parameters
"""
# Remove all keys with an empty value because
# Amazon's MWS does not allow such a thing.
extra_data = remove_empty(extra_data)
params = {
'AWSAccessKeyId': self.access_key,
self.ACCOUNT_TYPE: self.account_id,
'SignatureVersion': '2',
'Timestamp': self.get_timestamp(),
'Version': self.version,
'SignatureMethod': 'HmacSHA256',
}
if self.auth_token:
params['MWSAuthToken'] = self.auth_token
params.update(extra_data)
request_description = '&'.join(['%s=%s' % (k, quote(params[k], safe='-_.~')) for k in sorted(params)])
signature = self.calc_signature(method, request_description)
url = '%s%s?%s&Signature=%s' % (self.domain, self.uri, request_description, quote(signature))
headers = {'User-Agent': 'python-amazon-mws/0.0.1 (Language=Python)'}
headers.update(kwargs.get('extra_headers', {}))
try:
# Note: the params dict is not passed as the `params` argument to request() because the
# URL-encoded query string is needed here anyway in order to sign it. Passing the dict
# would make requests build that string a second time, so the fully built URL is passed instead.
response = request(method, url, data=kwargs.get('body', ''), headers=headers)
response.raise_for_status()
# When retrieving data from the response object,
# be aware that response.content returns the content in bytes while response.text calls
# response.content and converts it to unicode.
data = response.content
# The response headers are not used to decide which content structure to return, simply because
# Amazon's MWS API sometimes returns XML error responses with "text/plain" as the Content-Type.
try:
parsed_response = DictWrapper(data, extra_data.get("Action") + "Result")
except XMLError:
parsed_response = DataWrapper(data, response.headers)
except HTTPError as e:
error = MWSError(str(e.response.text))
error.response = e.response
raise error
# Store the response object in the parsed_response for quick access
parsed_response.response = response
return parsed_response
def get_service_status(self):
"""
Returns a GREEN, GREEN_I, YELLOW or RED status.
Depending on the status/availability of the API its being called from.
"""
return self.make_request(extra_data=dict(Action='GetServiceStatus'))
def calc_signature(self, method, request_description):
"""Calculate MWS signature to interface with Amazon
"""
sig_data = method + '\n' + self.domain.replace('https://', '').lower() + '\n' + self.uri + '\n' + request_description
return base64.b64encode(hmac.new(self.secret_key.encode('utf-8'), sig_data.encode('utf-8'), hashlib.sha256).digest())
def get_timestamp(self):
"""
Returns the current timestamp in proper format.
"""
return strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
def enumerate_param(self, param, values):
"""
Builds a dictionary of an enumerated parameter.
Takes any iterable and returns a dictionary.
ie.
enumerate_param('MarketplaceIdList.Id', (123, 345, 4343))
returns
{
MarketplaceIdList.Id.1: 123,
MarketplaceIdList.Id.2: 345,
MarketplaceIdList.Id.3: 4343
}
"""
params = {}
if values is not None:
if not param.endswith('.'):
param = "%s." % param
for num, value in enumerate(values):
params['%s%d' % (param, (num + 1))] = value
return params
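# Illustrative sketch of the canonical string that calc_signature() signs; the access key,
# seller id and query string below are hypothetical placeholder values.
def _example_string_to_sign():
    request_description = 'AWSAccessKeyId=EXAMPLEKEY&Action=GetServiceStatus&SellerId=EXAMPLESELLER'
    return '\n'.join([
        'GET',                        # HTTP method
        'mws.amazonservices.com',     # lower-cased host with the scheme stripped
        '/',                          # URI of the API section being called
        request_description,          # sorted, URL-encoded parameters (signature excluded)
    ])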
class Feeds(MWS):
""" Amazon MWS Feeds API """
ACCOUNT_TYPE = "Merchant"
def submit_feed(self, feed, feed_type, marketplaceids=None,
content_type="text/xml", purge='false'):
"""
Uploads a feed ( xml or .tsv ) to the seller's inventory.
Can be used for creating/updating products on Amazon.
"""
data = dict(Action='SubmitFeed',
FeedType=feed_type,
PurgeAndReplace=purge)
data.update(self.enumerate_param('MarketplaceIdList.Id.', marketplaceids))
md = calc_md5(feed)
return self.make_request(data, method="POST", body=feed,
extra_headers={'Content-MD5': md, 'Content-Type': content_type})
def get_feed_submission_list(self, feedids=None, max_count=None, feedtypes=None,
processingstatuses=None, fromdate=None, todate=None):
"""
Returns a list of all feed submissions submitted in the previous 90 days.
That match the query parameters.
"""
data = dict(Action='GetFeedSubmissionList',
MaxCount=max_count,
SubmittedFromDate=fromdate,
SubmittedToDate=todate,)
data.update(self.enumerate_param('FeedSubmissionIdList.Id', feedids))
data.update(self.enumerate_param('FeedTypeList.Type.', feedtypes))
data.update(self.enumerate_param('FeedProcessingStatusList.Status.', processingstatuses))
return self.make_request(data)
def get_submission_list_by_next_token(self, token):
data = dict(Action='GetFeedSubmissionListByNextToken', NextToken=token)
return self.make_request(data)
def get_feed_submission_count(self, feedtypes=None, processingstatuses=None, fromdate=None, todate=None):
data = dict(Action='GetFeedSubmissionCount',
SubmittedFromDate=fromdate,
SubmittedToDate=todate)
data.update(self.enumerate_param('FeedTypeList.Type.', feedtypes))
data.update(self.enumerate_param('FeedProcessingStatusList.Status.', processingstatuses))
return self.make_request(data)
def cancel_feed_submissions(self, feedids=None, feedtypes=None, fromdate=None, todate=None):
data = dict(Action='CancelFeedSubmissions',
SubmittedFromDate=fromdate,
SubmittedToDate=todate)
data.update(self.enumerate_param('FeedSubmissionIdList.Id.', feedids))
data.update(self.enumerate_param('FeedTypeList.Type.', feedtypes))
return self.make_request(data)
def get_feed_submission_result(self, feedid):
data = dict(Action='GetFeedSubmissionResult', FeedSubmissionId=feedid)
return self.make_request(data)
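# Illustrative usage sketch for the Feeds API; the credentials and seller id are
# hypothetical placeholders, and calling this would perform a live MWS request.
def _example_submit_inventory_feed(feed_xml):
    feeds_api = Feeds(access_key='EXAMPLE_ACCESS_KEY',
                      secret_key='EXAMPLE_SECRET_KEY',
                      account_id='EXAMPLE_SELLER_ID')
    # '_POST_INVENTORY_AVAILABILITY_DATA_' is a standard MWS feed type; the marketplace
    # id is the US marketplace listed in the MARKETPLACES comment above.
    return feeds_api.submit_feed(feed=feed_xml,
                                 feed_type='_POST_INVENTORY_AVAILABILITY_DATA_',
                                 marketplaceids=['ATVPDKIKX0DER'])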
class Reports(MWS):
""" Amazon MWS Reports API """
ACCOUNT_TYPE = "Merchant"
## REPORTS ###
def get_report(self, report_id):
data = dict(Action='GetReport', ReportId=report_id)
return self.make_request(data)
def get_report_count(self, report_types=(), acknowledged=None, fromdate=None, todate=None):
data = dict(Action='GetReportCount',
Acknowledged=acknowledged,
AvailableFromDate=fromdate,
AvailableToDate=todate)
data.update(self.enumerate_param('ReportTypeList.Type.', report_types))
return self.make_request(data)
def get_report_list(self, requestids=(), max_count=None, types=(), acknowledged=None,
fromdate=None, todate=None):
data = dict(Action='GetReportList',
Acknowledged=acknowledged,
AvailableFromDate=fromdate,
AvailableToDate=todate,
MaxCount=max_count)
data.update(self.enumerate_param('ReportRequestIdList.Id.', requestids))
data.update(self.enumerate_param('ReportTypeList.Type.', types))
return self.make_request(data)
def get_report_list_by_next_token(self, token):
data = dict(Action='GetReportListByNextToken', NextToken=token)
return self.make_request(data)
def get_report_request_count(self, report_types=(), processingstatuses=(), fromdate=None, todate=None):
data = dict(Action='GetReportRequestCount',
RequestedFromDate=fromdate,
RequestedToDate=todate)
data.update(self.enumerate_param('ReportTypeList.Type.', report_types))
data.update(self.enumerate_param('ReportProcessingStatusList.Status.', processingstatuses))
return self.make_request(data)
def get_report_request_list(self, requestids=(), types=(), processingstatuses=(),
max_count=None, fromdate=None, todate=None):
data = dict(Action='GetReportRequestList',
MaxCount=max_count,
RequestedFromDate=fromdate,
RequestedToDate=todate)
data.update(self.enumerate_param('ReportRequestIdList.Id.', requestids))
data.update(self.enumerate_param('ReportTypeList.Type.', types))
data.update(self.enumerate_param('ReportProcessingStatusList.Status.', processingstatuses))
return self.make_request(data)
def get_report_request_list_by_next_token(self, token):
data = dict(Action='GetReportRequestListByNextToken', NextToken=token)
return self.make_request(data)
def request_report(self, report_type, start_date=None, end_date=None, marketplaceids=()):
data = dict(Action='RequestReport',
ReportType=report_type,
StartDate=start_date,
EndDate=end_date)
data.update(self.enumerate_param('MarketplaceIdList.Id.', marketplaceids))
return self.make_request(data)
### ReportSchedule ###
def get_report_schedule_list(self, types=()):
data = dict(Action='GetReportScheduleList')
data.update(self.enumerate_param('ReportTypeList.Type.', types))
return self.make_request(data)
def get_report_schedule_count(self, types=()):
data = dict(Action='GetReportScheduleCount')
data.update(self.enumerate_param('ReportTypeList.Type.', types))
return self.make_request(data)
class Orders(MWS):
""" Amazon Orders API """
URI = "/Orders/2011-01-01"
VERSION = "2011-01-01"
NS = '{https://mws.amazonservices.com/Orders/2011-01-01}'
def list_orders(self, marketplaceids, created_after=None, created_before=None, lastupdatedafter=None,
lastupdatedbefore=None, orderstatus=(), fulfillment_channels=(),
payment_methods=(), buyer_email=None, seller_orderid=None, max_results='100'):
data = dict(Action='ListOrders',
| |
normalisation factor
For this formula, we assume most proteins are multi-pass, and that approximately 30% of the
residues are TM residues. Therefore a rand_30TM_70nonTM can be calculated, that roughly
gives the random identity for the full protein.
rand_30TM_70nonTM = 0.3 * rand_perc_ident_TM + 0.7 * rand_perc_ident_nonTM
Parameters
----------
observed_perc_ident_full_seq: float
the observed average identity of TM region in your MSA which needs to be normalised
rand_perc_ident_TM: float
random identity in TM region, calculated based on your dataset using radomisation method (calc_random_aa_ident)
rand_perc_ident_nonTM: float
random identity in non-TM region, calculated based on your dataset using radomisation method (calc_random_aa_ident)
proportion_seq_TM_residues : float
proportion of the sequence length that is the TM region
To roughly calculate the observed percentage identity of the TM region from the full percentage
identity, it is necessary to estimate the percentage length of the TM region.
For the single-pass human dataset this is 0.0681 (6.8% TM region)
For the multi-pass human dataset this is 0.330 (34% TM region)
For the non-redundant beta-barrel dataset this is 0.348 (35% TM region)
Returns
-------
MSA_aa_ident_norm_factor: float
normalisation factor which will be applied to your observed TM identity
Example:
observed_perc_ident_full_seq = 0.78, proportion_seq_TM_residues = 0.3,
rand_perc_ident_TM = 0.126, rand_perc_ident_nonTM = 0.059
calculated rand_perc_ident_full_protein = 0.079, real_perc_identity = 0.761
calculated observed_perc_ident_TM = 0.791, observed_perc_ident_nonTM = 0.775
calculated n_factor = 0.791/0.775 = 1.021
"""
# calculate proportion of length of full sequence that is nonTM
proportion_seq_nonTM_residues = 1 - proportion_seq_TM_residues
# random percentage identity of the full protein, assuming 30% TM region and 70% nonTM region
rand_perc_ident_full_protein = proportion_seq_TM_residues * rand_perc_ident_TM + proportion_seq_nonTM_residues * rand_perc_ident_nonTM
# calculation of real conservation rate based on the random identity in TM region
# solved for R from observed_perc_ident_full_seq = real_perc_identity + (1-real_perc_identity)*rand_perc_ident_full_protein
# as usual, we assume that the unobserved conservation is a proportion of the observed_changes (i.e. (1-real_perc_identity))
# and that this proportion is exactly the rand_perc_ident_full_protein * real_changes
real_perc_identity = (observed_perc_ident_full_seq - rand_perc_ident_full_protein)/(1 - rand_perc_ident_full_protein)
# from the estimated real_perc_identity of the full protein, calculate the observed percentage identity for the TM region
observed_perc_ident_TM = (1 - real_perc_identity)*rand_perc_ident_TM + real_perc_identity
# from the estimated real_perc_identity of the full protein, calculate the observed percentage identity for the nonTM region
observed_perc_ident_nonTM = (1 - real_perc_identity)*rand_perc_ident_nonTM + real_perc_identity
#calculation of normalisation factor
# for randomised sequences, the aa propensity is the ONLY factor giving an effect
# therefore the ratio of the observed identities gives the normalisation factor
MSA_aa_ident_norm_factor = observed_perc_ident_TM/observed_perc_ident_nonTM
#sys.stdout.write('\nnormalisation factor: %.3f' %MSA_TM_nonTM_aa_ident_norm_factor)
return MSA_aa_ident_norm_factor
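# --- illustrative sketch, not part of the original module -------------------
# A worked example of the normalisation-factor formula above, using the
# rand_perc_ident values from the docstring example (0.126 TM, 0.059 nonTM),
# an assumed full-sequence identity of 0.78 and 30% TM residues. The steps are
# recomputed inline because the name of the enclosing function is not shown in
# this excerpt.
def _example_MSA_norm_factor():
    obs_full, rand_TM, rand_nonTM, prop_TM = 0.78, 0.126, 0.059, 0.3
    rand_full = prop_TM * rand_TM + (1 - prop_TM) * rand_nonTM   # 0.0791
    real = (obs_full - rand_full) / (1 - rand_full)              # ~0.761
    obs_TM = (1 - real) * rand_TM + real                         # ~0.791
    obs_nonTM = (1 - real) * rand_nonTM + real                   # ~0.775
    return obs_TM / obs_nonTM                                    # ~1.021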
def create_matrix_artificial_homologues(aa_prop_ser, seq_len, number_seq, number_mutations):
""" Create a matrix (array) of artificially generated homologue sequences.
1) creates an original random sequence, based on a given aa propensity
2) creates an array of "homologues"
3) in each "homologue", replaces some original aa with a randomly chosen AA, based on the AA propensity
Parameters
----------
aa_prop_ser : pd.Series
Amino acid propensity series
index = A, C, D etc
values = 0.05, 0.06, 0.14, etc
seq_len : int
Length of generated sequences.
number_seq : int
        Number of sequences to be generated.
number_mutations : int
number of mutations to introduce into the "homologues"
Returns
-------
orig_seq : str
original randomly generated sequence
matrix : list
list of strings of artificial homologues
"""
# create the original template sequence
orig_seq = "".join(np.random.choice(aa_prop_ser.index, p=aa_prop_ser) for _ in range(int(seq_len)))
matrix = []
for n in range(number_seq):
# create indices for each AA in orig sequence
inds = list(range(seq_len))
        # choose a random sample of positions to mutate
sam = random.sample(inds, number_mutations)
# convert orig sequence to a list
seq_list = list(orig_seq)
# for each index in the random sample, replace the AA with a random AA
for ind in sam:
seq_list[ind] = np.random.choice(aa_prop_ser.index, p=aa_prop_ser)
# # join to make a new sequence
# new_seq = "".join(seq_list)
# # append to the matrix of "homologues"
# matrix.append(new_seq)
matrix.append(seq_list)
# convert to a numpy array
matrix = np.array(matrix)
return orig_seq, matrix
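# --- illustrative usage sketch, not part of the original module -------------
# Shows how create_matrix_artificial_homologues might be called with a small,
# made-up amino acid propensity series (real propensities would come from your
# dataset, e.g. via count_aa_freq below). Assumes pandas is imported as pd at
# the top of this module, as it is used elsewhere in this file.
def _example_artificial_homologues():
    aa_prop_ser = pd.Series({"A": 0.25, "G": 0.25, "L": 0.25, "V": 0.25})
    orig_seq, matrix = create_matrix_artificial_homologues(
        aa_prop_ser, seq_len=20, number_seq=5, number_mutations=3)
    return orig_seq, matrix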
def count_aa_freq(seq):
# function to calculate aa propensity for each residue
# input should be a string
# output is a pd.DataFrame
aa_dict = {}
for aa in seq:
aa_dict[aa] = 0
for aa in seq:
aa_dict[aa] += 1
prop_dict = {}
for aa in aa_dict:
# sys.stdout.write(aa)
prop_dict[aa] = aa_dict['%s' % aa] / len(seq)
df = pd.Series(prop_dict)
df = pd.DataFrame(df, columns=['freq'])
df = df.transpose()
return df
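# Example (illustrative): count_aa_freq("AALV") returns a one-row DataFrame
# (index "freq") with columns A, L, V and frequencies 0.5, 0.25, 0.25.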
def OLD_calc_MSA_ident_n_factor(observed_perc_ident_TM, rand_perc_ident_TM, rand_perc_ident_nonTM):
"""Calculation of the MSA identity normalisation factor
To roughly calculate the observed percentage identity of the TM region from the full percentage
identity, it is necessary to estimate the percentage length of the TM region.
For the single-pass human dataset this is 0.0681 (6.8% TM region)
For the multi-pass human dataset this is 0.330 (34% TM region)
For the non-redundant beta-barrel dataset this is 0.348 (35% TM region)
For this formula, we assume most proteins are multi-pass, and that approximately 30% of the
residues are TM residues. Therefore a rand_30TM_70nonTM can be calculated, that roughly
gives the random identity for the full protein.
rand_30TM_70nonTM = 0.3 * rand_perc_ident_TM + 0.7 * rand_perc_ident_nonTM
Parameters
----------
    observed_perc_ident_TM: float
        the observed average identity of the TM region in your MSA which needs to be normalised
rand_perc_ident_TM: float
        random identity in the TM region, calculated from your dataset using the randomisation method (calc_random_aa_ident)
rand_perc_ident_nonTM: float
        random identity in the non-TM region, calculated from your dataset using the randomisation method (calc_random_aa_ident)
Returns
-------
n_factor: float
normalisation factor which will be applied to your observed TM identity
TM_ident_n: float
normalised TM identity for MSA
Example:
    observed_perc_ident_TM = 0.78, rand_perc_ident_TM = 0.126, rand_perc_ident_nonTM = 0.059
calculated real_perc_identity = 0.748
calculated observed_perc_ident_nonTM = 0.763
calculated n_factor = 0.78/0.763 = 1.022
"""
# calculation of real conservation rate based on the random identity in TM region
# solved for R from observed_perc_ident_full_seq = real_perc_identity + (1-real_perc_identity)*rand_perc_ident_full_protein
# as usual, we assume that the unobserved conservation is a proportion of the observed_changes (i.e. (1-real_perc_identity))
# and that this proportion is exactly the rand_perc_ident_full_protein * real_changes
real_perc_identity_TM = (observed_perc_ident_TM - rand_perc_ident_TM)/(1 - rand_perc_ident_TM)
# # from the estimated real_perc_identity of the full protein, calculate the observed percentage identity for the TM region
# observed_perc_ident_TM = (1 - real_perc_identity_TM)*rand_perc_ident_TM + real_perc_identity_TM
# from the estimated real_perc_identity of the full protein, calculate the observed percentage identity for the nonTM region
observed_perc_ident_nonTM = (1 - real_perc_identity_TM)*rand_perc_ident_nonTM + real_perc_identity_TM
#calculation of normalisation factor
# for randomised sequences, the aa propensity is the ONLY factor giving an effect
# therefore the ratio of the observed identities gives the normalisation factor
MSA_TM_nonTM_aa_ident_norm_factor = observed_perc_ident_TM/observed_perc_ident_nonTM
#sys.stdout.write('\nnormalisation factor: %.3f' %MSA_TM_nonTM_aa_ident_norm_factor)
return MSA_TM_nonTM_aa_ident_norm_factor
############################################################################################
# #
# Using argparse to enable usage from #
# command line #
# #
############################################################################################
# create a parser object to read user inputs from the command line
parser = argparse.ArgumentParser()
# add command-line options
parser.add_argument("-f", "--function",
required=True,
help=r"Function to be run. Choices are calc_aa_prop, calc_rand_aa_ident, or calc_unobserved_AA_ident")
parser.add_argument("-i", "--input",
default=None,
                    help=r'Full path of input file. '
r'E.g. "C:\Path\to\your\file.xlsx"')
parser.add_argument("-o", "--output",
default=None,
                    help=r'Full path of output file. '
r'E.g. "C:\Path\to\your\file.xlsx"')
parser.add_argument("-c", "--column_name",
default=None,
help='Column name in input file that should be used for analysis.')
# INPUT for random identity via the randomisation method is not really necessary
# parser.add_argument("-l", "--length",
# default=1000,
# help='Sequence length for calc_rand_aa_ident.')
# parser.add_argument("-n", "--number_seq",
# default=1000,
# help='Number of sequences for calc_rand_aa_ident.')
# parser.add_argument("-d", "--ident_in_matrix",
# default=0.7,
# help='Amino acid identity in mutation matrix for calc_rand_aa_ident.')
parser.add_argument("-x", "--full_length_identity",
default=None,
help='Average amino acid identity of full sequences in alignment for calc_MSA_n_factor.')
parser.add_argument("-a", "--rand_aa_ident_1",
default=None,
help='Random aa identity for region 1 (e.g. transmembrane).')
parser.add_argument("-b", "--rand_aa_ident_2",
default=None,
help='Random aa identity for region 2 (e.g. non-transmembrane).')
parser.add_argument("-af", "--fraction_of_region1_in_full_protein",
default=0.3,
help="""Average fraction of sequence that is from region 1 (e.g. fract of residues that are
transmembrane residues) for use in calc_MSA_n_factor.""")
parser.add_argument("-r", "--region",
default=1,
help="""Either 1 or 2. Region of position of interest in sequence.""")
# if MSA_normalisation.py is run as the main python script, obtain the options from the command line.
if __name__ == '__main__':
#sys.stdout.write("\nFor help, run \npython MSA_normalisation.py -h\n")
# obtain command-line arguments
args = parser.parse_args()
if "h" in args:
parser.print_help()
sys.stdout.write("")
if args.function == | |
"""
IMC2 client module. Handles connecting to and communicating with an IMC2 server.
"""
from time import time
from twisted.application import internet
from twisted.internet import protocol
from twisted.conch import telnet
from django.conf import settings
from src.utils import logger, create, search, utils
from src.server.sessionhandler import SESSIONS
from src.scripts.scripts import Script
from src.comms.models import Channel, ExternalChannelConnection
from src.comms.imc2lib import imc2_packets as pck
from src.comms.imc2lib.imc2_trackers import IMC2MudList, IMC2ChanList
from src.comms.imc2lib.imc2_listeners import handle_whois_reply
from django.utils.translation import ugettext as _
# IMC2 network setup
IMC2_MUDNAME = settings.SERVERNAME
IMC2_NETWORK = settings.IMC2_NETWORK
IMC2_PORT = settings.IMC2_PORT
IMC2_CLIENT_PWD = settings.IMC2_CLIENT_PWD
IMC2_SERVER_PWD = settings.IMC2_SERVER_PWD
# channel to send info to
INFOCHANNEL = Channel.objects.channel_search(settings.CHANNEL_MUDINFO[0])
# all linked channel connections
IMC2_CLIENT = None
# IMC2 debug mode
IMC2_DEBUG = False
# Use this instance to keep track of the other games on the network.
IMC2_MUDLIST = IMC2MudList()
# Tracks the list of available channels on the network.
IMC2_CHANLIST = IMC2ChanList()
#
# Helper method
#
def msg_info(message):
"""
Send info to default info channel
"""
try:
INFOCHANNEL[0].msg(message)
message = '[%s][IMC2]: %s' % (INFOCHANNEL[0].key, message)
except Exception:
logger.log_infomsg("MUDinfo (imc2): %s" % message)
#
# Regular scripts
#
class Send_IsAlive(Script):
"""
Sends periodic keepalives to network neighbors. This lets the other
games know that our game is still up and connected to the network. Also
provides some useful information about the client game.
"""
def at_script_creation(self):
self.key = 'IMC2_Send_IsAlive'
self.interval = 900
self.desc = _("Send an IMC2 is-alive packet")
self.persistent = True
def at_repeat(self):
IMC2_CLIENT.send_packet(pck.IMC2PacketIsAlive())
def is_valid(self):
"Is only valid as long as there are channels to update"
return any(service for service in SESSIONS.server.services if service.name.startswith("imc2_"))
class Send_Keepalive_Request(Script):
"""
Event: Sends a keepalive-request to connected games in order to see who
is connected.
"""
def at_script_creation(self):
self.key = "IMC2_Send_Keepalive_Request"
self.interval = 3500
self.desc = _("Send an IMC2 keepalive-request packet")
self.persistent = True
def at_repeat(self):
        IMC2_CLIENT.send_packet(pck.IMC2PacketKeepAliveRequest())
def is_valid(self):
"Is only valid as long as there are channels to update"
return any(service for service in SESSIONS.server.services if service.name.startswith("imc2_"))
class Prune_Inactive_Muds(Script):
"""
Prunes games that have not sent is-alive packets for a while. If
we haven't heard from them, they're probably not connected or don't
implement the protocol correctly. In either case, good riddance to them.
"""
def at_script_creation(self):
self.key = "IMC2_Prune_Inactive_Muds"
self.interval = 1800
self.desc = _("Check IMC2 list for inactive games")
self.persistent = True
self.inactive_threshold = 3599
def at_repeat(self):
        for name, mudinfo in list(IMC2_MUDLIST.mud_list.items()):
if time() - mudinfo.last_updated > self.inactive_threshold:
del IMC2_MUDLIST.mud_list[name]
def is_valid(self):
"Is only valid as long as there are channels to update"
return any(service for service in SESSIONS.server.services if service.name.startswith("imc2_"))
class Sync_Server_Channel_List(Script):
"""
Re-syncs the network's channel list. This will
cause a cascade of reply packets of a certain type
from the network. These are handled by the protocol,
gradually updating the channel cache.
"""
def at_script_creation(self):
self.key = "IMC2_Sync_Server_Channel_List"
self.interval = 24 * 3600 # once every day
self.desc = _("Re-sync IMC2 network channel list")
self.persistent = True
def at_repeat(self):
checked_networks = []
network = IMC2_CLIENT.factory.network
if not network in checked_networks:
            IMC2_CLIENT.send_packet(pck.IMC2PacketIceRefresh())
checked_networks.append(network)
def is_valid(self):
return any(service for service in SESSIONS.server.services if service.name.startswith("imc2_"))
#
# IMC2 protocol
#
class IMC2Protocol(telnet.StatefulTelnetProtocol):
"""
Provides the abstraction for the IMC2 protocol. Handles connection,
authentication, and all necessary packets.
"""
def __init__(self):
global IMC2_CLIENT
IMC2_CLIENT = self
self.is_authenticated = False
self.auth_type = None
self.server_name = None
self.network_name = None
self.sequence = None
def connectionMade(self):
"""
Triggered after connecting to the IMC2 network.
"""
self.auth_type = "plaintext"
if IMC2_DEBUG:
logger.log_infomsg("IMC2: Connected to network server.")
logger.log_infomsg("IMC2: Sending authentication packet.")
self.send_packet(pck.IMC2PacketAuthPlaintext())
def connectionLost(self, reason=None):
"""
This is executed when the connection is lost for
whatever reason.
"""
try:
service = SESSIONS.server.services.getServiceNamed("imc2_%s:%s(%s)" % (IMC2_NETWORK, IMC2_PORT, IMC2_MUDNAME))
except Exception:
return
if service.running:
service.stopService()
def send_packet(self, packet):
"""
Given a sub-class of IMC2Packet, assemble the packet and send it
on its way to the IMC2 server.
Evennia -> IMC2
"""
if self.sequence:
# This gets incremented with every command.
self.sequence += 1
packet.imc2_protocol = self
packet_str = utils.to_str(packet.assemble(self.factory.mudname, self.factory.client_pwd, self.factory.server_pwd))
if IMC2_DEBUG and not (hasattr(packet, 'packet_type') and packet.packet_type == "is-alive"):
logger.log_infomsg("IMC2: SENT> %s" % packet_str)
logger.log_infomsg(str(packet))
self.sendLine(packet_str)
def _parse_auth_response(self, line):
"""
Parses the IMC2 network authentication packet.
"""
if self.auth_type == "plaintext":
# Plain text passwords.
# SERVER Sends: PW <servername> <serverpw> version=<version#> <networkname>
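            # e.g. (hypothetical values): "PW Server01 s3cretpw version=2 MudByNet"
            # -> line_split[1] is the server name, line_split[4] the network name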
if IMC2_DEBUG:
logger.log_infomsg("IMC2: AUTH< %s" % line)
line_split = line.split(' ')
pw_present = line_split[0] == 'PW'
autosetup_present = line_split[0] == 'autosetup'
if "reject" in line_split:
auth_message = _("IMC2 server rejected connection.")
logger.log_infomsg(auth_message)
msg_info(auth_message)
return
if pw_present:
self.server_name = line_split[1]
self.network_name = line_split[4]
elif autosetup_present:
logger.log_infomsg(_("IMC2: Autosetup response found."))
self.server_name = line_split[1]
self.network_name = line_split[3]
self.is_authenticated = True
self.sequence = int(time())
# Log to stdout and notify over MUDInfo.
auth_message = _("Successfully authenticated to the '%s' network.") % self.factory.network
logger.log_infomsg('IMC2: %s' % auth_message)
msg_info(auth_message)
# Ask to see what other MUDs are connected.
self.send_packet(pck.IMC2PacketKeepAliveRequest())
# IMC2 protocol states that KeepAliveRequests should be followed
# up by the requester sending an IsAlive packet.
self.send_packet(pck.IMC2PacketIsAlive())
# Get a listing of channels.
self.send_packet(pck.IMC2PacketIceRefresh())
def _msg_evennia(self, packet):
"""
Handle the sending of packet data to Evennia channel
(Message from IMC2 -> Evennia)
"""
conn_name = packet.optional_data.get('channel', None)
# If the packet lacks the 'echo' key, don't bother with it.
if not conn_name or not packet.optional_data.get('echo', None):
return
imc2_channel = conn_name.split(':', 1)[1]
# Look for matching IMC2 channel maps mapping to this imc2 channel.
conns = ExternalChannelConnection.objects.filter(db_external_key__startswith="imc2_")
conns = [conn for conn in conns if imc2_channel in conn.db_external_config.split(",")]
if not conns:
# we are not listening to this imc2 channel.
return
# Format the message to send to local channel(s).
for conn in conns:
message = '[%s] %s@%s: %s' % (conn.channel.key, packet.sender, packet.origin, packet.optional_data.get('text'))
conn.to_channel(message)
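            # e.g. (illustrative): a connection whose db_external_config is "ichat,igame"
            # relays the IMC2 channels "ichat" and "igame" onto its mapped local channel.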
def _format_tell(self, packet):
"""
Handle tells over IMC2 by formatting the text properly
"""
return _("{c%(sender)s@%(origin)s{n {wpages (over IMC):{n %(msg)s") % {"sender":packet.sender,
"origin":packet.origin,
"msg":packet.optional_data.get('text', 'ERROR: No text provided.')}
def lineReceived(self, line):
"""
Triggered when text is received from the IMC2 network. Figures out
what to do with the packet.
IMC2 -> Evennia
"""
line = line.strip()
if not self.is_authenticated:
self._parse_auth_response(line)
else:
if IMC2_DEBUG and not 'is-alive' in line:
# if IMC2_DEBUG mode is on, print the contents of the packet
# to stdout.
logger.log_infomsg("IMC2: RECV> %s" % line)
# Parse the packet and encapsulate it for easy access
packet = pck.IMC2Packet(self.factory.mudname, packet_str=line)
if IMC2_DEBUG and packet.packet_type not in ('is-alive', 'keepalive-request'):
# Print the parsed packet's __str__ representation.
# is-alive and keepalive-requests happen pretty frequently.
# Don't bore us with them in stdout.
logger.log_infomsg(str(packet))
# Figure out what kind of packet we're dealing with and hand it
# off to the correct handler.
if packet.packet_type == 'is-alive':
IMC2_MUDLIST.update_mud_from_packet(packet)
elif packet.packet_type == 'keepalive-request':
# Don't need to check the destination, we only receive these
# packets when they are intended for us.
self.send_packet(pck.IMC2PacketIsAlive())
elif packet.packet_type == 'ice-msg-b':
self._msg_evennia(packet)
elif packet.packet_type == 'whois-reply':
handle_whois_reply(packet)
elif packet.packet_type == 'close-notify':
IMC2_MUDLIST.remove_mud_from_packet(packet)
elif packet.packet_type == 'ice-update':
IMC2_CHANLIST.update_channel_from_packet(packet)
elif packet.packet_type == 'ice-destroy':
IMC2_CHANLIST.remove_channel_from_packet(packet)
elif packet.packet_type == 'tell':
player = search.players(packet.target)
if not player:
return
player[0].msg(self._format_tell(packet))
def msg_imc2(self, message, from_obj=None, packet_type="imcbroadcast", data=None):
"""
Called by Evennia to send a message through the imc2 connection
"""
if from_obj:
if hasattr(from_obj, 'key'):
from_name = from_obj.key
else:
from_name = from_obj
else:
from_name = self.factory.mudname
if packet_type == "imcbroadcast":
if type(data) == dict:
conns = ExternalChannelConnection.objects.filter(db_external_key__startswith="imc2_",
db_channel=data.get("channel", "Unknown"))
if not conns:
return
# we remove the extra channel info since imc2 supplies this anyway
if ":" in message:
header, message = [part.strip() for part in message.split(":", 1)]
# send the packet
imc2_channel = conns[0].db_external_config.split(',')[0] # only send to the first channel
self.send_packet(pck.IMC2PacketIceMsgBroadcasted(self.factory.servername, imc2_channel,
from_name, message))
elif packet_type == "imctell":
# send a tell
if type(data) == dict:
target = data.get("target", "Unknown")
destination = data.get("destination", "Unknown")
self.send_packet(pck.IMC2PacketTell(from_name, target, destination, message))
elif packet_type == "imcwhois":
# send a whois request
if type(data) == dict:
target = data.get("target", "Unknown")
self.send_packet(pck.IMC2PacketWhois(from_obj.id, target))
class IMC2Factory(protocol.ClientFactory):
"""
Creates instances of the IMC2Protocol. Should really only ever
need to create one connection. Tied in via src/server.py.
"""
protocol = IMC2Protocol
def __init__(self, network, port, mudname, client_pwd, server_pwd):
self.pretty_key = "%s:%s(%s)" % (network, port, mudname)
self.network = network
sname, host = network.split(".", 1)
self.servername = | |
injected a fault in the plugin
self._validate_behavior_on_bulk_failure(res, 'networks')
def test_create_networks_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
orig = QuantumManager.get_plugin().create_network
with mock.patch.object(QuantumManager.get_plugin(),
'create_network') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk('json', 2, 'test', True)
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(res, 'networks')
def test_list_networks(self):
with self.network(name='net1') as net1:
with self.network(name='net2') as net2:
req = self.new_list_request('networks')
res = self.deserialize('json', req.get_response(self.api))
self.assertEquals(res['networks'][0]['name'],
net1['network']['name'])
self.assertEquals(res['networks'][1]['name'],
net2['network']['name'])
def test_list_networks_with_parameters(self):
with self.network(name='net1', admin_status_up=False) as net1:
with self.network(name='net2') as net2:
req = self.new_list_request('networks',
params='admin_state_up=False')
res = self.deserialize('json', req.get_response(self.api))
self.assertEquals(1, len(res['networks']))
self.assertEquals(res['networks'][0]['name'],
net1['network']['name'])
req = self.new_list_request('networks',
params='admin_state_up=true')
res = self.deserialize('json', req.get_response(self.api))
self.assertEquals(1, len(res['networks']))
self.assertEquals(res['networks'][0]['name'],
net2['network']['name'])
def test_list_networks_with_parameters_invalid_values(self):
with self.network(name='net1', admin_status_up=False) as net1:
with self.network(name='net2') as net2:
req = self.new_list_request('networks',
params='admin_state_up=fake')
res = req.get_response(self.api)
self.assertEquals(422, res.status_int)
def test_show_network(self):
with self.network(name='net1') as net:
req = self.new_show_request('networks', net['network']['id'])
res = self.deserialize('json', req.get_response(self.api))
self.assertEquals(res['network']['name'],
net['network']['name'])
def test_invalid_admin_status(self):
fmt = 'json'
value = [[7, False, 400], [True, True, 201], ["True", True, 201],
["true", True, 201], [1, True, 201], ["False", False, 201],
[False, False, 201], ["false", False, 201],
["7", False, 400]]
for v in value:
data = {'network': {'name': 'net',
'admin_state_up': v[0],
'tenant_id': self._tenant_id}}
network_req = self.new_create_request('networks', data)
req = network_req.get_response(self.api)
self.assertEquals(req.status_int, v[2])
if v[2] == 201:
res = self.deserialize(fmt, req)
self.assertEquals(res['network']['admin_state_up'], v[1])
class TestSubnetsV2(QuantumDbPluginV2TestCase):
def _test_create_subnet(self, network=None, **kwargs):
keys = kwargs.copy()
keys.setdefault('cidr', '10.0.0.0/24')
keys.setdefault('ip_version', 4)
keys.setdefault('enable_dhcp', True)
with self.subnet(network=network, **keys) as subnet:
# verify the response has each key with the correct value
for k in keys:
self.assertIn(k, subnet['subnet'])
self.assertEquals(subnet['subnet'][k], keys[k])
return subnet
def test_create_subnet(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
subnet = self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr)
self.assertTrue('name' in subnet['subnet'])
def test_create_two_subnets(self):
gateway_ips = ['10.0.0.1', '10.0.1.1']
cidrs = ['10.0.0.0/24', '10.0.1.0/24']
with self.network() as network:
with self.subnet(network=network,
gateway_ip=gateway_ips[0],
cidr=cidrs[0]):
with self.subnet(network=network,
gateway_ip=gateway_ips[1],
cidr=cidrs[1]):
net_req = self.new_show_request('networks',
network['network']['id'])
raw_res = net_req.get_response(self.api)
net_res = self.deserialize('json', raw_res)
for subnet_id in net_res['network']['subnets']:
sub_req = self.new_show_request('subnets', subnet_id)
raw_res = sub_req.get_response(self.api)
sub_res = self.deserialize('json', raw_res)
self.assertIn(sub_res['subnet']['cidr'], cidrs)
self.assertIn(sub_res['subnet']['gateway_ip'],
gateway_ips)
def test_create_two_subnets_same_cidr_returns_400(self):
gateway_ip_1 = '10.0.0.1'
cidr_1 = '10.0.0.0/24'
gateway_ip_2 = '10.0.0.10'
cidr_2 = '10.0.0.0/24'
with self.network() as network:
with self.subnet(network=network,
gateway_ip=gateway_ip_1,
cidr=cidr_1):
with self.assertRaises(
webob.exc.HTTPClientError) as ctx_manager:
with self.subnet(network=network,
gateway_ip=gateway_ip_2,
cidr=cidr_2):
pass
self.assertEquals(ctx_manager.exception.code, 400)
def test_create_subnets_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk subnet create")
with self.network() as net:
res = self._create_subnet_bulk('json', 2, net['network']['id'],
'test')
self._validate_behavior_on_bulk_success(res, 'subnets')
def test_create_subnets_bulk_emulated(self):
real_has_attr = hasattr
#ensures the API choose the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with self.network() as net:
res = self._create_subnet_bulk('json', 2,
net['network']['id'],
'test')
self._validate_behavior_on_bulk_success(res, 'subnets')
def test_create_subnets_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
#ensures the API choose the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
orig = QuantumManager.get_plugin().create_subnet
with mock.patch.object(QuantumManager.get_plugin(),
'create_subnet') as patched_plugin:
def side_effect(*args, **kwargs):
self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk('json', 2,
net['network']['id'],
'test')
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(res, 'subnets')
def test_create_subnets_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk subnet create")
orig = QuantumManager._instance.plugin.create_subnet
with mock.patch.object(QuantumManager._instance.plugin,
'create_subnet') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk('json', 2,
net['network']['id'],
'test')
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(res, 'subnets')
def test_delete_subnet(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
fmt = 'json'
# Create new network
res = self._create_network(fmt=fmt, name='net',
admin_status_up=True)
network = self.deserialize(fmt, res)
subnet = self._make_subnet(fmt, network, gateway_ip,
cidr, ip_version=4)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEquals(res.status_int, 204)
def test_delete_network(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
fmt = 'json'
# Create new network
res = self._create_network(fmt=fmt, name='net',
admin_status_up=True)
network = self.deserialize(fmt, res)
subnet = self._make_subnet(fmt, network, gateway_ip,
cidr, ip_version=4)
req = self.new_delete_request('networks', network['network']['id'])
res = req.get_response(self.api)
self.assertEquals(res.status_int, 204)
def test_create_subnet_bad_tenant(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': 'bad_tenant_id',
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_create_subnet_defaults(self):
gateway = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.254'}]
enable_dhcp = True
subnet = self._test_create_subnet()
# verify cidr & gw have been correctly generated
self.assertEquals(subnet['subnet']['cidr'], cidr)
self.assertEquals(subnet['subnet']['gateway_ip'], gateway)
self.assertEquals(subnet['subnet']['enable_dhcp'], enable_dhcp)
self.assertEquals(subnet['subnet']['allocation_pools'],
allocation_pools)
def test_create_subnet_with_allocation_pool(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_none_gateway(self):
cidr = '10.0.0.0/24'
self._test_create_subnet(gateway_ip=None,
cidr=cidr)
def test_create_subnet_with_none_gateway_fully_allocated(self):
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.254'}]
self._test_create_subnet(gateway_ip=None,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_none_gateway_allocation_pool(self):
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
self._test_create_subnet(gateway_ip=None,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_v6_allocation_pool(self):
gateway_ip = 'fe80::1'
cidr = 'fe80::0/80'
allocation_pools = [{'start': 'fe80::2',
'end': 'fe80::ffff:fffa:ffff'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_large_allocation_pool(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/8'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'},
{'start': '10.1.0.0',
'end': '10.200.0.100'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_multiple_allocation_pools(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'},
{'start': '10.0.0.110',
'end': '10.0.0.150'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_dhcp_disabled(self):
enable_dhcp = False
self._test_create_subnet(enable_dhcp=enable_dhcp)
def test_create_subnet_gateway_in_allocation_pool_returns_409(self):
gateway_ip = '10.0.0.50'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.100'}]
with self.assertRaises(webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEquals(ctx_manager.exception.code, 409)
def test_create_subnet_overlapping_allocation_pools_returns_409(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.150'},
{'start': '10.0.0.140',
'end': '10.0.0.180'}]
with self.assertRaises(webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEquals(ctx_manager.exception.code, 409)
def test_create_subnet_invalid_allocation_pool_returns_400(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.256'}]
with self.assertRaises(webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEquals(ctx_manager.exception.code, 400)
def test_create_subnet_out_of_range_allocation_pool_returns_400(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.1.6'}]
with self.assertRaises(webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEquals(ctx_manager.exception.code, 400)
def test_update_subnet(self):
with self.subnet() as subnet:
data = {'subnet': {'gateway_ip': '192.168.3.11'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize('json', req.get_response(self.api))
self.assertEqual(res['subnet']['gateway_ip'],
data['subnet']['gateway_ip'])
def test_show_subnet(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
req = self.new_show_request('subnets',
subnet['subnet']['id'])
res = self.deserialize('json', req.get_response(self.api))
self.assertEquals(res['subnet']['id'],
subnet['subnet']['id'])
self.assertEquals(res['subnet']['network_id'],
network['network']['id'])
def test_list_subnets(self):
# NOTE(jkoelker) This would be a good place to use contextlib.nested
# or just drop 2.6 support ;)
with self.network() as network:
with self.subnet(network=network, gateway_ip='10.0.0.1',
cidr='10.0.0.0/24') as subnet:
with self.subnet(network=network, gateway_ip='10.0.1.1',
cidr='10.0.1.0/24') as subnet2:
req = self.new_list_request('subnets')
res = self.deserialize('json',
req.get_response(self.api))
res1 = res['subnets'][0]
res2 = res['subnets'][1]
self.assertEquals(res1['cidr'],
subnet['subnet']['cidr'])
self.assertEquals(res2['cidr'],
subnet2['subnet']['cidr'])
def test_list_subnets_with_parameter(self):
# NOTE(jkoelker) This would be a good place to use contextlib.nested
# or just drop 2.6 support ;)
with self.network() as network:
with self.subnet(network=network, gateway_ip='10.0.0.1',
cidr='10.0.0.0/24') as subnet:
with self.subnet(network=network, gateway_ip='10.0.1.1',
cidr='10.0.1.0/24') as subnet2:
req = self.new_list_request(
'subnets',
params='ip_version=4&ip_version=6')
res = self.deserialize('json',
req.get_response(self.api))
self.assertEquals(2, len(res['subnets']))
req = self.new_list_request('subnets',
params='ip_version=6')
res = self.deserialize('json',
req.get_response(self.api))
self.assertEquals(0, len(res['subnets']))
def test_invalid_ip_version(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 7,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEquals(res.status_int, 422)
def test_invalid_subnet(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'invalid',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEquals(res.status_int, 422)
def test_invalid_ip_address(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': 'ipaddress'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEquals(res.status_int, 422)
def test_invalid_uuid(self):
with self.network() as network:
data = {'subnet': {'network_id': 'invalid-uuid',
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.0.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEquals(res.status_int, 422)
def test_create_subnet_with_one_dns(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
dns_nameservers = ['172.16.58.3']
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
dns_nameservers=dns_nameservers)
def test_create_subnet_with_two_dns(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
dns_nameservers = ['172.16.58.3', '172.16.58.3']
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
dns_nameservers=dns_nameservers)
def test_create_subnet_with_too_many_dns(self):
with self.network() as network:
dns_list = ['1.1.1.1', '172.16.17.32', '172.16.58.3']
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.0.1',
'dns_nameservers': dns_list}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_create_subnet_with_one_host_route(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
host_routes = [{'destination': '192.168.3.11/16',
'nexthop': '172.16.58.3'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
host_routes=host_routes)
def test_create_subnet_with_two_host_routes(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
| |
[
ContentType.objects.get_for_model(m, for_concrete_model=False)
for m in deps
]
Comment.objects.filter(content_type__in=content_ids).delete()
# Prepare message
for m in deps:
messages.add_message(
request,
messages.INFO,
_("Erasing data from %(model)s")
% {"model": force_str(m._meta.verbose_name)},
)
# Finished successfully
return None
@classmethod
def parseCSVupload(cls, request):
"""
This method reads CSV data from a string (in memory) and creates or updates
the database records.
The data must follow the following format:
- the first row contains a header, listing all field names
- a first character # marks a comment line
- empty rows are skipped
"""
# Check permissions
if not cls.model:
yield "<div>%s</div>" % _("Invalid upload request")
return
permname = get_permission_codename("add", cls.model._meta)
if not cls.editable or not request.user.has_perm(
"%s.%s" % (cls.model._meta.app_label, permname)
):
yield "<div>%s</div>" % _("Permission denied")
return
# Choose the right delimiter and language
delimiter = (
get_format("DECIMAL_SEPARATOR", request.LANGUAGE_CODE, True) == ","
and ";"
or ","
)
if translation.get_language() != request.LANGUAGE_CODE:
translation.activate(request.LANGUAGE_CODE)
# Handle the complete upload as a single database transaction
try:
with transaction.atomic(using=request.database):
# Erase all records and related tables
if "erase" in request.POST:
returnvalue = cls.erase(request)
if returnvalue:
yield format_lazy("<div>{}</div>", returnvalue)
return
yield (
'<div class="table-responsive">'
'<table class="table table-condensed" style="white-space: nowrap"><tbody>'
)
for filename, file in request.FILES.items():
numerrors = 0
numwarnings = 0
firsterror = True
yield '<tr style="text-align: center"><th colspan="5">%s</th></tr>' % filename
data = EncodedCSVReader(file, delimiter=delimiter)
for error in parseCSVdata(
cls.model,
data,
user=request.user,
database=request.database,
ping=True,
):
if error[0] == logging.DEBUG:
# Yield some result so we can detect disconnect clients and interrupt the upload
yield " "
continue
if firsterror and error[0] in (logging.ERROR, logging.WARNING):
yield '<tr><th class="sr-only">%s</th><th>%s</th><th>%s</th><th>%s</th><th>%s%s%s</th></tr>' % (
capfirst(_("worksheet")),
capfirst(_("row")),
capfirst(_("field")),
capfirst(_("value")),
capfirst(_("error")),
" / ",
capfirst(_("warning")),
)
firsterror = False
if error[0] == logging.ERROR:
yield '<tr><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s: %s</td></tr>' % (
cls.model._meta.verbose_name,
error[1] if error[1] else "",
error[2] if error[2] else "",
error[3] if error[3] else "",
capfirst(_("error")),
error[4],
)
numerrors += 1
                        elif error[0] == logging.WARNING:
yield '<tr><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s: %s</td></tr>' % (
cls.model._meta.verbose_name,
error[1] if error[1] else "",
error[2] if error[2] else "",
error[3] if error[3] else "",
capfirst(_("warning")),
error[4],
)
numwarnings += 1
else:
yield '<tr class=%s><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>' % (
"danger" if numerrors > 0 else "success",
cls.model._meta.verbose_name,
error[1] if error[1] else "",
error[2] if error[2] else "",
error[3] if error[3] else "",
error[4],
)
yield "</tbody></table></div>"
# Records are committed. Launch notification generator now.
NotificationFactory.launchWorker(
database=request.database,
url="%s://%s"
% ("https" if request.is_secure() else "http", request.get_host())
if request
else None,
)
except GeneratorExit:
logging.warning("Connection Aborted")
except NameError:
pass
@classmethod
def parseSpreadsheetUpload(cls, request):
"""
This method reads a spreadsheet file (in memory) and creates or updates
the database records.
The data must follow the following format:
- only the first tab in the spreadsheet is read
- the first row contains a header, listing all field names
- a first character # marks a comment line
- empty rows are skipped
"""
# Check permissions
if not cls.model:
yield "<div>%s</div>" % _("Invalid upload request")
return
permname = get_permission_codename("add", cls.model._meta)
if not cls.editable or not request.user.has_perm(
"%s.%s" % (cls.model._meta.app_label, permname)
):
yield "<div>%s</div>" % _("Permission denied")
return
# Choose the right language
if translation.get_language() != request.LANGUAGE_CODE:
translation.activate(request.LANGUAGE_CODE)
# Handle the complete upload as a single database transaction
try:
with transaction.atomic(using=request.database):
# Erase all records and related tables
if "erase" in request.POST:
returnvalue = cls.erase(request)
if returnvalue:
yield '<br><samp style="padding-left: 15px;">%s</samp><br>' % returnvalue
                        return
# Header in output
yield (
'<div class="table-responsive">'
'<table class="table table-condensed" style="white-space: nowrap"><tbody>'
)
for filename, file in request.FILES.items():
numerrors = 0
numwarnings = 0
firsterror = True
yield '<tr style="text-align: center"><th colspan="5">%s<div class="recordcount pull-right"></div></th></tr>' % filename
# Loop through the data records
wb = load_workbook(filename=file, read_only=True, data_only=True)
numsheets = len(wb.sheetnames)
for ws_name in wb.sheetnames:
rowprefix = "" if numsheets == 1 else "%s " % ws_name
ws = wb[ws_name]
for error in parseExcelWorksheet(
cls.model,
ws,
user=request.user,
database=request.database,
ping=True,
):
if error[0] == logging.DEBUG:
# Yield some result so we can detect disconnect clients and interrupt the upload
yield "<tr class='hidden' data-cnt='%s'>" % error[1]
continue
if firsterror and error[0] in (
logging.ERROR,
logging.WARNING,
):
yield '<tr><th class="sr-only">%s</th><th>%s</th><th>%s</th><th>%s</th><th>%s%s%s</th></tr>' % (
capfirst(_("worksheet")),
capfirst(_("row")),
capfirst(_("field")),
capfirst(_("value")),
capfirst(_("error")),
" / ",
capfirst(_("warning")),
)
firsterror = False
if error[0] == logging.ERROR:
yield '<tr><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s: %s</td></tr>' % (
cls.model._meta.verbose_name,
error[1] if error[1] else "",
"%s%s" % (rowprefix, error[2]) if error[2] else "",
error[3] if error[3] else "",
capfirst(_("error")),
error[4],
)
numerrors += 1
                            elif error[0] == logging.WARNING:
yield '<tr><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s: %s</td></tr>' % (
cls.model._meta.verbose_name,
error[1] if error[1] else "",
"%s%s" % (rowprefix, error[2]) if error[2] else "",
error[3] if error[3] else "",
capfirst(_("warning")),
error[4],
)
numwarnings += 1
else:
yield '<tr class=%s><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>' % (
"danger" if numerrors > 0 else "success",
cls.model._meta.verbose_name,
error[1] if error[1] else "",
"%s%s" % (rowprefix, error[2]) if error[2] else "",
error[3] if error[3] else "",
error[4],
)
yield "</tbody></table></div>"
# Records are committed. Launch notification generator now.
NotificationFactory.launchWorker(
database=request.database,
url="%s://%s"
% ("https" if request.is_secure() else "http", request.get_host())
if request
else None,
)
except GeneratorExit:
logger.warning("Connection Aborted")
except NameError:
pass
@classmethod
def _getRowByName(cls, request, name):
if not hasattr(cls, "_rowsByName"):
cls._rowsByName = {}
for i in request.rows:
cls._rowsByName[i.name] = i
if i.field_name != i.name:
cls._rowsByName[i.field_name] = i
return cls._rowsByName[name]
@staticmethod
def _filter_ne(query, reportrow, data, database=DEFAULT_DB_ALIAS):
if isinstance(
reportrow, (GridFieldCurrency, GridFieldInteger, GridFieldNumber)
):
return ~models.Q(
**{"%s__exact" % reportrow.field_name: smart_str(data).strip()}
)
elif isinstance(reportrow, GridFieldChoice):
t = smart_str(data).strip().lower()
# Comparison also with the translated choices
for c in reportrow.choices:
if t == force_str(c[1]).lower():
return ~models.Q(**{"%s__iexact" % reportrow.field_name: c[0]})
return ~models.Q(
**{"%s__iexact" % reportrow.field_name: smart_str(data).strip()}
)
else:
return ~models.Q(
**{"%s__iexact" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_bn(query, reportrow, data, database=DEFAULT_DB_ALIAS):
if isinstance(reportrow, GridFieldChoice):
# Comparison with the translated choices only
accepted = []
t = smart_str(data).strip().lower()
for c in reportrow.choices:
if force_str(c[1]).lower().startswith(t):
accepted.append(c[0])
return ~models.Q(
**{"%s__in" % reportrow.field_name: accepted or ["--dummy--"]}
)
else:
return ~models.Q(
**{"%s__istartswith" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_en(query, reportrow, data, database=DEFAULT_DB_ALIAS):
if isinstance(reportrow, GridFieldChoice):
# Comparison with the translated choices only
accepted = []
t = smart_str(data).strip().lower()
for c in reportrow.choices:
if force_str(c[1]).lower().endswith(t):
accepted.append(c[0])
return ~models.Q(
**{"%s__in" % reportrow.field_name: accepted or ["--dummy--"]}
)
else:
return ~models.Q(
**{"%s__iendswith" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_nc(query, reportrow, data, database=DEFAULT_DB_ALIAS):
if isinstance(
reportrow, (GridFieldCurrency, GridFieldInteger, GridFieldNumber)
):
return ~models.Q(
**{"%s__contains" % reportrow.field_name: smart_str(data).strip()}
)
elif isinstance(reportrow, GridFieldChoice):
# Comparison with the translated choices
accepted = []
t = data.strip().lower()
for c in reportrow.choices:
if t in force_str(c[1]).lower():
accepted.append(c[0])
return ~models.Q(
**{"%s__in" % reportrow.field_name: accepted or ["--dummy--"]}
)
else:
return ~models.Q(
**{"%s__icontains" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_ni(query, reportrow, data, database=DEFAULT_DB_ALIAS):
if isinstance(reportrow, GridFieldChoice):
# Comparison also with the translated choices
accepted = []
for f in smart_str(data).split(","):
t = f.strip().lower()
for c in reportrow.choices:
if t in (c[0].lower(), force_str(c[1]).lower()):
accepted.append(c[0])
return ~models.Q(**{"%s__in" % reportrow.field_name: accepted})
else:
return ~models.Q(
**{"%s__in" % reportrow.field_name: smart_str(data).strip().split(",")}
)
@staticmethod
def _filter_in(query, reportrow, data, database=DEFAULT_DB_ALIAS):
if isinstance(reportrow, GridFieldChoice):
# Comparison also with the translated choices
accepted = []
for f in smart_str(data).split(","):
t = f.strip().lower()
for c in reportrow.choices:
if t in (c[0].lower(), force_str(c[1]).lower()):
accepted.append(c[0])
return models.Q(**{"%s__in" % reportrow.field_name: accepted})
else:
return models.Q(
**{"%s__in" % reportrow.field_name: smart_str(data).strip().split(",")}
)
@staticmethod
def _filter_eq(query, reportrow, data, database=DEFAULT_DB_ALIAS):
if isinstance(
reportrow, (GridFieldCurrency, GridFieldInteger, GridFieldNumber)
):
return models.Q(
**{"%s__exact" % reportrow.field_name: smart_str(data).strip()}
)
elif isinstance(reportrow, GridFieldChoice):
t = smart_str(data).strip().lower()
# Comparison with the translated choices only
for c in reportrow.choices:
if t == force_str(c[1]).lower():
return models.Q(**{"%s__iexact" % reportrow.field_name: c[0]})
return models.Q(
**{"%s__iexact" % reportrow.field_name: smart_str(data).strip()}
)
else:
return models.Q(
**{"%s__iexact" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_bw(query, reportrow, data, database=DEFAULT_DB_ALIAS):
if isinstance(reportrow, GridFieldChoice):
# Comparison with the translated choices only
accepted = []
t = data.strip().lower()
for c in reportrow.choices:
if force_str(c[1]).lower().startswith(t):
accepted.append(c[0])
return models.Q(
**{"%s__in" % reportrow.field_name: accepted or ["--dummy--"]}
)
else:
return models.Q(
**{"%s__istartswith" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_gt(query, reportrow, data, database=DEFAULT_DB_ALIAS):
return models.Q(**{"%s__gt" % reportrow.field_name: smart_str(data).strip()})
@staticmethod
def _filter_gte(query, reportrow, data, database=DEFAULT_DB_ALIAS):
return models.Q(**{"%s__gte" % reportrow.field_name: | |
import os
import json
import base64
import socket
import sqlite3
import datetime
from time import time, sleep
from dkv import demez_key_values as dkv
from uuid import uuid4, UUID
from threading import Thread
from api2.ftp_server import FTPServerAPI
from api2.listener import SocketListener
from api2.dir_tools import CreateDirectory
from api2.shared import *
# ----- These are needed for comparing client and server version -----
# update this whenever the json dict format or encoding/decoding is changed,
# like something that won't be compatible across versions in SendPacket and/or Listener is changed
PROTOCOL_VERSION = 1
PACKET_VERSION = 1
# how messages are sent/displayed,
MESSAGE_VERSION = 1
USER_INFO_VERSION = 1
SERVER_CONFIG_PATH = "server_config.dkv"
CreateDirectory("channels")
class ServerClient(BaseClient, Thread):
def __init__(self, server, connection: socket.socket, ip: str, port: int) -> None:
BaseClient.__init__(self, connection, ip, port)
Thread.__init__(self)
# we can wait for events on multiple sockets and then read and write data when it’s ready
self.socket.setblocking(True) # maybe try designing this with this set to false?
self.server = server
self.private_uuid = None
self.public_uuid = None
self.username = None
self.user_tag = None
self._uuid_verified = False
self.listener = SocketListener(self, self.socket)
self.listener.start()
self.event_function_dict.update({
"init_uuid": self.InitCheckUUID,
"request_uuid": self.InitRequestUUID,
"init_version": self.InitVersionCheck,
"user_info": self.ReceiveUserInfo,
"full_update": self.FullUpdate,
"receive_message": self.ReceiveMessage,
"send_message": self.ReceiveMessage,
"channel_messages": self.SendChannelMessageRange,
})
def WaitForResponse(self) -> dict:
try:
while True:
if self.listener.event_queue:
event = self.listener.event_queue[0]
self.listener.event_queue.remove(event)
return event
elif not self.listener.connected:
return {}
sleep(0.1)
except Exception as F:
PrintException(F, "Exception Waiting for Response: ")
return {}
def InitCheckUUID(self, uuid: Packet) -> None:
self.private_uuid = uuid.content["private"]
self.public_uuid = uuid.content["public"]
all_uuids = self.server.user_info_file.GetAllPrivateUUIDS()
        if self.private_uuid not in all_uuids:  # and \
# str(self.public_uuid) not in self.server.user_info_file.GetAllPublicUUIDS():
self.SendPacket("wrong_uuid")
self.WaitForResponse()
self.Disconnect()
else:
self.SendPacket("valid_uuid")
self._uuid_verified = True
self.listener.uuid_verified = True
def InitRequestUUID(self, placeholder: Packet = None) -> None:
self.private_uuid = str(self.server.user_info_file.MakePrivateUUID())
self.public_uuid = str(self.server.user_info_file.MakePublicUUID())
self.SendPacket("send_uuid", {"private": self.private_uuid, "public": self.public_uuid})
self.server.ftp_server.AddUser(self.public_uuid, self.private_uuid)
self._uuid_verified = True
self.listener.uuid_verified = True
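    # UUID handshake (as implemented above): a returning client sends "init_uuid"
    # with its private/public UUID pair and gets "valid_uuid" or "wrong_uuid" back;
    # a new client sends "request_uuid" and receives a freshly generated pair via
    # "send_uuid", which is also registered with the FTP server.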
def InitVersionCheck(self, client_version: int) -> None:
pass
def FullUpdate(self, placeholder: Packet = None) -> None:
self.SendPacket("channel_list", self.server.GetChannelList())
self.SendPacket("member_list", {"member_list": self.server.user_info_file.GetAllUsersPublic()})
self.SendPacket("server_info", {"server_name": self.server.name})
def ReceiveUserInfo(self, user_info: Packet) -> None:
self.username = user_info.content["username"]
if "user_info" not in user_info.content:
self.user_tag = self.server.user_info_file.MakeUserTag(self.username)
self.SendPacket("user_tag", {"user_tag": self.user_tag})
else:
self.user_tag = user_info.content["user_tag"]
self.server.user_info_file.HandleUserJoin(self.username, self.user_tag, str(self.public_uuid), str(self.private_uuid))
# self.FullUpdate()
def ReceiveMessage(self, message: Packet) -> None:
channel = self.server.GetChannel(message.content["channel"])
channel.AddMessage(message)
message.content["recv"] = message.recv
self.server.Broadcast("receive_message", message.content)
def SendChannelMessageRange(self, event_dict: Packet) -> None:
# ask for a section of the channel event history
channel = self.server.GetChannel(event_dict.content["channel_name"])
channel_page = channel.GetMessages(event_dict.content["message_index"],
50, # might allow client to request more than 50 messages at a time
# also would need to check across event function versions
event_dict.content["direction"])
# channel_page = channel.GetAllMessagesTest()
self.SendPacket("channel_messages", {
"channel_name": event_dict.content["channel_name"],
"start_message": event_dict.content["message_index"],
"message_count": 50,
"messages": channel_page,
})
def HandleEvent(self, packet: Packet) -> None:
if packet.event in self.event_function_dict.keys():
if packet.content:
self.event_function_dict[packet.event](packet)
else:
self.event_function_dict[packet.event]()
else:
TimePrint("Unknown Event: " + packet.event)
def Ping(self) -> None:
self.SendPacket("ping")
def SendDisconnect(self, reason: str):
self.SendPacket("disconnect", {"reason": reason})
def Disconnect(self):
self.socket.close()
self.listener.Stop()
self.server.RemoveClient(self)
self._stopping = True
TimePrint(f"Disconnected - {self.address}")
def run(self) -> None:
TimePrint("socket running")
try:
while True:
while len(self.listener.event_queue) > 0:
event = self.listener.event_queue[0]
self.listener.event_queue.remove(event)
self.HandleEvent(event)
if self._stopping or not self.listener.connected:
self.Disconnect()
break
                # without a short sleep this loop busy-waits, driving up CPU usage and
                # slowing the whole program down, so pause briefly between iterations
sleep(0.1)
except Exception as F:
self.SendDisconnect(" ".join(F.args))
PrintException(F, "Exception On Client Loop, Disconnecting Client: ")
self.Disconnect()
# do i need to have the socket have the channels or some shit?
# no, just connect to the file whenever you need to read/write something
class Channel:
def __init__(self, name: str, description: str = "") -> None:
self.name = name
self.description = description
file = sqlite3.connect("channels/" + name + ".db")
crsr = file.cursor()
# do we have a message table here?
try:
# why does this not work
# CHECK(TYPEOF(time) == 'FLOAT')
# CHECK(TYPEOF(user) == 'CHAR')
# CREATE TABLE if not exists messages
crsr.execute("""
CREATE TABLE messages (
time FLOAT,
user CHAR(36) NOT NULL,
text TEXT(4096)
);""")
except sqlite3.OperationalError as F:
print(str(F))
pass
def GetMessageCount(self) -> int:
file, cursor = self.OpenFile()
cursor.execute("select count (*) from messages;")
message_count = cursor.fetchone()[0]
file.close()
return message_count
def ConnectToFile(self) -> sqlite3.Connection:
return sqlite3.connect("channels/" + self.name + ".db")
@staticmethod
def SaveAndClose(file: sqlite3.Connection) -> None:
file.commit()
file.close()
def GetCursor(self) -> sqlite3.Cursor:
return self.ConnectToFile().cursor()
def OpenFile(self) -> tuple:
file = self.ConnectToFile()
return file, file.cursor()
def DeleteEvent(self, event) -> None:
file, cursor = self.OpenFile()
# delete
# cursor.execute("""DROP TABLE employee;""")
def ExceptExcute(self, ):
pass
# TODO: fix being able to put quotes in here, it doesn't work
def AddMessage(self, message: Packet) -> None:
file, cursor = self.OpenFile()
# time_received = str(datetime.datetime.fromtimestamp(message["time_received"]))
cursor.execute(
"""INSERT INTO messages (time, user, text) VALUES (?, ?, ?);""",
(message.recv, message.content["name"], message.content["text"]))
self.SaveAndClose(file)
def GetAllMessagesTest(self) -> list:
file, cursor = self.OpenFile()
cursor.execute("SELECT * FROM messages ORDER BY time ASC")
messages = cursor.fetchall()
file.close()
return messages
def GetMessages(self, start_message_index: int, message_count: int, msg_direction: str) -> dict:
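        # Intended to return a dict mapping message indices to message rows, e.g.
        # (hypothetical) GetMessages(120, 50, "back") -> {119: row, 118: row, ...},
        # counting down for "back" (DESC by time) and up for "forward" (ASC by time),
        # so the client can merge the page into its local history.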
total_message_count = self.GetMessageCount() - 1
file, cursor = self.OpenFile()
if msg_direction == "back":
start_message_index -= 1
direction = "DESC"
elif msg_direction == "forward":
start_message_index += 1
direction = "ASC"
else:
return {}
# cmd = f"SELECT COUNT(?) from messages ORDER BY time {direction}"
# cmd = f"SELECT COUNT(?) from messages ORDER BY time {direction} offset 0"
cmd = f"SELECT * from messages ORDER BY time {direction} limit ?"
# cmd = f"SELECT * from messages ORDER BY time {direction} limit ? offset ?"
# cmd = "SELECT * from messages ORDER BY time " + direction
try:
# cursor.execute(cmd, (message_count, total_message_count - start_message_index))
# cursor.execute(cmd) # 0 SUPPLIED
cursor.execute(cmd, (str(message_count),)) # 2 SUPPLIED
# WHAT THE FUCK
# cursor.execute(cmd, (direction, ))
except Exception as F:
PrintException(F, "Exception Getting Messages From Channel File: ")
return {}
messages = cursor.fetchall()
file.close()
message_dict = {}
if direction == "DESC":
for index, message in enumerate(messages):
message_dict[start_message_index - index] = message
elif direction == "ASC":
# TODO: test this
for index, message in enumerate(messages):
message_dict[start_message_index + index] = message
return message_dict
    def RunCommand(self, command: str) -> list:
        file, cursor = self.OpenFile()
        # fetch before closing the connection, otherwise the returned cursor is unusable
        output = cursor.execute(command).fetchall()
        self.SaveAndClose(file)
        return output
# TODO: add search tags, or make functions to call depending on the tags
def Search(self, string: str) -> None:
return
# TODO: make a server config file, like how we have a user config file for clients
class Server:
def __init__(self, name: str, ip: str, port: int, ftp_ip: str, ftp_port: int, max_clients: int) -> None:
self.name = name
self.ip = ip
self.port = int(port)
self.max_clients = int(max_clients)
self.ftp_server = FTPServerAPI(self, int(max_clients), ftp_ip, int(ftp_port))
self.client_uuid_list = {}
self.channel_list = []
self.user_info_file = UserInfoFile()
self.server_config = ServerConfig(self)
for user in self.user_info_file.users:
self.ftp_server.AddUser(user.public_uuid, user.private_uuid)
        # The first argument AF_INET is the address domain of the socket.
# This is used when we have an Internet Domain with any two hosts.
# The second argument is the type of socket.
# SOCK_STREAM means that data or characters are read in a continuous flow.
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.client_list = []
self.con_var_list = []
self.con_command_dict = {
"find": self.Find, # TODO: move to cli_server?
# "add_channel": self.AddChannel,
# "rm_channel": self.RemoveChannel,
}
self.Start()
def Start(self) -> None:
self.socket.bind((self.ip, self.port))
Thread(target=self.ListenForClients, args=()).start()
Thread(target=self.ListenConsole, args=()).start() # TODO: remove this and move to cli version
Thread(target=self.ftp_server.StartServer, args=()).start()
def Close(self) -> None:
self.socket.close()
TimePrint("Server closed")
def GetChannelList(self) -> dict:
channel_dict = {}
for channel in self.channel_list:
channel_dict[channel.name] = {
"desc": channel.description,
"count": channel.GetMessageCount(),
}
return channel_dict
    def GetChannel(self, channel_name: str) -> Channel:
        for channel in self.channel_list:
            if channel.name == channel_name:
                return channel
        raise Exception("Channel does not exist")
    def RemoveClient(self, client: ServerClient) -> None:
        # client_list holds ServerClient objects (see Broadcast), so remove the client itself
        if client in self.client_list:
            self.client_list.remove(client)
        TimePrint("-------- {0} disconnected --------".format(client.address))
        del client
    # used to relay an event (e.g. a received message) to every connected client
def Broadcast(self, command: str, *args) -> None:
        for client in self.client_list:
            client.SendPacket(command, *args)
def Find(self, search: str) -> None:
result = []
for con_command in self.con_command_dict.keys():
if search in con_command:
result.append(con_command)
if result:
print(" - " + "\n - ".join(result))
else:
print("No | |
import time
import json
import sys
from docplex.mp.model import Model
from docplex.util.environment import get_environment
import constants
from utils import initial_validation, output_result
from path_gen import path_gen
RU_ID = "id"
DRC_str = "drc"
RU_POS = "ru"
DU_POS = "du"
CU_POS = "cu"
PATH = "path"
class Path:
def __init__(self, id, source, target, seq, p1, p2, p3, delay_p1, delay_p2, delay_p3):
self.id = id
self.source = source
self.target = target
self.seq = seq
self.p1 = p1
self.p2 = p2
self.p3 = p3
self.delay_p1 = delay_p1
self.delay_p2 = delay_p2
self.delay_p3 = delay_p3
def __str__(self):
return "ID: {}\tSEQ: {}\t P1: {}\t P2: {}\t P3: {}\t dP1: {}\t dP2: {}\t dP3: {}".format(self.id, self.seq, self.p1, self.p2, self.p3, self.delay_p1, self.delay_p2, self.delay_p3)
# include RAM in the constructor call
class CR:
def __init__(self, id, cpu, num_BS, ram):
self.id = id
self.cpu = cpu
self.num_BS = num_BS
self.ram = ram
def __str__(self):
return "ID: {}\tCPU: {}".format(self.id, self.cpu)
class DRC:
def __init__(self, id, cpu_CU, cpu_DU, cpu_RU, ram_CU, ram_DU, ram_RU, Fs_CU, Fs_DU, Fs_RU, delay_BH, delay_MH,
delay_FH, bw_BH, bw_MH, bw_FH):
self.id = id
self.cpu_CU = cpu_CU
self.ram_CU = ram_CU
self.Fs_CU = Fs_CU
self.cpu_DU = cpu_DU
self.ram_DU = ram_DU
self.Fs_DU = Fs_DU
self.cpu_RU = cpu_RU
self.ram_RU = ram_RU
self.Fs_RU = Fs_RU
self.delay_BH = delay_BH
self.delay_MH = delay_MH
self.delay_FH = delay_FH
self.bw_BH = bw_BH
self.bw_MH = bw_MH
self.bw_FH = bw_FH
class Fs:
def __init__(self, id, f_cpu, f_ram):
self.id = id
self.f_cpu = f_cpu
self.f_ram = f_ram
class RU:
def __init__(self, id, CR):
self.id = id
self.CR = CR
def __str__(self):
return "RU: {}\tCR: {}".format(self.id, self.CR)
# Global vars
links = []
capacity = {}
delay = {}
CRs = {}
paths = {}
conj_Fs = {}
def read_topology():
"""
READ TELEFONICA TOPOLOGY FILE
This method read the topology json file and create the main structure that will be used in all model fases
:rtype: insert in the globals structures the topology information. For that the method has no return
"""
with open("mini_topo_links.json") as json_file:
data = json.load(json_file)
print("topology:")
print(data)
    # create a set of links with delay and capacity read from the json file, stored in the global list "links"
json_links = data["links"]
for item in json_links:
link = item
source_node = int(link["fromNode"])
destination_node = int(link["toNode"])
            # create full-duplex links for each link read from the json file
if source_node < destination_node:
capacity[(source_node, destination_node)] = link["capacity"]
delay[(source_node, destination_node)] = float(link["delay"])
links.append((source_node, destination_node))
#add this for Gustavo topo
capacity[(destination_node, source_node)] = link["capacity"]
delay[(destination_node, source_node)] = float(link["delay"])
links.append((destination_node, source_node))
            # create full-duplex links for each link read from the json file
else:
capacity[(destination_node, source_node)] = link["capacity"]
delay[(destination_node, source_node)] = float(link["delay"])
links.append((destination_node, source_node))
#add this for Gustavo topo
capacity[(source_node, destination_node)] = link["capacity"]
delay[(source_node, destination_node)] = float(link["delay"])
links.append((source_node, destination_node))
    # create and store the set of CRs with RAM and CPU in the global dict "CRs" - CRs[0] is the core network node, which has no CR resources
with open("mini_topo_nodes.json") as json_file:
data = json.load(json_file)
print("nodes:")
print(data)
json_nodes = data["nodes"]
for item in json_nodes:
node = item
CR_id = node["nodeNumber"]
node_RAM = node["RAM"]
node_CPU = node["cpu"]
cr = CR(CR_id, node_CPU*1000, node_RAM, node_RAM*1024)
CRs[CR_id] = cr
CRs[0] = CR(0, 0, 0, 0)
    # create the set of paths that were calculated previously by the algorithm implemented in "path_gen.py"
with open('paths.json') as json_paths_file:
# read the json file that contain the set of paths
json_paths_f = json.load(json_paths_file)
json_paths = json_paths_f["paths"]
# for each path calculate the id, source (always the core node) and destination
for item in json_paths:
path = json_paths[item]
path_id = path["id"]
path_source = path["source"]
if path_source == "CN":
path_source = 0
path_target = path["target"]
path_seq = path["seq"]
            # collect the intermediate sub-paths p1, p2 and p3 (these correspond to BH, MH and FH respectively)
paths_p = [path["p1"], path["p2"], path["p3"]]
list_p1 = []
list_p2 = []
list_p3 = []
for path_p in paths_p:
aux = ""
sum_delay = 0
for tup in path_p:
aux += tup
tup_aux = tup
tup_aux = tup_aux.replace('(', '')
tup_aux = tup_aux.replace(')', '')
tup_aux = tuple(map(int, tup_aux.split(', ')))
if path_p == path["p1"]:
list_p1.append(tup_aux)
elif path_p == path["p2"]:
list_p2.append(tup_aux)
elif path_p == path["p3"]:
list_p3.append(tup_aux)
sum_delay += delay[tup_aux]
if path_p == path["p1"]:
delay_p1 = sum_delay
elif path_p == path["p2"]:
delay_p2 = sum_delay
elif path_p == path["p3"]:
delay_p3 = sum_delay
if path_seq[0] == 0:
delay_p1 = 0
if path_seq[1] == 0:
delay_p2 = 0
# create the path and store at the global dict "paths"
p = Path(path_id, path_source, path_target, path_seq, list_p1, list_p2, list_p3, delay_p1, delay_p2,
delay_p3)
paths[path_id] = p
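# Illustrative shape of a single "paths.json" entry as consumed above (field names taken
# from the parser; the actual values depend on path_gen.py):
#   "7": {
#       "id": 7,
#       "source": "CN",               # "CN" is mapped to CR 0 (the core)
#       "target": 5,
#       "seq": [0, 3, 5],             # CRs hosting [CU, DU, RU]; 0 means that segment is absent
#       "p1": ["(0, 1)", "(1, 3)"],   # BH hops as "(from, to)" strings
#       "p2": ["(3, 4)", "(4, 5)"],   # MH hops
#       "p3": []                      # FH hops
#   }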
def DRC_structure():
CU_CPU = 564
DU_CPU = 624
RU_CPU = 627
CU_MEM = 152
DU_MEM = 529
RU_MEM = 108
# create the DRC's and the set of DRC's
# DRC5 = 8 -> NG-RAN(3) [CU]-[DU]-[RU]
#DRC5 = DRC(5, 0.98, 0.735, 3.185, 0, 0, 0, [1, 2], [
# 3, 4, 5], [6, 7, 8], 30, 30, 2, 151, 151, 152)
DRC5 = DRC(5, CU_CPU, DU_CPU, RU_CPU, CU_MEM, DU_MEM, RU_MEM, ['f8', 'f7'], ['f6', 'f5', 'f4', 'f3'], ['f2', 'f1', 'f0'], 30, 30, 2, 151, 151, 152)
# DRC7 = 13 -> NG-RAN(2) [CU]-[DU+RU]
#DRC7 = DRC(7, 0, 3, 3.92, 0, 0, 0, [0], [1, 2], [
# 3, 4, 5, 6, 7, 8], 0, 30, 30, 0, 151, 151)
DRC7 = DRC(7, 0, CU_CPU, DU_CPU + RU_CPU, 0, CU_MEM, DU_MEM + RU_MEM, [0], ['f8', 'f7'], ['f6', 'f5', 'f4', 'f3', 'f2', 'f1', 'f0'], 0, 30, 30, 0, 151, 151)
# DRC10 = 17 -> C-RAN [CU+DU]-[RU]
#DRC10 = DRC(10, 0, 1.71, 3.185, 0, 0, 0, [0], [
# 1, 2, 3, 4, 5], [6, 7, 8], 0, 30, 2, 0, 151, 152)
DRC10 = DRC(10, 0, CU_CPU + DU_CPU, RU_CPU, 0, CU_MEM + DU_MEM, RU_MEM, [0], ['f8', 'f7', 'f6', 'f5', 'f4', 'f3'], ['f2', 'f1', 'f0'], 0, 30, 2, 0, 151, 152)
# DRC8 = 19 -> D-RAN [CU+DU+RU]
#DRC8 = DRC(8, 0, 0, 4.9, 0, 0, 0, [0], [0], [
# 1, 2, 3, 4, 5, 6, 7, 8], 0, 0, 30, 0, 0, 151)
DRC8 = DRC(8, 0, 0, CU_CPU + DU_CPU + RU_CPU, 0, 0, CU_MEM + DU_MEM + RU_MEM, [0], [0], ['f8', 'f7', 'f6', 'f5', 'f4', 'f3', 'f2', 'f1', 'f0'], 0, 0, 30, 0, 0, 151)
# set of DRC's
DRCs = {5: DRC5, 7: DRC7, 8: DRC8, 10: DRC10}
return DRCs
def ru_location():
"""
Read TELEFONICA topology files
:return:
"""
rus = {}
count = 1
with open("mini_topo_nodes.json") as json_file:
data = json.load(json_file)
json_CRs = data["nodes"]
for item in json_CRs:
node = item
num_rus = node["RU"]
num_CR = node["nodeNumber"]
for i in range(0, num_rus):
rus[count] = RU(count, int(num_CR))
count += 1
return rus
DRC_f1 = 0
f1_vars = []
f2_vars = []
def run_phase_1():
"""
    This method uses the main topology structures to calculate the optimal solution of phase 1.
    :rtype: This method returns the optimal value of phase 1
    """
    print("Running Phase - 1")
print("-----------------------------------------------------------------------------------------------------------")
alocation_time_start = time.time()
# read the topology data at the json file
read_topology()
DRCs = DRC_structure()
rus = ru_location()
# Creates the set of Fs (functional splits)
# Fs(id, f_cpu, f_ram)
F1 = Fs('f8', 2, 2)
F2 = Fs('f7', 2, 2)
F3 = Fs('f6', 2, 2)
F4 = Fs('f5', 2, 2)
F5 = Fs('f4', 2, 2)
F6 = Fs('f3', 2, 2)
F7 = Fs('f2', 2, 2)
F8 = Fs('f1', 2, 2)
F9 = Fs('f0', 2, 2)
conj_Fs = {'f8': F1, 'f7': F2, 'f6': F3, 'f5': F4, 'f4': F5, 'f3': F6, 'f2': F7}
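    # NOTE: F8 ('f1') and F9 ('f0') are created above but not included in conj_Fs here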
# create the fase 1 model
mdl = Model(name='NGRAN Problem', log_output=True)
# tuple that will be used by the decision variable
i = [(p, d, b)
for p in paths for d in DRCs for b in rus if paths[p].seq[2] == rus[b].CR]
# Decision variable X
mdl.x = mdl.binary_var_dict(i, name='x')
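    # x[(p, d, b)] == 1 selects routing path p and functional-split option (DRC) d for RU b;
    # candidate tuples are restricted to paths whose RU endpoint (seq[2]) is the CR hosting RU b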
    # Phase 1 - Objective Function
mdl.minimize(mdl.sum(mdl.min(1, mdl.sum(mdl.x[it] for it in i if c in paths[it[0]].seq)) for c in CRs
if CRs[c].id != 0) - mdl.sum(mdl.sum(mdl.max(0, (mdl.sum(mdl.x[it] for it in i
if ((o in DRCs[it[1]].Fs_CU and paths[it[0]].seq[0] == CRs[c].id) or (o in DRCs[it[1]].Fs_DU
and paths[it[0]].seq[1] == CRs[c].id) or (o in DRCs[it[1]].Fs_RU
        and paths[it[0]].seq[2] == CRs[c].id))) -
(17664064*mcMS**10)/(9*mbkin**12) +
(353281280*mcMS**10)/(81*mbkin**11) + (7955554444931*mcMS**10)/
(76621545*mbkin**10) - (337299328*mcMS**11)/(189*mbkin**13) -
(6745986560*mcMS**11)/(1701*mbkin**12) - (181064392519811*mcMS**11)/
(3831077250*mbkin**11) + (1338720*mcMS**12)/mbkin**14 +
(8924800*mcMS**12)/(3*mbkin**13) + (332372802464726*mcMS**12)/
(21070924875*mbkin**12) - (37077248*mcMS**13)/(45*mbkin**15) -
(148308992*mcMS**13)/(81*mbkin**14) - (5983215661469*mcMS**13)/
(1641890250*mbkin**13) + (241028224*mcMS**14)/(585*mbkin**16) +
(964112896*mcMS**14)/(1053*mbkin**15) + (194192359957*mcMS**14)/
(372683025*mbkin**14) - (164985088*mcMS**15)/(1001*mbkin**17) -
(3299701760*mcMS**15)/(9009*mbkin**16) - (3809167143809*mcMS**15)/
(109568809350*mbkin**15) + (669632*mcMS**16)/(13*mbkin**18) +
(13392640*mcMS**16)/(117*mbkin**17) - (4254400*mcMS**17)/
(351*mbkin**19) - (85088000*mcMS**17)/(3159*mbkin**18) +
(46879712*mcMS**18)/(23205*mbkin**20) + (187518848*mcMS**18)/
(41769*mbkin**19) - (1368512*mcMS**19)/(6435*mbkin**21) -
(5474048*mcMS**19)/(11583*mbkin**20) + (1205584*mcMS**20)/
(113373*mbkin**22) + (24111680*mcMS**20)/(1020357*mbkin**21) +
(104*np.sqrt(0j + mcMS**2/mbkin**2))/(81*mbkin**3) +
(5912*mcMS**4*np.sqrt(0j + mcMS**2/mbkin**2))/(27*mbkin**7) +
(70064*(mcMS**2/mbkin**2)**(3/2))/(243*mbkin**3) -
(376528*mcMS**2)/(1053*mbkin**4*(1 - mcMS/mbkin)**2) -
(3012224*mcMS**2)/(3159*mbkin**3*(1 - mcMS/mbkin)**2) +
(753056*mcMS**2)/(1053*mbkin**2*(1 - mcMS/mbkin)**2) +
(429497312*mcMS**3)/(89505*mbkin**5*(1 - mcMS/mbkin)**2) +
(3435978496*mcMS**3)/(268515*mbkin**4*(1 - mcMS/mbkin)**2) -
(858994624*mcMS**3)/(89505*mbkin**3*(1 - mcMS/mbkin)**2) -
(234545329016*mcMS**4)/(6891885*mbkin**6*(1 - mcMS/mbkin)**2) -
(1876362632128*mcMS**4)/(20675655*mbkin**5*(1 - mcMS/mbkin)**2) +
(469090658032*mcMS**4)/(6891885*mbkin**4*(1 - mcMS/mbkin)**2) +
(75343294960*mcMS**5)/(459459*mbkin**7*(1 - mcMS/mbkin)**2) +
(602746359680*mcMS**5)/(1378377*mbkin**6*(1 - mcMS/mbkin)**2) -
(150686589920*mcMS**5)/(459459*mbkin**5*(1 - mcMS/mbkin)**2) -
(79048041848*mcMS**6)/(135135*mbkin**8*(1 - mcMS/mbkin)**2) -
(632384334784*mcMS**6)/(405405*mbkin**7*(1 - mcMS/mbkin)**2) +
(158096083696*mcMS**6)/(135135*mbkin**6*(1 - mcMS/mbkin)**2) +
(566619520*mcMS**7)/(351*mbkin**9*(1 - mcMS/mbkin)**2) +
(4532956160*mcMS**7)/(1053*mbkin**8*(1 - mcMS/mbkin)**2) -
(1133239040*mcMS**7)/(351*mbkin**7*(1 - mcMS/mbkin)**2) -
(205356871136*mcMS**8)/(57915*mbkin**10*(1 - mcMS/mbkin)**2) -
(1642854969088*mcMS**8)/(173745*mbkin**9*(1 - mcMS/mbkin)**2) +
(410713742272*mcMS**8)/(57915*mbkin**8*(1 - mcMS/mbkin)**2) +
(366047261248*mcMS**9)/(57915*mbkin**11*(1 - mcMS/mbkin)**2) +
(2928378089984*mcMS**9)/(173745*mbkin**10*(1 - mcMS/mbkin)**2) -
(732094522496*mcMS**9)/(57915*mbkin**9*(1 - mcMS/mbkin)**2) -
(41256072896*mcMS**10)/(4455*mbkin**12*(1 - mcMS/mbkin)**2) -
(330048583168*mcMS**10)/(13365*mbkin**11*(1 - mcMS/mbkin)**2) +
(82512145792*mcMS**10)/(4455*mbkin**10*(1 - mcMS/mbkin)**2) +
(31867463104*mcMS**11)/(2835*mbkin**13*(1 - mcMS/mbkin)**2) +
(254939704832*mcMS**11)/(8505*mbkin**12*(1 - mcMS/mbkin)**2) -
(63734926208*mcMS**11)/(2835*mbkin**11*(1 - mcMS/mbkin)**2) -
(6433846768*mcMS**12)/(567*mbkin**14*(1 - mcMS/mbkin)**2) -
(51470774144*mcMS**12)/(1701*mbkin**13*(1 - mcMS/mbkin)**2) +
(12867693536*mcMS**12)/(567*mbkin**12*(1 - mcMS/mbkin)**2) +
(27020210848*mcMS**13)/(2835*mbkin**15*(1 - mcMS/mbkin)**2) +
(216161686784*mcMS**13)/(8505*mbkin**14*(1 - mcMS/mbkin)**2) -
(54040421696*mcMS**13)/(2835*mbkin**13*(1 - mcMS/mbkin)**2) -
(896774288*mcMS**14)/(135*mbkin**16*(1 - mcMS/mbkin)**2) -
(7174194304*mcMS**14)/(405*mbkin**15*(1 - mcMS/mbkin)**2) +
(1793548576*mcMS**14)/(135*mbkin**14*(1 - mcMS/mbkin)**2) +
(73703643776*mcMS**15)/(19305*mbkin**17*(1 - mcMS/mbkin)**2) +
(589629150208*mcMS**15)/(57915*mbkin**16*(1 - mcMS/mbkin)**2) -
(147407287552*mcMS**15)/(19305*mbkin**15*(1 - mcMS/mbkin)**2) -
(34568184736*mcMS**16)/(19305*mbkin**18*(1 - mcMS/mbkin)**2) -
(276545477888*mcMS**16)/(57915*mbkin**17*(1 - mcMS/mbkin)**2) +
(69136369472*mcMS**16)/(19305*mbkin**16*(1 - mcMS/mbkin)**2) +
(7812195136*mcMS**17)/(11583*mbkin**19*(1 - mcMS/mbkin)**2) +
(62497561088*mcMS**17)/(34749*mbkin**18*(1 - mcMS/mbkin)**2) -
(15624390272*mcMS**17)/(11583*mbkin**17*(1 - mcMS/mbkin)**2) -
(7339439504*mcMS**18)/(36855*mbkin**20*(1 - mcMS/mbkin)**2) -
(58715516032*mcMS**18)/(110565*mbkin**19*(1 - mcMS/mbkin)**2) +
(14678879008*mcMS**18)/(36855*mbkin**18*(1 - mcMS/mbkin)**2) +
(18003292256*mcMS**19)/(405405*mbkin**21*(1 - mcMS/mbkin)**2) +
(144026338048*mcMS**19)/(1216215*mbkin**20*(1 - mcMS/mbkin)**2) -
(36006584512*mcMS**19)/(405405*mbkin**19*(1 - mcMS/mbkin)**2) -
(9695082136*mcMS**20)/(1378377*mbkin**22*(1 - mcMS/mbkin)**2) -
(77560657088*mcMS**20)/(4135131*mbkin**21*(1 - mcMS/mbkin)**2) +
(19390164272*mcMS**20)/(1378377*mbkin**20*(1 - mcMS/mbkin)**2) +
(694454128*mcMS**21)/(984555*mbkin**23*(1 - mcMS/mbkin)**2) +
(5555633024*mcMS**21)/(2953665*mbkin**22*(1 - mcMS/mbkin)**2) -
(1388908256*mcMS**21)/(984555*mbkin**21*(1 - mcMS/mbkin)**2) -
(602792*mcMS**22)/(17901*mbkin**24*(1 - mcMS/mbkin)**2) -
(4822336*mcMS**22)/(53703*mbkin**23*(1 - mcMS/mbkin)**2) +
(1205584*mcMS**22)/(17901*mbkin**22*(1 - mcMS/mbkin)**2) +
(28240544*mcMS)/(793611*mbkin**3*(1 - mcMS/mbkin)) +
(225924352*mcMS)/(2380833*mbkin**2*(1 - mcMS/mbkin)) -
(379482310*mcMS)/(2380833*mbkin*(1 - mcMS/mbkin)) -
(936051440*mcMS**2)/(2380833*mbkin**4*(1 - mcMS/mbkin)) -
(7488411520*mcMS**2)/(7142499*mbkin**3*(1 - mcMS/mbkin)) +
(12578191225*mcMS**2)/(7142499*mbkin**2*(1 - mcMS/mbkin)) +
(214748656*mcMS**3)/(89505*mbkin**5*(1 - mcMS/mbkin)) +
(1717989248*mcMS**3)/(268515*mbkin**4*(1 - mcMS/mbkin)) -
(577137013*mcMS**3)/(53703*mbkin**3*(1 - mcMS/mbkin)) -
(24223298056*mcMS**4)/(2297295*mbkin**6*(1 - mcMS/mbkin)) -
(193786384448*mcMS**4)/(6891885*mbkin**5*(1 - mcMS/mbkin)) +
(130200227051*mcMS**4)/(2756754*mbkin**4*(1 - mcMS/mbkin)) +
(1609166072*mcMS**5)/(45045*mbkin**7*(1 - mcMS/mbkin)) +
(12873328576*mcMS**5)/(135135*mbkin**6*(1 - mcMS/mbkin)) -
(8649267637*mcMS**5)/(54054*mbkin**5*(1 - mcMS/mbkin)) -
(33540544*mcMS**6)/(351*mbkin**8*(1 - mcMS/mbkin)) -
(268324352*mcMS**6)/(1053*mbkin**7*(1 - mcMS/mbkin)) +
(450701060*mcMS**6)/(1053*mbkin**6*(1 - mcMS/mbkin)) +
(24025408*mcMS**7)/(117*mbkin**9*(1 - mcMS/mbkin)) +
(192203264*mcMS**7)/(351*mbkin**8*(1 - mcMS/mbkin)) -
(322841420*mcMS**7)/(351*mbkin**7*(1 - mcMS/mbkin)) -
(20841998048*mcMS**8)/(57915*mbkin**10*(1 - mcMS/mbkin)) -
(166735984384*mcMS**8)/(173745*mbkin**9*(1 - mcMS/mbkin)) +
(56012869754*mcMS**8)/(34749*mbkin**8*(1 - mcMS/mbkin)) +
(463452448*mcMS**9)/(891*mbkin**11*(1 - mcMS/mbkin)) +
(3707619584*mcMS**9)/(2673*mbkin**10*(1 - mcMS/mbkin)) -
(6227642270*mcMS**9)/(2673*mbkin**9*(1 - mcMS/mbkin)) -
(84293728*mcMS**10)/(135*mbkin**12*(1 - mcMS/mbkin)) -
(674349824*mcMS**10)/(405*mbkin**11*(1 - mcMS/mbkin)) +
(226539394*mcMS**10)/(81*mbkin**10*(1 - mcMS/mbkin)) +
(354122336*mcMS**11)/(567*mbkin**13*(1 - mcMS/mbkin)) +
(2832978688*mcMS**11)/(1701*mbkin**12*(1 - mcMS/mbkin)) -
(4758518890*mcMS**11)/(1701*mbkin**11*(1 - mcMS/mbkin)) -
(295158704*mcMS**12)/(567*mbkin**14*(1 - mcMS/mbkin)) -
(2361269632*mcMS**12)/(1701*mbkin**13*(1 - mcMS/mbkin)) +
(3966195085*mcMS**12)/(1701*mbkin**12*(1 - mcMS/mbkin)) +
(48659824*mcMS**13)/(135*mbkin**15*(1 - mcMS/mbkin)) +
(389278592*mcMS**13)/(405*mbkin**14*(1 - mcMS/mbkin)) -
(130773277*mcMS**13)/(81*mbkin**13*(1 - mcMS/mbkin)) -
(120505408*mcMS**14)/(585*mbkin**16*(1 - mcMS/mbkin)) -
(964043264*mcMS**14)/(1755*mbkin**15*(1 - mcMS/mbkin)) +
(323858284*mcMS**14)/(351*mbkin**14*(1 - mcMS/mbkin)) +
(12991751104*mcMS**15)/(135135*mbkin**17*(1 - mcMS/mbkin)) +
(103934008832*mcMS**15)/(405405*mbkin**16*(1 - mcMS/mbkin)) -
(34915331092*mcMS**15)/(81081*mbkin**15*(1 - mcMS/mbkin)) -
(108273376*mcMS**16)/(3003*mbkin**18*(1 - mcMS/mbkin)) -
(866187008*mcMS**16)/(9009*mbkin**17*(1 - mcMS/mbkin)) +
(1454923490*mcMS**16)/(9009*mbkin**16*(1 - mcMS/mbkin)) +
(11167232*mcMS**17)/(1053*mbkin**19*(1 - mcMS/mbkin)) +
(89337856*mcMS**17)/(3159*mbkin**18*(1 - mcMS/mbkin)) -
(150059680*mcMS**17)/(3159*mbkin**17*(1 - mcMS/mbkin)) -
(1476642704*mcMS**18)/(626535*mbkin**20*(1 - mcMS/mbkin)) -
(11813141632*mcMS**18)/(1879605*mbkin**19*(1 - mcMS/mbkin)) +
(3968477267*mcMS**18)/(375921*mbkin**18*(1 - mcMS/mbkin)) +
(854941712*mcMS**19)/(2297295*mbkin**21*(1 - mcMS/mbkin)) +
(6839533696*mcMS**19)/(6891885*mbkin**20*(1 - mcMS/mbkin)) -
(2297655851*mcMS**19)/(1378377*mbkin**19*(1 - mcMS/mbkin)) -
(696197624*mcMS**20)/(18706545*mbkin**22*(1 - mcMS/mbkin)) -
(5569580992*mcMS**20)/(56119635*mbkin**21*(1 - mcMS/mbkin)) +
(3742062229*mcMS**20)/(22447854*mbkin**20*(1 - mcMS/mbkin)) +
(602792*mcMS**21)/(340119*mbkin**23*(1 - mcMS/mbkin)) +
(4822336*mcMS**21)/(1020357*mbkin**22*(1 - mcMS/mbkin)) -
(16200035*mcMS**21)/(2040714*mbkin**21*(1 - mcMS/mbkin)) -
(688*mcMS)/(243*mbkin**4*(-1 + mcMS/mbkin)) + (688*mcMS**2)/
(243*mbkin**5*(-1 + mcMS/mbkin)) + (1760*mcMS**3)/
(9*mbkin**6*(-1 + mcMS/mbkin)) - (1760*mcMS**4)/
(9*mbkin**7*(-1 + mcMS/mbkin)) + (1456*mcMS**5)/
(9*mbkin**8*(-1 + mcMS/mbkin)) - (1456*mcMS**6)/
(9*mbkin**9*(-1 + mcMS/mbkin)) + (180704*mcMS*np.pi**2)/(9009*mbkin) -
(600176*mcMS**2*np.pi**2)/(3861*mbkin**2) + (308192*mcMS**3*np.pi**2)/
(495*mbkin**3) - (2032544*mcMS**4*np.pi**2)/(1155*mbkin**4) +
(103328*mcMS**5*np.pi**2)/(27*mbkin**5) - (58064*mcMS**6*np.pi**2)/
(9*mbkin**6) + (25184*mcMS**7*np.pi**2)/(3*mbkin**7) -
(531904*mcMS**8*np.pi**2)/(63*mbkin**8) + (98656*mcMS**9*np.pi**2)/
(15*mbkin**9) - (35536*mcMS**10*np.pi**2)/(9*mbkin**10) +
(339424*mcMS**11*np.pi**2)/(189*mbkin**11) - (59296*mcMS**12*np.pi**2)/
(99*mbkin**12) + (22816*mcMS**13*np.pi**2)/(165*mbkin**13) -
(381488*mcMS**14*np.pi**2)/(19305*mbkin**14) + (1696*mcMS**15*np.pi**2)/
(1287*mbkin**15) + ((1312*np.sqrt(0j + mcMS**2/mbkin**2))/(243*mbkin**3) +
(2912*mcMS**4*np.sqrt(0j + mcMS**2/mbkin**2))/(9*mbkin**7) +
(99520*(mcMS**2/mbkin**2)**(3/2))/(243*mbkin**3) -
(188264*mcMS**2)/(351*mbkin**2*(1 - mcMS/mbkin)**2) +
(214748656*mcMS**3)/(29835*mbkin**3*(1 - mcMS/mbkin)**2) -
(117272664508*mcMS**4)/(2297295*mbkin**4*(1 - mcMS/mbkin)**2) +
(37671647480*mcMS**5)/(153153*mbkin**5*(1 - mcMS/mbkin)**2) -
(39524020924*mcMS**6)/(45045*mbkin**6*(1 - mcMS/mbkin)**2) +
(283309760*mcMS**7)/(117*mbkin**7*(1 - mcMS/mbkin)**2) -
(102678435568*mcMS**8)/(19305*mbkin**8*(1 - mcMS/mbkin)**2) +
(183023630624*mcMS**9)/(19305*mbkin**9*(1 - mcMS/mbkin)**2) -
(20628036448*mcMS**10)/(1485*mbkin**10*(1 - mcMS/mbkin)**2) +
(15933731552*mcMS**11)/(945*mbkin**11*(1 - mcMS/mbkin)**2) -
(3216923384*mcMS**12)/(189*mbkin**12*(1 - mcMS/mbkin)**2) +
(13510105424*mcMS**13)/(945*mbkin**13*(1 - mcMS/mbkin)**2) -
(448387144*mcMS**14)/(45*mbkin**14*(1 - mcMS/mbkin)**2) +
(36851821888*mcMS**15)/(6435*mbkin**15*(1 - mcMS/mbkin)**2) -
(17284092368*mcMS**16)/(6435*mbkin**16*(1 - mcMS/mbkin)**2) +
(3906097568*mcMS**17)/(3861*mbkin**17*(1 - mcMS/mbkin)**2) -
(3669719752*mcMS**18)/(12285*mbkin**18*(1 - mcMS/mbkin)**2) +
(9001646128*mcMS**19)/(135135*mbkin**19*(1 - mcMS/mbkin)**2) -
(4847541068*mcMS**20)/(459459*mbkin**20*(1 - mcMS/mbkin)**2) +
(347227064*mcMS**21)/(328185*mbkin**21*(1 - mcMS/mbkin)**2) -
(301396*mcMS**22)/(5967*mbkin**22*(1 - mcMS/mbkin)**2) +
(14120272*mcMS)/(264537*mbkin*(1 - mcMS/mbkin)) -
(468025720*mcMS**2)/(793611*mbkin**2*(1 - mcMS/mbkin)) +
(107374328*mcMS**3)/(29835*mbkin**3*(1 - mcMS/mbkin)) -
(12111649028*mcMS**4)/(765765*mbkin**4*(1 - mcMS/mbkin)) +
(804583036*mcMS**5)/(15015*mbkin**5*(1 - mcMS/mbkin)) -
(16770272*mcMS**6)/(117*mbkin**6*(1 - mcMS/mbkin)) +
(12012704*mcMS**7)/(39*mbkin**7*(1 - mcMS/mbkin)) -
(10420999024*mcMS**8)/(19305*mbkin**8*(1 - mcMS/mbkin)) +
(231726224*mcMS**9)/(297*mbkin**9*(1 - mcMS/mbkin)) -
(42146864*mcMS**10)/(45*mbkin**10*(1 - mcMS/mbkin)) +
(177061168*mcMS**11)/(189*mbkin**11*(1 - mcMS/mbkin)) -
(147579352*mcMS**12)/(189*mbkin**12*(1 - mcMS/mbkin)) +
(24329912*mcMS**13)/(45*mbkin**13*(1 - mcMS/mbkin)) -
(60252704*mcMS**14)/(195*mbkin**14*(1 - mcMS/mbkin)) +
(6495875552*mcMS**15)/(45045*mbkin**15*(1 - mcMS/mbkin)) -
(54136688*mcMS**16)/(1001*mbkin**16*(1 - mcMS/mbkin)) +
(5583616*mcMS**17)/(351*mbkin**17*(1 - mcMS/mbkin)) -
(738321352*mcMS**18)/(208845*mbkin**18*(1 - mcMS/mbkin)) +
(427470856*mcMS**19)/(765765*mbkin**19*(1 - mcMS/mbkin)) -
(348098812*mcMS**20)/(6235515*mbkin**20*(1 - mcMS/mbkin)) +
(301396*mcMS**21)/(113373*mbkin**21*(1 - mcMS/mbkin)) +
(968*mcMS)/(243*mbkin**4*(-1 + mcMS/mbkin)) - (968*mcMS**2)/
(243*mbkin**5*(-1 + mcMS/mbkin)) + (170800*mcMS**3)/
(243*mbkin**6*(-1 + mcMS/mbkin)) - (170800*mcMS**4)/
(243*mbkin**7*(-1 + mcMS/mbkin)) + (728*mcMS**5)/(mbkin**8*(-1 +
mcMS/mbkin)) - (728*mcMS**6)/(mbkin**9*(-1 + mcMS/mbkin)))*
np.log(mcMS**2/mbkin**2))*np.log(1 - mcMS/mbkin) +
((1790032*mcMS)/(675675*mbkin) + (222153992*mcMS**2)/
(868725*mbkin**2) - (993662224*mcMS**3)/(467775*mbkin**3) +
(2262297616*mcMS**4)/(280665*mbkin**4) - (2022928*mcMS**5)/
(105*mbkin**5) + (200330504*mcMS**6)/(6075*mbkin**6) -
(260772688*mcMS**7)/(6075*mbkin**7) + (615949856*mcMS**8)/
(14175*mbkin**8) - (2773168*mcMS**9)/(81*mbkin**9) +
(35346424*mcMS**10)/(1701*mbkin**10) - (405443344*mcMS**11)/
(42525*mbkin**11) + (1498143088*mcMS**12)/(467775*mbkin**12) -
(149130896*mcMS**13)/(200475*mbkin**13) + (2062888*mcMS**14)/
(19305*mbkin**14) - (8705456*mcMS**15)/(1216215*mbkin**15))*
np.log(1 - mcMS/mbkin)**2 + (304/(81*mbkin**3) + (160*mcMS**2)/
(9*mbkin**5) - (464*mcMS**4)/(27*mbkin**7) - (4400*mcMS**6)/
(81*mbkin**9) + (32*mcMS**8)/(3*mbkin**11) +
(1688*np.sqrt(0j + mcMS**2/mbkin**2))/(243*mbkin**3) -
(2824*mcMS**4*np.sqrt(0j + mcMS**2/mbkin**2))/(27*mbkin**7) -
(24976*(mcMS**2/mbkin**2)**(3/2))/(243*mbkin**3) -
(688*mcMS)/(243*mbkin**4*(1 + mcMS/mbkin)) - (688*mcMS**2)/
(243*mbkin**5*(1 + mcMS/mbkin)) + (1760*mcMS**3)/
(9*mbkin**6*(1 + mcMS/mbkin)) + (1760*mcMS**4)/
(9*mbkin**7*(1 + mcMS/mbkin)) + (1456*mcMS**5)/
(9*mbkin**8*(1 + mcMS/mbkin)) + (1456*mcMS**6)/
(9*mbkin**9*(1 + mcMS/mbkin)))*np.log(1 + mcMS/mbkin) +
(7904/(81*mbkin**3) + (2968*mbkin)/(243*mcMS**4) -
89848/(3645*mbkin*mcMS**2) - (53168*mcMS**2)/(243*mbkin**5) +
(16616*mcMS**4)/(729*mbkin**7) + (59320*mcMS**6)/(81*mbkin**9) -
(707344*mcMS**8)/(1215*mbkin**11) - (1000*np.sqrt(0j + mcMS**2/mbkin**2))/
(243*mbkin**3) - (1544*mcMS**4*np.sqrt(0j + mcMS**2/mbkin**2))/(27*mbkin**7) -
(22544*(mcMS**2/mbkin**2)**(3/2))/(243*mbkin**3))*
np.log(1 - mcMS**2/mbkin**2) + np.log(mcMS**2/mbkin**2)*(-4064/(81*mbkin**3) -
(16624*mcMS**2)/(81*mbkin**5) + (704*mcMS**2)/(9*mbkin**4) +
(1408*mcMS**2)/(9*mbkin**3) + (28194016*mcMS**2)/(264537*mbkin**2) +
(766608176*mcMS**3)/(793611*mbkin**3) + (64*mcMS**4)/(3*mbkin**8) -
(27908*mcMS**4)/(729*mbkin**7) - (11512*mcMS**4)/(9*mbkin**6) -
(60064*mcMS**4)/(27*mbkin**5) - (4711.275998320473*mcMS**4)/mbkin**4 +
(68321780792*mcMS**5)/(3357585*mbkin**5) - (22376*mcMS**6)/
(81*mbkin**9) + (4480*mcMS**6)/(9*mbkin**7) - (2844597796984*mcMS**6)/
(43648605*mbkin**6) + (6272042344856*mcMS**7)/(43648605*mbkin**7) -
(157016*mcMS**8)/(1215*mbkin**11) - (752*mcMS**8)/(9*mbkin**10) -
(3008*mcMS**8)/(9*mbkin**9) - (11623406689684*mcMS**8)/
(43648605*mbkin**8) + (5838076386728*mcMS**9)/(14549535*mbkin**9) -
(7138029219736*mcMS**10)/(14549535*mbkin**10) +
(2379988730648*mcMS**11)/(4849845*mbkin**11) -
(2504128296664*mcMS**12)/(6235515*mbkin**12) +
(1669676023736*mcMS**13)/(6235515*mbkin**13) -
(899165488072*mcMS**14)/(6235515*mbkin**14) + (385394431736*mcMS**15)/
(6235515*mbkin**15) - (899326358968*mcMS**16)/(43648605*mbkin**16) +
(224846820392*mcMS**17)/(43648605*mbkin**17) - (39681135608*mcMS**18)/
(43648605*mbkin**18) + (629890504*mcMS**19)/(6235515*mbkin**19) -
(602792*mcMS**20)/(113373*mbkin**20) + (3820703038853*mcMS**2)/
(10213773570*mbkin**2*(1 - mcMS/mbkin)**2) -
(1324623229305471443*mcMS**3)/(267396592062600*mbkin**3*
(1 - mcMS/mbkin)**2) + (5514257196652051*mcMS**4)/
(154653899400*mbkin**4*(1 - mcMS/mbkin)**2) -
(2445534345846504631*mcMS**5)/(14073504845400*mbkin**5*
    (1 - mcMS/mbkin)**2) +
dtype=np.float32)
np.testing.assert_array_almost_equal(outpt, expected_outpt)
def test_leaky_relu(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
subgraph=None
),
LeakyReluLayer(
name='leaky_relu',
xtype='ReLU',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
data=[],
subgraph=None,
attrs={
'alpha': 0.1
}
)
]
inputs = [np.reshape(
np.array([1, -1, 0, 4, -5, 1, 0, 8, 3,
-5, 1, 0, 1, 9, -3, -4],
dtype=np.float32),
(1, 1, 4, 4))]
for layer in layers:
outpt = layer.forward_exec(inputs)
inputs = [outpt]
expected_outpt = np.array([[[[1, -0.1, 0, 4], [-0.5, 1, 0, 8],
[3, -0.5, 1, 0], [1, 9, -0.3, -0.4]]]])
np.testing.assert_array_almost_equal(outpt, expected_outpt)
def test_maxpool_layer(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
subgraph=None
),
PoolingLayer(
name='conv1',
shape=TensorShape([1, 1, 3, 3]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
subgraph=None,
op='Max',
ksize=[1, 1, 2, 2],
paddings=[[0, 0], [0, 0], [1, 1], [1, 1]],
strides=[1, 1, 2, 2],
attrs={
'data_layout': 'NCHW'
}
)
]
inputs = [np.reshape(np.array([[1, 1, 0, 4], [5, 1, 0, 8],
[3, 5, 1, 0], [1, 9, 3, 4]],
dtype=np.float32),
(1, 1, 4, 4))]
for layer in layers:
outpt = layer.forward_exec(inputs)
inputs = [outpt]
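        # 2x2 max pooling, stride 2, on the zero-padded 6x6 input: each output element is
        # the maximum of one non-overlapping 2x2 window (e.g. top-left window {0, 0, 0, 1} -> 1)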
expected_outpt = np.array([[[[1, 1, 4], [5, 5, 8], [1, 9, 4]]]])
np.testing.assert_array_equal(outpt, expected_outpt)
def test_mean(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
subgraph=None
),
MeanLayer(
name='mean',
xtype='Mean',
shape=TensorShape([1, 1, 1, 1]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
data=[],
subgraph=None,
attrs={
'axes': [0, 1, 2, 3],
'keepdims': True,
'exclude': False
}
)
]
data = np.reshape(np.array([[1, -1, 0, 4, -5, 1, 0, 8, 3,
-5, 1, 0, 1, 9, -3, -4]],
dtype=np.float32),
(1, 1, 4, 4))
inputs = [data]
for layer in layers:
outpt = layer.forward_exec(inputs)
inputs = [outpt]
expected_outpt = np.mean(data, axis=(0, 1, 2, 3), keepdims=True)
np.testing.assert_array_equal(outpt, expected_outpt)
def test_pad(self):
padding = ((0, 0), (0, 0), (0, 1), (0, 1))
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 2, 1, 1]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 2, 1, 1])],
subgraph=None
),
PadLayer(
name='pad',
xtype='Pad',
shape=TensorShape([1, 2, 3, 3]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 2, 1, 1])],
data=[],
subgraph=None,
attrs={
'padding': padding
}
)
]
inputs = {
'input': np.ones((1, 2, 1, 1), dtype=np.float32)
}
for layer in layers:
inpts = [inputs[name] for name in layer.inputs]
outpt = layer.forward_exec(inpts)
inputs[layer.name] = outpt
expected_outpt = np.pad(inputs['input'], padding, mode='constant')
np.testing.assert_array_almost_equal(outpt, expected_outpt)
def test_pool_no_division_layer(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
subgraph=None
),
PoolingNoDivisionLayer(
name='pool1',
shape=TensorShape([1, 1, 3, 3]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
subgraph=None,
op='Avg',
ksize=[1, 1, 2, 2],
paddings=[[0, 0], [0, 0], [1, 1], [1, 1]],
strides=[1, 1, 2, 2],
attrs={
'data_layout': 'NCHW',
'op': 'Avg'
}
)
]
inputs = [np.reshape(np.array([[1, 1, 0, 4], [5, 1, 0, 8],
[3, 5, 1, 0], [1, 9, 3, 4]],
dtype=np.float32),
(1, 1, 4, 4))]
for layer in layers:
outpt = layer.forward_exec(inputs)
inputs = [outpt]
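        # "no division" average pooling keeps the raw window sums instead of dividing by the
        # window size: e.g. the centre 2x2 window {1, 0, 5, 1} of the padded input sums to 7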
expected_outpt = np.array([[[[1, 1, 4], [8, 7, 8], [1, 12, 4]]]])
np.testing.assert_array_equal(outpt, expected_outpt)
def test_relu(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
subgraph=None
),
ReluLayer(
name='relu1',
xtype='ReLU',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
data=[],
subgraph=None,
attrs={}
)
]
inputs = [np.reshape(
np.array([1, -1, 0, 4, -5, 1, 0, 8, 3,
-5, 1, 0, 1, 9, -3, -4],
dtype=np.float32),
(1, 1, 4, 4))]
for layer in layers:
outpt = layer.forward_exec(inputs)
inputs = [outpt]
expected_outpt = np.array([[[[1, 0, 0, 4], [0, 1, 0, 8],
[3, 0, 1, 0], [1, 9, 0, 0]]]])
np.testing.assert_array_equal(outpt, expected_outpt)
def test_relu6(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
subgraph=None
),
Relu6Layer(
name='relu1',
xtype='ReLU6',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
data=[],
subgraph=None,
attrs={}
)
]
inputs = [np.reshape(
np.array([1, -1, 0, 4, -5, 1, 0, 8, 3,
-5, 1, 0, 1, 9, -3, -4],
dtype=np.float32),
(1, 1, 4, 4))]
for layer in layers:
outpt = layer.forward_exec(inputs)
inputs = [outpt]
expected_outpt = np.array([[[[1, 0, 0, 4], [0, 1, 0, 6],
[3, 0, 1, 0], [1, 6, 0, 0]]]])
np.testing.assert_array_equal(outpt, expected_outpt)
def test_scale(self):
G = np.array([0.5, 1.2], dtype=np.float32)
B = np.array([0.1, 0.05], dtype=np.float32)
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 2, 1, 1]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 2, 1, 1])],
subgraph=None
),
ConstantLayer(
name='gamma',
shape=TensorShape([2]),
dtype='float32',
inputs=[],
input_shapes=[],
subgraph=None,
value=G
),
ConstantLayer(
name='beta',
shape=TensorShape([2]),
dtype='float32',
inputs=[],
input_shapes=[],
subgraph=None,
value=B
),
ScaleLayer(
name='scale',
shape=TensorShape([1, 2, 1, 1]),
dtype='float32',
inputs=['input', 'gamma', 'beta'],
input_shapes=[TensorShape([1, 2, 1, 1]),
TensorShape([2]),
TensorShape([2])],
subgraph=None,
attrs={
'axis': 1
},
gamma=None,
beta=None
)
]
inputs = {
'input': np.ones((1, 2, 1, 1), dtype=np.float32)
}
for layer in layers:
inpts = [inputs[name] for name in layer.inputs]
outpt = layer.forward_exec(inpts)
inputs[layer.name] = outpt
expected_outpt = (np.ones((1, 2, 1, 1), dtype=np.float32) *
np.reshape(G, (1, 2, 1, 1))) +\
np.reshape(B, (1, 2, 1, 1))
np.testing.assert_array_equal(outpt, expected_outpt)
def test_softmax_layer(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([16]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([16])],
subgraph=None
),
SoftmaxLayer(
name='softmax1',
xtype='Softmax',
shape=TensorShape([16]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([16])],
data=[],
subgraph=None,
attrs={}
)
]
inpt = np.array([1, -1, 0, 4, -5, 1, 0, 8, 3, -5, 1, 0, 1, 9, -3, -4],
dtype=np.float32)
inputs = [inpt]
for layer in layers:
outpt = layer.forward_exec(inputs)
inputs = [outpt]
expected_outpt = softmax(inpt)
np.testing.assert_array_almost_equal(outpt, expected_outpt)
def test_squeeze(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 1, 1, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 1, 4])],
subgraph=None
),
SqueezeLayer(
name='squeeze',
xtype='Squeeze',
shape=TensorShape([1, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 1, 4])],
data=[],
subgraph=None,
attrs={
'axis': [1, 2]
}
)
]
inputs = [np.reshape(np.array([1, -1, 0, 4], dtype=np.float32),
(1, 1, 1, 4))]
for layer in layers:
outpt = layer.forward_exec(inputs)
inputs = [outpt]
expected_outpt = np.reshape(np.array([1, -1, 0, 4], dtype=np.float32),
(1, 4))
np.testing.assert_array_equal(outpt, expected_outpt)
def test_tanh(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
subgraph=None
),
TanhLayer(
name='tanh1',
xtype='Tanh',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
data=[],
subgraph=None,
attrs={}
)
]
a = np.reshape(
np.array([1, -1, 0, 4, -5, 1, 0, 8, 3,
-5, 1, 0, 1, 9, -3, -4],
dtype=np.float32),
(1, 1, 4, 4))
inputs = [a]
for layer in layers:
outpt = layer.forward_exec(inputs)
inputs = [outpt]
expected_outpt = np.tanh(a)
np.testing.assert_array_almost_equal(outpt, expected_outpt)
def test_transpose(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
subgraph=None
),
TransposeLayer(
name='transpose',
xtype='Transpose',
shape=TensorShape([1, 4, 4, 1]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 4, 4])],
data=[],
subgraph=None,
attrs={
'axes': [0, 2, 3, 1]
}
)
]
data = np.reshape(np.array([[1, -1, 0, 4, -5, 1, 0, 8, 3,
-5, 1, 0, 1, 9, -3, -4]],
dtype=np.float32),
(1, 1, 4, 4))
inputs = [data]
for layer in layers:
outpt = layer.forward_exec(inputs)
inputs = [outpt]
expected_outpt = np.transpose(data, (0, 2, 3, 1))
np.testing.assert_array_equal(outpt, expected_outpt)
def test_conv2d_transpose(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 1, 3, 3]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 3, 3])],
subgraph=None
),
Conv2DTransposeLayer(
name='trans_conv',
shape=TensorShape([1, 1, 6, 6]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 1, 3, 3])],
subgraph=None,
attrs={
'data_layout': 'NCHW'
},
kernel=np.reshape(
np.array([[[1, 1, 1], [1, 2, 1], [1, 1, 1]]],
dtype=np.float32),
(1, 1, 3, 3)),
kernel_layout='OIHW',
kernel_groups=1,
biases=np.array([0], dtype=np.float32),
paddings=[[0, 0], [0, 0], [0, 0], [0, 0]],
strides=[1, 1, 2, 2],
dilations=[1, 1, 1, 1]
)
]
data = np.reshape(np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=np.float32),
(1, 1, 3, 3))
# NOTE: VALID
# data1 = np.reshape(np.array([[1, 2], [3, 4]], dtype=np.float32),
# (1, 1, 2, 2))
inputs = [data]
for layer in layers:
outpt = layer.forward_exec(inputs)
inputs = [outpt]
expected_outpt = np.array([[[[1, 1, 3, 2, 5, 3],
[1, 2, 3, 4, 5, 6],
[5, 5, 12, 7, 16, 9],
[4, 8, 9, 10, 11, 12],
[11, 11, 24, 13, 28, 15],
[7, 14, 15, 16, 17, 18]]]],
dtype=np.float32)
np.testing.assert_array_equal(outpt, expected_outpt)
def test_tuple(self):
layers = [
InputLayer(
name='in1',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['in1'],
input_shapes=[TensorShape([1, 1, 4, 4])],
subgraph=None
),
InputLayer(
name='in2',
shape=TensorShape([1, 1, 4, 4]),
dtype='float32',
inputs=['in2'],
| |
#!/usr/bin/env python
"""MangaFrameExtraction.
Based on code created by 山田 祐雅
"""
from enum import Enum
from math import sqrt, atan, cos
import collections
import logging
import os
import attr
import cv2 as cv
from numpy import pi as CV_PI
from typing import List, Union, Optional
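# NOTE: the names imported below come from the legacy OpenCV 1.x "cv" API; running this module
# today would mean mapping them onto their cv2 equivalents (cv2.threshold, cv2.Sobel,
# cv2.imwrite, ...), so treat this import block as a sketch of the original dependencies.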
from cv import (
addWeighted as cvAddWeighted,
convertScaleAbs as cvConvertScaleAbs,
CreateImage as cvCreateImage,
CV_8U,
CV_GAUSSIAN,
CV_MAKE_TYPE,
destroyWindow as cvDestroyWindow,
imshow as cvShowImage,
Line as cvLine,
SaveImage as cvSaveImage,
Set as cvSet,
Smooth as cvSmooth,
Sobel as cvSobel,
THRESH_BINARY as CV_THRESH_BINARY,
Threshold as cvThreshold,
)
@attr.s
class CV_RGB:
red = attr.ib(default=0)
green = attr.ib(default=0)
blue = attr.ib(default=0)
@attr.s
class cvPoint:
x = attr.ib(default=0)
y = attr.ib(default=0)
class Mat:
def zeros(self, *args):
raise NotImplementedError
@attr.s
class CvSize:
width = attr.ib(default=0)
height = attr.ib(default=0)
class LineIterator:
def __init__(self, *args):
raise NotImplementedError
@property
def count(self):
raise NotImplementedError
def pos(self, *args):
raise NotImplementedError
COLOR_BLACK = CV_RGB(0, 0, 0)
COLOR_WHITE = CV_RGB(255, 255, 255)
AREA_THRESHOLD = 1
ADD_PAGEFRAME_WIDTH = 20
N_BIN = 45
THETA = (180 / N_BIN)
BLOCK_SIZE = 3
CELL_SIZE = 1
R = (CELL_SIZE * (BLOCK_SIZE)*0.5)
MARGIN = 1
NUM_SLC = 3
NUM_CANDIDATE = 10
# // 画素
# // Gaso
# // pixel
@attr.s
class PixPoint(collections.abc.Sequence):
x = attr.ib(default=0)
y = attr.ib(default=0)
def cvPoint(self):
return cvPoint(self.x, self.y)
@property
def size(self):
raise NotImplementedError
def __getitem__(self, idx):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
# // 分割線
# // Bunkatsu-sen
# // Partition line
class SL:
def __init__(self, is_horizontal: bool = True, position: int = 0, theta: int = 0, ig: float = 0.0, wpr: float = 0.0, Hw: float = 0.0, pixels: PixPoint = PixPoint(0)):
self.is_horizontal = is_horizontal
self.position = position
self.theta = theta
self.ig = ig
self.wpr = wpr
self.Hw = Hw
self.pixels = pixels
def resize(self, *args):
raise NotImplementedError
def at(self, *args):
raise NotImplementedError
class Response(Enum):
OK = 0
DROP_SLICE_SRC = 1
DROP_REMAINED_SRC = 2
INVALID_DIGREES = 3
INVASION_FRAMES = 4
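    # OK: keep both halves and recurse on each; DROP_SLICE_SRC / DROP_REMAINED_SRC: the named
    # half is discarded and only the other half is processed further (see FrameSeparation.__init__)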
@attr.s
class IplImage:
width = attr.ib(default=0)
height = attr.ib(default=0)
imageData: List = attr.ib(default=[])
widthStep = attr.ib(default=0)
nChannels = attr.ib(default=0)
separate_count = 0
def cvarrToMat(src):
raise NotImplementedError
def cvCloneImage(arg):
return arg.copy()
def cvReleaseImage(*args):
# compatibility for similarity to cpp code
pass
def cvScalarAll(*args):
raise NotImplementedError
def to_rad(*args):
raise NotImplementedError
def resize_pixels(*args):
raise NotImplementedError
class FrameSeparation:
def __init__(self, src, filename: Union[str, os.PathLike[str]], output_dir: Optional[str], original_size: int, rel_original_point):
"""init func.
Args:
src: source
filename (str): filename
output_dir (str): output dir
original_size (int): original size
rel_original_point: relative original point
"""
"""Original kwargs:
IplImage src, string filename, string output_dir, int original_size, PixPoint rel_original_point
"""
# // 元画像からの相対座標
# // Motogazō kara no sōtai zahyō
# // Relative coordinates from the original image
self.rel_original_point = PixPoint(x=0, y=0)
# // 分割線で切った画像の相対座標
# // bunkatsu-sen de kitta gazō no sōtai zahyō
# // Relative coordinates of the image cut by dividing line
self.rel_slice_point = PixPoint(x=0, y=0)
# // 分割線で切った残りの画像の相対座標
# // bunkatsu-sen de kitta nokori no gazō no sōtai zahyō
# // Relative coordinates of the remaining image cut by dividing line
self.rel_remained_point = PixPoint(x=0, y=0)
# // 元画像
# // Motogazō
# // The original image
# src: IplImage = IplImage()
# // 分割線で切った画像
# // bunkatsu-sen de kitta gazō
# // Image cut with parting line
self.slice_src: IplImage = IplImage()
# // 分割線で切った残りの画像
# // bunkatsu-sen de kitta nokori no gazō
# // The remaining image cut with parting line
self.remained_src: IplImage = IplImage()
# // 作業用画像
# // sagyō-yō gazō
# // Work image
self.proc_img: IplImage = IplImage()
# // 二値化画像
# // binary image
self.bin_img: IplImage = IplImage()
# // detect_pixels用
# dp_img = None
# // 分割候補線
# vector<SL> slc[2];
self.slc: List[SL] = []
# // x,y軸の各分割線候補の位置
# int sl_position[2];
self.sl_position = None
# // x,y軸の各分割線候補の評価値
# double sl_hw[2];
self.sl_hw = None
self.slice_line: SL = SL()
self.fs1_recursive = None
self.fs2_recursive = None
self.src = cvCloneImage(src)
# self.remained_src = cvCloneImage(remained_src)
self.proc_img = cvCloneImage(src)
self.bin_img = cvCloneImage(src)
# self.separate_count = separate_count
self.original_size = original_size
self.rel_original_point = rel_original_point
self.filename = filename
self.output_dir = output_dir
self.dp_img = cvarrToMat(self.src)
if not self.is_blank(self.src):
self.calculate_ig()
# // 最大長の2%分を走査から外す
# // Saidai-chō no 2-pāsento-bun o sōsa kara hazusu
# // Remove 2% of the maximum length from scanning
# // for cwgv
self.xi = min(src.width, src.height) * 0.02
logging.debug('xi: {}'.format(self.xi))
# NOTE:
logging.debug('==={}==='.format(separate_count))
self.cwgv()
self.dslc_hv()
self.slat()
if not self.sl_exists():
# // 斜めのコマを考慮する
# // Naname no koma o kōryo suru
# // Consider diagonal frames
self.dslc_o()
self.slat()
if not self.sl_exists():
if not self.is_blank(src):
self.save_image(src)
else:
separation_res = self.separation()
if separation_res == Response.DROP_SLICE_SRC:
if (self.remained_src.width * self.remained_src.height >= self.src.width * self.src.height * 0.95):
self.save_image(self.src)
self.fs1_recursive = FrameSeparation(self.remained_src, filename, output_dir, original_size, self.rel_remained_point)
del self.fs1_recursive
elif separation_res == Response.DROP_REMAINED_SRC:
if (self.slice_src.width * self.slice_src.height >= self.src.width * self.src.height * 0.95):
self.save_image(self.src)
self.fs1_recursive = FrameSeparation(self.slice_src, filename, output_dir, original_size, self.rel_slice_point)
del self.fs1_recursive
elif separation_res == Response.OK:
self.fs1_recursive = FrameSeparation(self.slice_src, filename, output_dir, original_size, self.rel_slice_point)
self.fs2_recursive = FrameSeparation(self.remained_src, filename, output_dir, original_size, self.rel_remained_point)
del self.fs1_recursive
del self.fs2_recursive
else:
separation_res = self.separation()
if separation_res == Response.DROP_SLICE_SRC:
self.fs1_recursive = FrameSeparation(self.remained_src, filename, output_dir, original_size, self.rel_remained_point)
del self.fs1_recursive
                elif separation_res == Response.DROP_REMAINED_SRC:
self.fs1_recursive = FrameSeparation(self.slice_src, filename, output_dir, original_size, self.rel_slice_point)
del self.fs1_recursive
elif separation_res == Response.OK:
self.fs1_recursive = FrameSeparation(self.slice_src, filename, output_dir, original_size, self.rel_slice_point)
self.fs2_recursive = FrameSeparation(self.remained_src, filename, output_dir, original_size, self.rel_remained_point)
del self.fs1_recursive
del self.fs2_recursive
    def calculate_ending_point(self, is_horizontal: bool, position: int, length: int, theta: int) -> cvPoint:
return cvPoint(length, position + length * cos(to_rad(theta))) \
if is_horizontal else cvPoint(position + length * cos(to_rad(theta)), length)
def save_image(self, img):
"""save image.
Args:
img: image
"""
"""Original kwargs:
IplImage* img
"""
print("panel-region\nseparate count:{}\nrel original point(x, y):{},{}\nimg (width, height):{}, {}".format(
self.separate_count,
self.rel_original_point.x, self.rel_original_point.y,
img.width, img.height,
))
dst_path = os.path.join(self.output_dir, '{}_{}.jpg'.format(self.filename, self.separate_count))
cvSaveImage(dst_path, img)
self.separate_count += 1
def cwgv(self, show_image: bool = False):
"""Center Weighted concentration Gradient Value."""
# // 2値化
# // 2 Atai-ka
# // Binarization
binary = cvCloneImage(self.src)
cvThreshold(binary, binary, 120, 255, CV_THRESH_BINARY)
self.bin_img = cvCloneImage(binary)
self.proc_img = cvCloneImage(binary)
if show_image:
cvShowImage("[ cwgv ] bin_img", self.bin_img)
cv.waitKey(0)
cvReleaseImage(binary)
def dslc_hv(self):
"""Detection of Separate Line Candidate for horizontal and vertical direction."""
# // 分割線候補検出
# // Bunkatsu-sen kōho kenshutsu
# // Split line candidate detection
# // y軸の走査
# // Y-jiku no sōsa
# // Scan on y axis
self.calculate_slc(True)
self.calculate_wpr(True)
# // x軸の走査
# // x-jiku no sōsa
# // Scan on x axis
self.calculate_slc(False)
self.calculate_wpr(False)
def dslc_o(self):
"""Detection of Separate Line Candidate for oblique direction."""
# // 分割線候補検出
# // Bunkatsu-sen kōho kenshutsu
# // Detect candidate line candidate
# // y軸の走査
# // Y-jiku no sōsa
# // Scan on y axis
self.calculate_oblique_slc(True)
# self.calculate_oblique_wpr(True)
# // x軸の走査
# // x-jiku no sōsa
# // Scan on x axis
self.calculate_oblique_slc(False)
# self.calculate_oblique_wpr(False)
    def invasion_test(self, is_horizontal: bool, position: int, length: int, theta: int) -> Response:
"""Judge inside / outside frame."""
# // inside / outside frame judgment
# // コマ内外判定
# // Koma naigai hantei
"""Original kwargs:
bool is_horizontal, int position, int length, int theta
"""
pixels: List[PixPoint] = []
try:
pixels = self.detect_pixels(is_horizontal, position, length, theta, pixels)
except Exception as err:
if err == Response.INVALID_DIGREES:
logging.debug("invalid digree")
is_left_black = False
is_right_black = False
width = 2 if theta == 90 else 3
count = 0
count_l = 0
count_r = 0
src = self.src
# pixels_size = pixels.size()
pixels_size = len(pixels)
for d in range(pixels_size):
if ((pixels[d].x + width >= src.width) or (pixels[d].x - width <= 0) or (pixels[d].y + width >= src.height) or (pixels[d].y - width <= 0)):
continue
is_left_black = False
is_right_black = False
if is_horizontal:
for i in range(width):
is_left_black = True if self.bin_img.imageData[(pixels[d].y + i) * self.bin_img.widthStep + pixels[d].x * self.bin_img.nChannels] < 127 else is_left_black
is_right_black = True if self.bin_img.imageData[(pixels[d].y - i) * self.bin_img.widthStep + pixels[d].x * self.bin_img.nChannels] < 127 else is_right_black
else:
for i in range(width):
is_left_black = True if self.bin_img.imageData[pixels[d].y * self.bin_img.widthStep + (pixels[d].x - i) * self.bin_img.nChannels] < 127 else is_left_black
is_right_black = True if self.bin_img.imageData[pixels[d].y * self.bin_img.widthStep + (pixels[d].x + i) * self.bin_img.nChannels] < 127 else is_right_black
if is_left_black and not is_right_black:
count_l += 1
if not is_left_black and is_right_black:
count_r += 1
count = max(count_l, count_r)
logging.debug("is_horizontal:{}, position:{}, theta:{}, invasion_test:{}".format(
is_horizontal, position, theta, count/length
))
        return Response.OK if count / length > (0.55 if theta == 90 else 0.4) else Response.INVASION_FRAMES
        # - - - - - - - - - - - - - - - -
result = indexers.subindex_list(
doubleton_dataset, index_keys=['a', 'b']
)
self.assertEqual(len(result), 5)
# [
# {
# 'indexed_items': [['a', 1], ['b', 1]],
# 'elements': [{'a': 1, 'b': 1}]
# },
# {
# 'indexed_items': [['a', 1], ['b', 3]],
# 'elements': [{'a': 1, 'b': 3}]
# },
# {
# 'indexed_items': [['a', 2], ['b', 1]],
# 'elements': [{'a': 2, 'b': 1}]
# },
# {
# 'indexed_items': [['a', 2], ['b', 2]],
# 'elements': [{'a': 2, 'b': 2}]
# },
# {
# 'indexed_items': [['a', 3], ['b', 2]],
# 'elements': [{'a': 3, 'b': 2}]
# }
# ]
self.assertEqual(result[0]['indexed_items'], (('a', 1), ('b', 1)))
self.assertEqual(result[1]['indexed_items'], (('a', 1), ('b', 3)))
self.assertEqual(result[2]['indexed_items'], (('a', 2), ('b', 1)))
self.assertEqual(result[3]['indexed_items'], (('a', 2), ('b', 2)))
self.assertEqual(result[4]['indexed_items'], (('a', 3), ('b', 2)))
self.assertEqual(result[0]['elements'], [{'a': 1, 'b': 1}])
self.assertEqual(result[1]['elements'], [{'a': 1, 'b': 3}])
self.assertEqual(result[2]['elements'], [{'a': 2, 'b': 1}])
self.assertEqual(result[3]['elements'], [{'a': 2, 'b': 2}])
self.assertEqual(result[4]['elements'], [{'a': 3, 'b': 2}])
# - - - - - - - - - - - - - - - -
result = indexers.subindex_list(
doubleton_dataset,
index_items={'a': [1, 2, 3], 'b': [1, 2, 3]}
)
self.assertEqual(len(result), 5)
# [
# {
# 'indexed_items': [['a', 1], ['b', 1]],
# 'elements': [{'a': 1, 'b': 1}]
# },
# {
# 'indexed_items': [['a', 1], ['b', 3]],
# 'elements': [{'a': 1, 'b': 3}]
# },
# {
# 'indexed_items': [['a', 2], ['b', 1]],
# 'elements': [{'a': 2, 'b': 1}]
# },
# {
# 'indexed_items': [['a', 2], ['b', 2]],
# 'elements': [{'a': 2, 'b': 2}]
# },
# {
# 'indexed_items': [['a', 3], ['b', 2]],
# 'elements': [{'a': 3, 'b': 2}]
# }
# ]
self.assertEqual(result[0]['indexed_items'], (('a', 1), ('b', 1)))
self.assertEqual(result[1]['indexed_items'], (('a', 1), ('b', 3)))
self.assertEqual(result[2]['indexed_items'], (('a', 2), ('b', 1)))
self.assertEqual(result[3]['indexed_items'], (('a', 2), ('b', 2)))
self.assertEqual(result[4]['indexed_items'], (('a', 3), ('b', 2)))
self.assertEqual(result[0]['elements'], [{'a': 1, 'b': 1}])
self.assertEqual(result[1]['elements'], [{'a': 1, 'b': 3}])
self.assertEqual(result[2]['elements'], [{'a': 2, 'b': 1}])
self.assertEqual(result[3]['elements'], [{'a': 2, 'b': 2}])
self.assertEqual(result[4]['elements'], [{'a': 3, 'b': 2}])
# - - - - - - - - - - - - - - - -
result = indexers.subindex_list(
doubleton_dataset,
index_items={'a': [1, 3, 2], 'b': [3, 2, 1]}
)
self.assertEqual(len(result), 5)
# [
# {
# 'indexed_items': [['a', 1], ['b', 1]],
# 'elements': [{'a': 1, 'b': 1}]
# },
# {
# 'indexed_items': [['a', 1], ['b', 3]],
# 'elements': [{'a': 1, 'b': 3}]
# },
# {
# 'indexed_items': [['a', 2], ['b', 1]],
# 'elements': [{'a': 2, 'b': 1}]
# },
# {
# 'indexed_items': [['a', 2], ['b', 2]],
# 'elements': [{'a': 2, 'b': 2}]
# },
# {
# 'indexed_items': [['a', 3], ['b', 2]],
# 'elements': [{'a': 3, 'b': 2}]
# }
# ]
self.assertEqual(result[0]['indexed_items'], (('a', 1), ('b', 3)))
self.assertEqual(result[1]['indexed_items'], (('a', 1), ('b', 1)))
self.assertEqual(result[2]['indexed_items'], (('a', 3), ('b', 2)))
self.assertEqual(result[3]['indexed_items'], (('a', 2), ('b', 2)))
self.assertEqual(result[4]['indexed_items'], (('a', 2), ('b', 1)))
self.assertEqual(result[0]['elements'], [{'a': 1, 'b': 3}])
self.assertEqual(result[1]['elements'], [{'a': 1, 'b': 1}])
self.assertEqual(result[2]['elements'], [{'a': 3, 'b': 2}])
self.assertEqual(result[3]['elements'], [{'a': 2, 'b': 2}])
self.assertEqual(result[4]['elements'], [{'a': 2, 'b': 1}])
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def test_subindex_list_irregular_dataset(self):
'''Test subindex_list() with irregular_dataset.'''
irregular_dataset = [
{'a': 1, 'b': 1},
{'a': 2},
{'b': 2},
{'a': 2, 'b': 2},
{'a': 1, 'b': 3},
]
# - - - - - - - - - - - - - - - -
result = indexers.subindex_list(
irregular_dataset, index_keys=['a']
)
self.assertEqual(len(result), 2)
# [
# {
# 'indexed_items': [['a', 1]],
# 'elements': [{'a': 1, 'b': 1}, {'a': 1, 'b': 3}]
# },
# {
# 'indexed_items': [['a', 2]],
# 'elements': [{'a': 2}, {'a': 2, 'b': 2}]
# }
# ]
self.assertEqual(result[0]['indexed_items'], (('a', 1),))
self.assertEqual(result[1]['indexed_items'], (('a', 2),))
self.assertEqual(
result[0]['elements'],
[{'a': 1, 'b': 1}, {'a': 1, 'b': 3}]
)
self.assertEqual(
result[1]['elements'],
[{'a': 2}, {'a': 2, 'b': 2}]
)
# - - - - - - - - - - - - - - - -
result = indexers.subindex_list(
irregular_dataset, index_keys=['a', 'b']
)
self.assertEqual(len(result), 3)
# [
# {
# 'indexed_items': [['a', 1], ['b', 1]],
# 'elements': [{'a': 1, 'b': 1}]
# },
# {
# 'indexed_items': [['a', 1], ['b', 3]],
# 'elements': [{'a': 1, 'b': 3}]
# },
# {
# 'indexed_items': [['a', 2], ['b', 2]],
# 'elements': [{'a': 2, 'b': 2}]
# }
# ]
self.assertEqual(result[0]['indexed_items'], (('a', 1), ('b', 1)))
self.assertEqual(result[1]['indexed_items'], (('a', 1), ('b', 3)))
self.assertEqual(result[2]['indexed_items'], (('a', 2), ('b', 2)))
self.assertEqual(result[0]['elements'], [{'a': 1, 'b': 1}])
self.assertEqual(result[1]['elements'], [{'a': 1, 'b': 3}])
self.assertEqual(result[2]['elements'], [{'a': 2, 'b': 2}])
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class TestCompoundSubindexList(unittest.TestCase):
'''Basic test cases for indexers.compound_subindex_list().'''
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def test_compound_subindex_list_parameter_errors(self):
'''Test compound_subindex_list() function call parameter errors.'''
# with self.assertRaises(ValueError):
# indexers.compound_subindex_list([])
with self.assertRaises(ValueError):
indexers.compound_subindex_list(
[], indexers=[{'index_keys': []}]
)
with self.assertRaises(ValueError):
indexers.compound_subindex_list(
[], indexers=[{'index_keys': None, 'index_items': None}]
)
with self.assertRaises(ValueError):
indexers.compound_subindex_list(
[], indexers=[{'index_items': {}}]
)
with self.assertRaises(ValueError):
indexers.compound_subindex_list(
[], indexers=[{'index_keys': ['a'], 'index_items': {'a': 1}}]
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def test_compound_subindex_list_empty(self):
'''Test compound_subindex_list() with an empty list of dicts.'''
result = indexers.compound_subindex_list(
[], indexers=[{'index_keys': ['a']}]
)
self.assertEqual(result, [])
result = indexers.compound_subindex_list(
[], indexers=[{'index_items': {'a': [1]}}]
)
self.assertEqual(result, [])
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def test_compound_subindex_list_singleton_dataset(self):
'''Test compound_subindex_list() with a dataset of singleton dicts.'''
singleton_dataset = [
{'a': 1}, {'a': 2}, {'a': 3}, {'a': 2}, {'a': 1},
]
compound_result = indexers.compound_subindex_list(
singleton_dataset, indexers=[{'index_keys': ['a']}]
)
simple_result = indexers.subindex_list(
singleton_dataset, index_keys=['a']
)
self.assertEqual(compound_result, simple_result)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def test_compound_subindex_list_doubleton_dataset(self):
'''Test compound_subindex_list() with a dataset of doubleton dicts.'''
doubleton_dataset = [
{'a': 1, 'b': 1},
{'a': 2, 'b': 1},
{'a': 3, 'b': 2},
{'a': 2, 'b': 2},
{'a': 1, 'b': 3},
]
# - - - - - - - - - - - - - - - -
result = indexers.compound_subindex_list(
doubleton_dataset, indexers=[{'index_keys': ['a']}]
)
simple_result = indexers.subindex_list(
doubleton_dataset, index_keys=['a']
)
self.assertEqual(result, simple_result)
# - - - - - - - - - - - - - - - -
result = indexers.compound_subindex_list(
doubleton_dataset, indexers=[{'index_keys': ['a', 'b']}]
)
simple_result = indexers.subindex_list(
doubleton_dataset, index_keys=['a', 'b']
)
self.assertEqual(result, simple_result)
# - - - - - - - - - - - - - - - -
result = indexers.compound_subindex_list(
doubleton_dataset,
indexers=[
{'index_keys': ['a']},
{'index_keys': ['b']}
]
)
# [
# {
# 'indexed_items': [['a', 1]],
# 'elements': [
# {
# 'indexed_items': [['b', 1]],
# 'elements': [{'a': 1, 'b': 1}]
# },
# {
# 'indexed_items': [['b', 3]],
# 'elements': [{'a': 1, 'b': 3}]
# }
# ]
# },
# {
# 'indexed_items': [['a', 2]],
# 'elements': [
# {
# 'indexed_items': [['b', 1]],
# 'elements': [{'a': 2, 'b': 1}]
# },
# {
# 'indexed_items': [['b', 2]],
# 'elements': [{'a': 2, 'b': 2}]
# }
# ]
# },
# {
# 'indexed_items': [['a', 3]],
# 'elements': [
# {
# 'indexed_items': [['b', 2]],
# 'elements': [{'a': 3, 'b': 2}]
# }
# ]
# }
# ]
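# Each dict in 'indexers' adds one level of grouping: the outer groups are
# indexed by 'a', and each outer group's 'elements' are themselves subindex
# groups indexed by 'b', whose 'elements' hold the original records.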
self.assertEqual(len(result), 3)
self.assertEqual(len(result[0]['elements']), 2)
self.assertEqual(len(result[1]['elements']), 2)
self.assertEqual(len(result[2]['elements']), 1)
self.assertEqual(result[0]['indexed_items'], (('a', 1),))
self.assertEqual(result[1]['indexed_items'], (('a', 2),))
self.assertEqual(result[2]['indexed_items'], (('a', 3),))
'''
Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
arxiv: https://arxiv.org/abs/1705.07750
'''
import tensorflow as tf
from central_reservoir.utils.layers import linear
from central_reservoir.utils.layers import conv_batchnorm_relu
from central_reservoir.utils.layers import maxpool
from central_reservoir.utils.layers import avgpool
VALID_ENDPOINTS = (
'Conv3d_1_7x7',
'MaxPool3d_2a_3x3',
'Conv3d_2b_1x1',
'Conv3d_2c_3x3',
'MaxPool3d_3a_3x3',
'Mixed_3b',
'Mixed_3c',
'MaxPool3d_4a_3x3',
'Mixed_4b',
'Mixed_4c',
'Mixed_4d',
'Mixed_4e',
'Mixed_4f',
'MaxPool3d_5a_2x2',
'Mixed_5b',
'Mixed_5c',
'Logits',
'Predictions',
)
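# 'final_endpoint' controls how far the graph below is built: construction
# stops once the named endpoint is reached and (net, end_points) is returned,
# with 'end_points' collecting every endpoint computed up to that point.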
def build_i3d(final_endpoint='Logits', use_batch_norm=False,
use_cross_replica_batch_norm=False, num_classes=101,
spatial_squeeze=True, dropout_keep_prob=1.0, num_cores=8):
if final_endpoint not in VALID_ENDPOINTS:
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def model(inputs, is_training):
net = inputs
end_points = {}
print('Inputs: {}'.format(net.get_shape().as_list()))
# 7x7x7 Conv, stride 2
end_point = 'Conv3d_1a_7x7'
net = conv_batchnorm_relu(net, end_point, 64,
kernel_size=7, stride=2, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
get_shape = net.get_shape().as_list()
print('{} : {}'.format(end_point, get_shape))
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
# 1x3x3 Max-pool, stride 1, 2, 2
end_point = 'MaxPool3d_2a_3x3'
net = maxpool(net, end_point, ksize=[1, 1, 3, 3, 1],
strides=[1, 1, 2, 2, 1], padding='SAME')
get_shape = net.get_shape().as_list()
print('{} : {}'.format(end_point, get_shape))
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
# 1x1x1 Conv, stride 1
end_point = 'Conv3d_2b_1x1'
net = conv_batchnorm_relu(net, end_point, 64,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
get_shape = net.get_shape().as_list()
print('{} : {}'.format(end_point, get_shape))
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
# 3x3x3 Conv, stride 1
end_point = 'Conv3d_2c_3x3'
net = conv_batchnorm_relu(net, end_point, 192,
kernel_size=3, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
get_shape = net.get_shape().as_list()
print('{} : {}'.format(end_point, get_shape))
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
# 1x3x3 Max-pool, stride 1, 2, 2
end_point = 'MaxPool3d_3a_3x3'
net = maxpool(net, end_point, ksize=[1, 1, 3, 3, 1],
strides=[1, 1, 2, 2, 1], padding='SAME')
get_shape = net.get_shape().as_list()
print('{} : {}'.format(end_point, get_shape))
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
# Mixed 3b : Inception block
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
# 1x1x1 Conv, stride 1
branch_0 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 64,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_1'):
# 1x1x1 Conv, stride 1
branch_1 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 96,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# 3x3x3 Conv, stride 1
branch_1 = conv_batchnorm_relu(branch_1, 'Conv3d_0b_3x3', 128,
kernel_size=3, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_2'):
# 1x1x1 Conv, stride 1
branch_2 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 16,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# 3x3x3 Conv, stride 1
branch_2 = conv_batchnorm_relu(branch_2, 'Conv3d_0b_3x3', 32,
kernel_size=3, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_3'):
# 3x3x3 Max-pool, stride 1, 1, 1
branch_3 = maxpool(net, 'MaxPool3d_0a_3x3',
ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1],
padding='SAME')
# 1x1x1 Conv, stride 1
branch_3 = conv_batchnorm_relu(branch_3, 'Conv3d_0b_1x1', 32,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# Concat branch_[0-3]
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
get_shape = net.get_shape().as_list()
print('{} : {}'.format(end_point, get_shape))
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
# Mixed 3c: Inception block
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
# 1x1x1 Conv, stride 1
branch_0 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 128,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_1'):
# 1x1x1 Conv, stride 1
branch_1 = conv_batchnorm_relu(net, 'Conv3d_0b_1x1', 128,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# 3x3x3 Conv, stride 1
branch_1 = conv_batchnorm_relu(branch_1, 'Conv3d_0b_3x3', 192,
kernel_size=3, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_2'):
# 1x1x1 Conv, stride 1
branch_2 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 32,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# 3x3x3 Conv, stride 1
branch_2 = conv_batchnorm_relu(branch_2, 'Conv3d_0b_3x3', 96,
kernel_size=3, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_3'):
# 3x3x3 Max-Pool, stride 1, 1, 1
branch_3 = maxpool(net, 'MaxPool3d_0a_3x3',
ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1],
padding='SAME')
# 1x1x1 Conv, stride 1
branch_3 = conv_batchnorm_relu(branch_3, 'Conv3d_0b_1x1', 64,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# Concat branch_[0-3]
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
get_shape = net.get_shape().as_list()
print('{} : {}'.format(end_point, get_shape))
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
# 3x3x3 Max-pool, stride 2, 2, 2
end_point = 'MaxPool3d_4a_3x3'
net = maxpool(net, end_point, ksize=[1, 3, 3, 3, 1],
strides=[1, 2, 2, 2, 1], padding='SAME')
get_shape = net.get_shape().as_list()
print('{} : {}'.format(end_point, get_shape))
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
# Mixed 4b: Inception block
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
# 1x1x1 Conv, stride 1
branch_0 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 192,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_1'):
# 1x1x1 Conv, stride 1
branch_1 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 96,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# 3x3x3 Conv, stride 1
branch_1 = conv_batchnorm_relu(branch_1, 'Conv3d_0b_3x3', 208,
kernel_size=3, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_2'):
# 1x1x1 Conv, stride 1
branch_2 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 16,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# 3x3x3 Conv, stride 1
branch_2 = conv_batchnorm_relu(branch_2, 'Conv3d_0b_3x3', 48,
kernel_size=3, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_3'):
# 3x3x3 Max-pool, stride 1, 1, 1
branch_3 = maxpool(net, 'MaxPool3d_0a_3x3',
ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1],
padding='SAME')
# 1x1x1 Conv, stride 1
branch_3 = conv_batchnorm_relu(branch_3, 'Conv3d_0b_1x1', 64,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# Concat branch_[0-3]
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
get_shape = net.get_shape().as_list()
print('{} : {}'.format(end_point, get_shape))
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
# Mixed 4c: Inception block
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
# 1x1x1 Conv, stride 1
branch_0 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 160,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_1'):
# 1x1x1 Conv, stride 1
branch_1 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 112,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# 3x3x3 Conv, stride 1
branch_1 = conv_batchnorm_relu(branch_1, 'Conv3d_0b_3x3', 224,
kernel_size=3, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_2'):
# 1x1x1 Conv, stride 1
branch_2 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 24,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# 3x3x3 Conv, stride 1
branch_2 = conv_batchnorm_relu(branch_2, 'Conv3d_0b_3x3', 64,
kernel_size=3, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_3'):
# 3x3x3 Max-pool, stride 1, 1, 1
branch_3 = maxpool(net, 'MaxPool3d_0a_3x3',
ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1],
padding='SAME')
# 1x1x1 Conv, stride 1
branch_3 = conv_batchnorm_relu(branch_3, 'Conv3d_0b_1x1', 64,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# Concat branch_[0-3]
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
get_shape = net.get_shape().as_list()
print('{} : {}'.format(end_point, get_shape))
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
# Mixed 4d: Inception block
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
# 1x1x1 Conv, stride 1
branch_0 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 128,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_1'):
# 1x1x1 Conv, stride 1
branch_1 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 128,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# 3x3x3 Conv, stride 1
branch_1 = conv_batchnorm_relu(branch_1, 'Conv3d_0b_3x3', 256,
kernel_size=3, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_2'):
# 1x1x1 Conv, stride 1
branch_2 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 24,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# 3x3x3 Conv, stride 1
branch_2 = conv_batchnorm_relu(branch_2, 'Conv3d_0b_3x3', 64,
kernel_size=3, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_3'):
# 3x3x3 Max-pool, stride 1, 1, 1
branch_3 = maxpool(net, 'MaxPool3d_0a_3x3',
ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1],
padding='SAME')
# 1x1x1 Conv, stride 1
branch_3 = conv_batchnorm_relu(branch_3, 'Conv3d_0b_1x1', 64,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# Concat branch_[0-3]
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
get_shape = net.get_shape().as_list()
print('{} : {}'.format(end_point, get_shape))
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
# Mixed 4e: Inception block
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
# 1x1x1 Conv, stride 1
branch_0 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 112,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_1'):
# 1x1x1 Conv, stride 1
branch_1 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 144,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# 3x3x3 Conv, stride 1
branch_1 = conv_batchnorm_relu(branch_1, 'Conv3d_0b_3x3', 288,
kernel_size=3, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_2'):
# 1x1x1 Conv, stride 1
branch_2 = conv_batchnorm_relu(net, 'Conv3d_0a_1x1', 32,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# 3x3x3 Conv, stride 1
branch_2 = conv_batchnorm_relu(branch_2, 'Conv3d_0b_3x3', 64,
kernel_size=3, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
with tf.variable_scope('Branch_3'):
# 3x3x3 Max-pool, stride 1, 1, 1
branch_3 = maxpool(net, 'MaxPool3d_0a_3x3',
ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1],
padding='SAME')
# 1x1x1 Conv, stride 1
branch_3 = conv_batchnorm_relu(branch_3, 'Conv3d_0b_1x1', 64,
kernel_size=1, stride=1, is_training=is_training, num_cores=num_cores,
use_batch_norm=use_batch_norm, use_cross_replica_batch_norm=use_cross_replica_batch_norm)
# Concat branch_[0-3]
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
known to not work, but the similar query
# with 'f3 ge x' instead of 'f3 like x' does work.
# See test_06_index_field_condition_and_or_and_03()
# in WhereClause_evaluate_node_resultTC.
self._enr(
"f2 gt o and lt q or f1 gt a and lt c and f3 like x",
[
dict(down=1),
dict(up=0, right=2, field="f2", condition="gt", value="o"),
dict(
up=0,
right=3,
left=1,
operator="and",
field="f2",
condition="lt",
value="q",
),
dict(
up=0,
right=4,
left=2,
operator="or",
field="f1",
condition="gt",
value="a",
),
dict(
up=0,
right=5,
left=3,
operator="and",
field="f1",
condition="lt",
value="c",
),
dict(
up=0,
left=4,
operator="and",
field="f3",
condition="like",
value="x",
),
],
{1, 3},
)
class Where_evaluateTC(unittest.TestCase):
def setUp(self):
self.processors = Processors(
{"f"},
{"f1", "f2", "f3", "f4", "f5", "f6"},
[
{
"f": ["0"],
"f1": ["b"],
"f2": ["m", "n"],
"f3": ["a"],
"f4": ["a"],
"f5": ["de"],
},
{
"f": ["1"],
"f1": ["c"],
"f2": ["n", "o"],
"f3": ["ab"],
"f4": ["xa"],
},
{
"f": ["2"],
"f1": ["d"],
"f2": ["o", "p"],
"f3": ["b"],
"f4": ["xax"],
"f5": ["de"],
},
{
"f": ["3"],
"f1": ["e"],
"f2": ["p", "q"],
"f3": ["ba"],
"f4": ["axa"],
},
{
"f": ["4"],
"f1": ["f"],
"f2": ["q", "r"],
"f3": ["bc"],
"f4": ["ax"],
"f5": ["df"],
},
{
"f": ["5"],
"f1": ["g"],
"f2": ["s", "t"],
"f3": ["1"],
"f4": ["b"],
},
{
"f": ["6"],
"f1": ["h"],
"f2": ["u", "v"],
"f3": ["12"],
"f4": ["xb"],
"f5": ["df"],
},
{
"f": ["7"],
"f1": ["i"],
"f2": ["w", "x"],
"f3": ["2"],
"f4": ["xbx"],
},
{
"f": ["8"],
"f1": ["j"],
"f2": ["y", "z"],
"f3": ["21"],
"f4": ["xbax"],
"f5": ["dg"],
},
{
"f": ["9"],
"f1": ["k"],
"f2": ["l", "l"],
"f3": ["23"],
"f4": ["x"],
},
],
)
def tearDown(self):
pass
def test____assumptions(self):
self.assertEqual(self.processors.non_indexed_fields, {"f"})
self.assertEqual(
self.processors.indexed_fields,
{"f1", "f2", "f3", "f4", "f5", "f6"},
)
self.assertEqual(
self.processors.records,
[
{
"f": ["0"],
"f1": ["b"],
"f2": ["m", "n"],
"f3": ["a"],
"f4": ["a"],
"f5": ["de"],
},
{
"f": ["1"],
"f1": ["c"],
"f2": ["n", "o"],
"f3": ["ab"],
"f4": ["xa"],
},
{
"f": ["2"],
"f1": ["d"],
"f2": ["o", "p"],
"f3": ["b"],
"f4": ["xax"],
"f5": ["de"],
},
{
"f": ["3"],
"f1": ["e"],
"f2": ["p", "q"],
"f3": ["ba"],
"f4": ["axa"],
},
{
"f": ["4"],
"f1": ["f"],
"f2": ["q", "r"],
"f3": ["bc"],
"f4": ["ax"],
"f5": ["df"],
},
{
"f": ["5"],
"f1": ["g"],
"f2": ["s", "t"],
"f3": ["1"],
"f4": ["b"],
},
{
"f": ["6"],
"f1": ["h"],
"f2": ["u", "v"],
"f3": ["12"],
"f4": ["xb"],
"f5": ["df"],
},
{
"f": ["7"],
"f1": ["i"],
"f2": ["w", "x"],
"f3": ["2"],
"f4": ["xbx"],
},
{
"f": ["8"],
"f1": ["j"],
"f2": ["y", "z"],
"f3": ["21"],
"f4": ["xbax"],
"f5": ["dg"],
},
{
"f": ["9"],
"f1": ["k"],
"f2": ["l", "l"],
"f3": ["23"],
"f4": ["x"],
},
],
)
self.assertEqual(self.processors.existence, set(range(10)))
self.assertEqual(self.processors.get_existence(), set(range(10)))
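# Helper: lex, parse and validate 'query' with where.Where, evaluate it
# against self.processors, then compare adjust(expected_answer) with the
# answer set held in w.node.result.answer.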
def _ev(self, query, expected_answer):
ae = self.assertEqual
w = where.Where(query)
w.lex()
w.parse()
ae(
w.validate(self.processors.database, self.processors.filename),
None,
)
ae(w.evaluate(self.processors), None)
ae(w._processors, None)
ae(adjust(expected_answer), w.node.result.answer)
def test_01_like_01(self):
self._ev("f1 like a", set())
self._ev("f1 like b", {0})
self._ev("f2 like p", {2, 3})
self._ev("f3 like a", {0, 1, 3})
self._ev("f3 like a\Z", {0, 3})
self._ev("f4 like a", {0, 1, 2, 3, 4, 8})
self._ev("f4 like a\Z", {0, 1, 3})
self._ev("f4 like \Aa", {0, 3, 4})
self._ev("f4 like \Aa\Z", {0})
self._ev("f4 like b", {5, 6, 7, 8})
self._ev("f4 like b\Z", {5, 6})
self._ev("f4 like \Ab", {5})
self._ev("f4 like \Ab\Z", {5})
def test_01_like_02_not(self):
self._ev("not f1 like a", {0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
self._ev("not f1 like b", {1, 2, 3, 4, 5, 6, 7, 8, 9})
self._ev("not f2 like p", {0, 1, 4, 5, 6, 7, 8, 9})
self._ev("not f3 like a", {2, 4, 5, 6, 7, 8, 9})
self._ev("not f3 like a\Z", {1, 2, 4, 5, 6, 7, 8, 9})
self._ev("not f4 like a", {5, 6, 7, 9})
self._ev("not f4 like a\Z", {2, 4, 5, 6, 7, 8, 9})
self._ev("not f4 like \Aa", {1, 2, 5, 6, 7, 8, 9})
self._ev("not f4 like \Aa\Z", {1, 2, 3, 4, 5, 6, 7, 8, 9})
self._ev("not f4 like b", {0, 1, 2, 3, 4, 9})
self._ev("not f4 like b\Z", {0, 1, 2, 3, 4, 7, 8, 9})
self._ev("not f4 like \Ab", {0, 1, 2, 3, 4, 6, 7, 8, 9})
self._ev("not f4 like \Ab\Z", {0, 1, 2, 3, 4, 6, 7, 8, 9})
def test_01_like_03_not(self):
self._ev("f1 not like a", {0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
self._ev("f1 not like b", {1, 2, 3, 4, 5, 6, 7, 8, 9})
self._ev("f2 not like p", {0, 1, 4, 5, 6, 7, 8, 9})
self._ev("f3 not like a", {2, 4, 5, 6, 7, 8, 9})
self._ev("f3 not like a\Z", {1, 2, 4, 5, 6, 7, 8, 9})
self._ev("f4 not like a", {5, 6, 7, 9})
self._ev("f4 not like a\Z", {2, 4, 5, 6, 7, 8, 9})
self._ev("f4 not like \Aa", {1, 2, 5, 6, 7, 8, 9})
self._ev("f4 not like \Aa\Z", {1, 2, 3, 4, 5, 6, 7, 8, 9})
self._ev("f4 not like b", {0, 1, 2, 3, 4, 9})
self._ev("f4 not like b\Z", {0, 1, 2, 3, 4, 7, 8, 9})
self._ev("f4 not like \Ab", {0, 1, 2, 3, 4, 6, 7, 8, 9})
self._ev("f4 not like \Ab\Z", {0, 1, 2, 3, 4, 6, 7, 8, 9})
def test_01_like_04_not_not(self):
self._ev("not f1 not like a", set())
self._ev("not f1 not like b", {0})
self._ev("not f2 not like p", {2, 3})
self._ev("not f3 not like a", {0, 1, 3})
self._ev("not f3 not like a\Z", {0, 3})
self._ev("not f4 not like a", {0, 1, 2, 3, 4, 8})
self._ev("not f4 not like a\Z", {0, 1, 3})
self._ev("not f4 not like \Aa", {0, 3, 4})
self._ev("not f4 not like \Aa\Z", {0})
self._ev("not f4 not like b", {5, 6, 7, 8})
self._ev("not f4 not like b\Z", {5, 6})
self._ev("not f4 not like \Ab", {5})
self._ev("not f4 not like \Ab\Z", {5})
def test_01_like_05_parentheses(self):
self._ev("( f1 like a )", set())
self._ev("( f1 like b )", {0})
self._ev("( f2 like p )", {2, 3})
self._ev("( f3 like a )", {0, 1, 3})
self._ev("( f3 like a\Z )", {0, 3})
self._ev("( f4 like a )", {0, 1, 2, 3, 4, 8})
self._ev("( f4 like a\Z )", {0, 1, 3})
self._ev("( f4 like \Aa )", {0, 3, 4})
self._ev("( f4 like \Aa\Z )", {0})
self._ev("( f4 like b )", {5, 6, 7, 8})
self._ev("( f4 like b\Z )", {5, 6})
self._ev("( f4 like \Ab )", {5})
self._ev("( f4 like \Ab\Z )", {5})
def test_01_like_06_not_parentheses_01(self):
self._ev("not ( f1 like a )", {0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
self._ev("not ( f1 like b )", {1, 2, 3, 4, 5, 6, 7, 8, 9})
self._ev("not ( f2 like p )", {0, 1, 4, 5, 6, 7, 8, 9})
self._ev("not ( f3 like a )", {2, 4, 5, 6, 7, 8, 9})
self._ev("not ( f3 like a\Z )", {1, 2, 4, 5, 6, 7, 8, 9})
self._ev("not ( f4 like a )", {5, 6, 7, 9})
self._ev("not ( f4 like a\Z )", {2, 4, 5, 6, 7, 8, 9})
self._ev("not ( f4 like \Aa )", {1, 2, 5, 6, 7, 8, 9})
self._ev("not ( f4 like \Aa\Z )", {1, 2, 3, 4, 5, 6, 7, 8, 9})
self._ev("not ( f4 like b )", {0, 1, 2, 3, 4, 9})
self._ev("not ( f4 like b\Z )", {0, 1, 2, 3, 4, 7, 8, 9})
self._ev("not ( f4 like \Ab )", {0, 1, 2, 3, 4, 6, 7, 8, 9})
self._ev("not ( f4 like \Ab\Z )", {0, 1, 2, 3, 4, 6, 7, 8, 9})
def test_01_like_06_not_parentheses_02(self):
self._ev("( not f1 like a )", {0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
self._ev("( not f1 like b )", {1, 2, 3, 4, 5, 6, 7, 8, 9})
self._ev("( not f2 like p )", {0, 1, 4, 5, 6, 7, 8, 9})
self._ev("( not f3 like a )", {2, 4, 5, 6, 7, 8, 9})
self._ev("( not f3 like a\Z )", {1, 2, 4, 5, 6, 7, 8, 9})
self._ev("( not f4 like a )", {5, 6, 7, 9})
self._ev("( not f4 like a\Z )", {2, 4, 5, 6, 7, 8, 9})
tab.
"""
if self.cur_device is not None:
self.TabIndex[tab]()
def updateAllTabs(self):
for tab in self.TabIndex:
self.TabIndex[tab]()
def updateCurrentTab(self):
log.debug("updateCurrentTab()")
self.TabIndex[self.Tabs.currentIndex()]()
# ***********************************************************************************
#
# DEVICE ICON LIST/DEVICE UPDATE(S)
#
# ***********************************************************************************
def DeviceRefreshAction_activated(self):
self.DeviceRefreshAction.setEnabled(False)
self.requestDeviceUpdate()
self.DeviceRefreshAction.setEnabled(True)
def RefreshAllAction_activated(self):
self.rescanDevices()
def setDeviceListViewMode(self, mode):
if mode == QListView.ListMode:
self.DeviceList.setViewMode(QListView.ListMode)
self.ViewAsListAction.setEnabled(False)
self.ViewAsIconsAction.setEnabled(True)
else:
self.DeviceList.setViewMode(QListView.IconMode)
self.ViewAsListAction.setEnabled(True)
self.ViewAsIconsAction.setEnabled(False)
def createDeviceIcon(self, dev=None):
if dev is None:
dev = self.cur_device
try:
dev.icon
except AttributeError:
dev.icon = "default_printer"
try:
self.device_icons[dev.icon]
except:
self.device_icons[dev.icon] = load_pixmap(dev.icon, 'devices')
pix = self.device_icons[dev.icon]
w, h = pix.width(), pix.height()
error_state = dev.error_state
icon = QPixmap(w, h)
p = QPainter(icon)
p.eraseRect(0, 0, icon.width(), icon.height())
p.drawPixmap(0, 0, pix)
try:
tech_type = dev.tech_type
except AttributeError:
tech_type = TECH_TYPE_NONE
if dev.device_type == DEVICE_TYPE_FAX:
p.drawPixmap(w - self.fax_icon.width(), 0, self.fax_icon)
if error_state != ERROR_STATE_CLEAR:
if tech_type in (TECH_TYPE_COLOR_INK, TECH_TYPE_MONO_INK):
status_icon = getStatusOverlayIcon(error_state)[0] # ink
else:
status_icon = getStatusOverlayIcon(error_state)[1] # laser
if status_icon is not None:
p.drawPixmap(0, 0, status_icon)
p.end()
return icon
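# Rebuild the device icon list from the current CUPS queues: newly found URIs
# are added, URIs that disappeared (or are unsupported) are removed, and the
# devices that remain are queued for a status refresh via requestDeviceUpdate().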
def refreshDeviceList(self):
global devices
log.debug("Rescanning device list...")
if 1:
beginWaitCursor()
self.updating = True
self.setWindowTitle(self.__tr("Refreshing Device List - HP Device Manager"))
self.statusBar().showMessage(self.__tr("Refreshing device list..."))
self.cups_devices = device.getSupportedCUPSDevices(['hp', 'hpfax'])
current = None
try:
adds = []
for d in self.cups_devices:
if d not in device_list:
adds.append(d)
log.debug("Adds: %s" % ','.join(adds))
removals = []
for d in device_list:
if d not in self.cups_devices:
removals.append(d)
log.debug("Removals (1): %s" % ','.join(removals))
updates = []
for d in device_list:
if d not in adds and d not in removals:
updates.append(d)
log.debug("Updates: %s" % ','.join(updates))
for d in adds:
log.debug("adding: %s" % d)
# Note: Do not perform any I/O with this device.
dev = device.Device(d, service=self.service, disable_dbus=False)
if not dev.supported:
log.debug("Unsupported model - removing device.")
removals.append(d)
continue
icon = self.createDeviceIcon(dev)
if dev.device_type == DEVICE_TYPE_FAX:
DeviceViewItem(self.DeviceList, self.__tr("%s (Fax)"%dev.model_ui),
icon, d)
else:
if dev.fax_type:
DeviceViewItem(self.DeviceList, self.__tr("%s (Printer)"%dev.model_ui),
icon, d)
else:
DeviceViewItem(self.DeviceList, dev.model_ui,
icon, d)
device_list[d] = dev
log.debug("Removals (2): %s" % ','.join(removals))
removed_device=None
for d in removals:
removed_device = d
index = self.DeviceList.count()-1
item = self.DeviceList.item(index)
log.debug("removing: %s" % d)
try:
del device_list[d]
except KeyError:
pass
while index >= 0 and item is not None:
if item.device_uri == d:
self.DeviceList.takeItem(index)
break
index -= 1
item = self.DeviceList.item(index)
qApp.processEvents()
self.DeviceList.updateGeometry()
qApp.processEvents()
if len(device_list):
for tab in self.TabIndex:
self.Tabs.setTabEnabled(tab, True)
if self.cur_device_uri:
index = 0
item = first_item = self.DeviceList.item(index)
while item is not None:
qApp.processEvents()
if item.device_uri == self.cur_device_uri:
current = item
self.statusBar().showMessage(self.cur_device_uri)
break
index += 1
item = self.DeviceList.item(index)
else:
self.cur_device = None
self.cur_device_uri = ''
if self.cur_device is None:
i = self.DeviceList.item(0)
if i is not None:
self.cur_device_uri = i.device_uri
self.cur_device = device_list[self.cur_device_uri]
current = i
self.updatePrinterCombos()
if self.cur_device_uri:
#user_conf.set('last_used', 'device_uri',self.cur_device_uri)
self.user_settings.last_used_device_uri = self.cur_device_uri
self.user_settings.save()
for d in updates + adds:
if d not in removals:
self.requestDeviceUpdate(device_list[d])
else: # no devices
self.cur_device = None
self.DeviceRefreshAction.setEnabled(False)
self.RemoveDeviceAction.setEnabled(False)
#self.DiagnoseQueueAction.setEnabled(False)
self.updating = False
self.statusBar().showMessage(self.__tr("Press F6 to refresh."))
for tab in self.TabIndex:
self.Tabs.setTabEnabled(tab, False)
endWaitCursor()
dlg = NoDevicesDialog(self)
dlg.exec_()
finally:
self.updating = False
endWaitCursor()
if current is not None:
self.DeviceList.setCurrentItem(current)
self.DeviceRefreshAction.setEnabled(True)
if self.cur_device is not None:
self.RemoveDeviceAction.setEnabled(True)
#self.DiagnoseQueueAction.setEnabled(True)
self.statusBar().showMessage(self.cur_device_uri)
self.updateWindowTitle()
def updateWindowTitle(self):
if self.cur_device.device_type == DEVICE_TYPE_FAX:
self.setWindowTitle(self.__tr("HP Device Manager - %s (Fax)"%self.cur_device.model_ui))
else:
if self.cur_device.fax_type:
self.setWindowTitle(self.__tr("HP Device Manager - %s (Printer)"%self.cur_device.model_ui))
else:
self.setWindowTitle(self.__tr("HP Device Manager - %s"%self.cur_device.model_ui))
self.statusBar().showMessage(self.cur_device_uri)
def updateDeviceByURI(self, device_uri):
return self.updateDevice(self.findDeviceByURI(device_uri))
def updateDevice(self, dev=None, update_tab=True):
""" Update the device icon and currently displayed tab.
"""
if dev is None:
dev = self.cur_device
log.debug("updateDevice(%s)" % dev.device_uri)
item = self.findItem(dev)
if item is not None:
item.setIcon(QIcon(self.createDeviceIcon(dev)))
if dev is self.cur_device and update_tab:
self.updatePrinterCombos()
self.updateCurrentTab()
self.statusBar().showMessage(self.cur_device_uri)
if self.cur_device.device_type == DEVICE_TYPE_PRINTER:
self.Tabs.setTabText(self.Tabs.indexOf(self.Settings), QApplication.translate("MainWindow", "Print Settings", None))
self.Tabs.setTabText(self.Tabs.indexOf(self.Control), QApplication.translate("MainWindow", "Printer Control", None))
else:
self.Tabs.setTabText(self.Tabs.indexOf(self.Settings), QApplication.translate("MainWindow", "Fax Settings", None))
self.Tabs.setTabText(self.Tabs.indexOf(self.Control), QApplication.translate("MainWindow", "Fax Control", None))
def DeviceList_currentChanged(self, i, j):
if i is not None and not self.updating:
self.cur_device_uri = self.DeviceList.currentItem().device_uri
self.cur_device = device_list[self.cur_device_uri]
#user_conf.set('last_used', 'device_uri', self.cur_device_uri)
self.user_settings.last_used_device_uri = self.cur_device_uri
self.user_settings.save()
self.updateDevice()
self.updateWindowTitle()
def findItem(self, dev):
if dev is None:
dev = self.cur_device
return self.findItemByURI(dev.device_uri)
def findItemByURI(self, device_uri):
index = 0
item = self.DeviceList.item(index)
while item is not None:
if item.device_uri == device_uri:
return item
index += 1
item = self.DeviceList.item(index)
def findDeviceByURI(self, device_uri):
try:
return device_list[device_uri]
except:
return None
def requestDeviceUpdate(self, dev=None, item=None):
""" Submit device update request to update thread. """
if dev is None:
dev = self.cur_device
if dev is not None:
dev.error_state = ERROR_STATE_REFRESHING
self.updateDevice(dev, update_tab=False)
self.sendMessage(dev.device_uri, '', EVENT_DEVICE_UPDATE_REQUESTED)
def rescanDevices(self):
""" Rescan and update all devices. """
if not self.updating:
self.RefreshAllAction.setEnabled(False)
try:
self.refreshDeviceList()
finally:
self.RefreshAllAction.setEnabled(True)
def callback(self):
qApp.processEvents()
# ***********************************************************************************
#
# DEVICE LIST RIGHT CLICK
#
# ***********************************************************************************
def DeviceList_customContextMenuRequested(self, p):
d = self.cur_device
if d is not None:
avail = d.device_state != DEVICE_STATE_NOT_FOUND and d.supported
printer = d.device_type == DEVICE_TYPE_PRINTER and avail
fax = d.fax_type > FAX_TYPE_NONE and prop.fax_build and d.device_type == DEVICE_TYPE_FAX and \
sys.hexversion >= 0x020300f0 and avail
scan = d.scan_type > SCAN_TYPE_NONE and prop.scan_build and \
printer and self.user_settings.cmd_scan
cpy = d.copy_type > COPY_TYPE_NONE and printer
popup = QMenu(self)
item = self.DeviceList.currentItem()
if item is not None:
if self.cur_device.error_state != ERROR_STATE_ERROR:
if printer:
popup.addAction(self.__tr("Print..."), lambda: self.contextMenuFunc(PrintDialog(self, self.cur_printer)))
if scan:
popup.addAction(self.__tr("Scan..."), lambda: self.contextMenuFunc(self.user_settings.cmd_scan)) #self.ScanButton_clicked)
if cpy:
popup.addAction(self.__tr("Make Copies..."), lambda: MakeCopiesDialog(self, self.cur_device_uri)) #self.MakeCopiesButton_clicked)
else: # self.cur_device.device_type == DEVICE_TYPE_FAX:
if fax:
popup.addAction(self.__tr("Send Fax..."), lambda: self.contextMenuFunc(SendFaxDialog(self, self.cur_printer, self.cur_device_uri))) #self.SendFaxButton_clicked)
popup.addSeparator()
if not self.updating:
popup.addAction(self.__tr("Refresh Device"), self.requestDeviceUpdate) #self.DeviceRefreshAction_activated)
if not self.updating:
popup.addAction(self.__tr("Refresh All"), self.rescanDevices) #self.RefreshAllAction_activated)
popup.addSeparator()
if self.DeviceList.viewMode() == QListView.IconMode:
popup.addAction(self.__tr("View as List"), lambda: self.setDeviceListViewMode(QListView.ListMode))
else:
popup.addAction(self.__tr("View as Icons"), lambda: self.setDeviceListViewMode(QListView.IconMode))
popup.exec_(self.DeviceList.mapToGlobal(p))
def contextMenuFunc(self, f):
self.sendMessage('', '', EVENT_DEVICE_STOP_POLLING)
try:
try:
f.exec_() # Dialog
except AttributeError:
beginWaitCursor()
if f.split(':')[0] in ('http', 'https', 'file'):
log.debug("Opening browser to: %s" % f)
utils.openURL(f)
else:
self.runExternalCommand(f)
QTimer.singleShot(1000, self.unlockClick)
finally:
self.sendMessage('', '', EVENT_DEVICE_START_POLLING)
# ***********************************************************************************
#
# PRINTER NAME COMBOS
#
# ***********************************************************************************
def updatePrinterCombos(self):
self.PrintSettingsPrinterNameCombo.clear()
self.PrintControlPrinterNameCombo.clear()
if self.cur_device is not None and \
self.cur_device.supported:
self.cur_device.updateCUPSPrinters()
for c in self.cur_device.cups_printers:
self.PrintSettingsPrinterNameCombo.insertItem(0, c)
self.PrintControlPrinterNameCombo.insertItem(0, c)
self.cur_printer = to_unicode(self.PrintSettingsPrinterNameCombo.currentText())
def PrintSettingsPrinterNameCombo_activated(self, s):
self.cur_printer = to_unicode(s)
self.updateCurrentTab()
def PrintControlPrinterNameCombo_activated(self, s):
self.cur_printer = to_unicode(s)
self.updateCurrentTab()
# ***********************************************************************************
#
# FUNCTIONS/ACTION TAB
#
# ***********************************************************************************
def initActionsTab(self):
self.click_lock = None
self.ActionsList.setIconSize(QSize(32, 32))
self.ActionsList.itemClicked["QListWidgetItem *"].connect(self.ActionsList_clicked)
self.ActionsList.itemDoubleClicked["QListWidgetItem *"].connect(self.ActionsList_clicked)
def updateActionsTab(self):
beginWaitCursor()
try:
self.ActionsList.clear()
d = self.cur_device
if d is not None:
avail = d.device_state != DEVICE_STATE_NOT_FOUND and d.supported
fax = d.fax_type > FAX_TYPE_NONE and prop.fax_build and d.device_type == DEVICE_TYPE_FAX and \
sys.hexversion >= 0x020300f0 and avail
printer = d.device_type == DEVICE_TYPE_PRINTER and avail
scan = d.scan_type > SCAN_TYPE_NONE and prop.scan_build and \
printer and self.user_settings.cmd_scan
cpy = d.copy_type > COPY_TYPE_NONE and printer
req_plugin = d.plugin == PLUGIN_REQUIRED
opt_plugin = d.plugin == PLUGIN_OPTIONAL
try:
back_end, is_hp, bus, model, serial, dev_file, host, zc, port = \
device.parseDeviceURI(self.cur_device_uri)
except Error:
return
hplip_conf = configparser.ConfigParser()
fp = open("/etc/hp/hplip.conf", "r")
hplip_conf.readfp(fp)
fp.close()
try:
plugin_installed = utils.to_bool(hplip_conf.get("hplip", "plugin"))
except configparser.NoOptionError:
plugin_installed = False
if d.plugin != PLUGIN_NONE:
if req_plugin and plugin_installed:
x = self.__tr("Download and install<br>required plugin (already installed).")
elif req_plugin and not plugin_installed:
x = self.__tr("Download and install<br>required plugin (needs installation).")
elif opt_plugin and plugin_installed:
x = self.__tr("Download and install<br>optional plugin (already installed).")
elif opt_plugin and not plugin_installed:
x = self.__tr("Download and install<br>optional plugin (needs installation).")
else:
x = ''
# TODO: Cache this data structure
# -- add a field that specifies if the icon should always show, or only when device is avail.
# TODO: Tooltips
# TODO: Right-click icon/list view menu
self.ICONS = [
# PRINTER
(lambda : printer,
self.__tr("Print"), # Text
"print", # Icon
self.__tr("Print documents or files."), # Tooltip
lambda : PrintDialog(self, self.cur_printer)), # command/action
(lambda :scan,
self.__tr("Scan"),
"scan",
self.__tr("Scan a document, image, or photograph.<br>"),
self.user_settings.cmd_scan),
(lambda : cpy,
self.__tr("Make Copies"),
"makecopies",
self.__tr("Make copies on the device controlled by the PC.<br>"),
lambda : MakeCopiesDialog(self, self.cur_device_uri)),
# FAX
(lambda: fax,
self.__tr("Send Fax"),
"fax",
self.__tr("Send a fax from the PC."),
lambda : SendFaxDialog(self, self.cur_printer, self.cur_device_uri)),
(lambda: fax,
self.__tr("Fax Setup"),
"fax_setup",
self.__tr("Fax support must be setup before you can send faxes."),
lambda : FaxSetupDialog(self, self.cur_device_uri)),
(lambda: fax and self.user_settings.cmd_fab,
self.__tr("Fax Address Book"),
"fab",
self.__tr("Setup fax phone numbers to | |
from time import sleep
import os
import requests
import xmltodict as x2d
from datetime import datetime
import time
import sys
from os.path import join as pjoin
itt = 1
def append_path(function):
for r,d,f in os.walk(os.path.normpath(os.getcwd()+os.sep+os.pardir)):
for files in f:
if files == function+'.py' or files == function+'.pyc':
url = os.path.join(r,files)
url = url.rsplit(files)[0]
return url
sys.path.append(append_path('SepFunc'))
import SepFunc as sf
sys.path.append(append_path('dnsdiscover'))
import dnsdiscover as dns
sys.path.append(append_path('ServerPath'))
import ServerPath as sp
sys.path.append(append_path('Config'))
import Config
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.normpath(os.getcwd()+os.sep+os.pardir)+'/logs.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
def certificate(function):
for r,d,f in os.walk(os.path.normpath(os.getcwd()+os.sep+os.pardir)):
for files in f:
if files == function+'.pem':
url = os.path.join(r,files)
return url
def DRresponse_path(function):
for r,d,f in os.walk(os.path.normpath(os.getcwd()+os.sep+os.pardir)):
for files in f:
if files == function+'.xml' or files == function+'.exi':
url = os.path.join(r,files)
url = url.rsplit(function)[0]
return url
def ClientCert(function):
for r,d,f in os.walk(os.path.normpath(os.getcwd()+os.sep+os.pardir)):
for files in f:
if files == function+'.crt':
Crturl = os.path.join(r,files)
if files == function+'.key':
Keyurl = os.path.join(r,files)
return Crturl, Keyurl
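# Fetch the server's EndDeviceList over HTTPS (client certificate plus basic
# auth) and, for the EndDevice whose sFDI matches, return its Registration,
# FunctionSetAssignmentsList and LoadShedAvailability links.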
def discovery(ip, port, path, sFDI):
print 'https://' + ip + ':' + port + path
r = requests.get('https://' + ip + ':' + port + path, verify = certificate('TAserver'), cert = (ClientCert('CA')[0],ClientCert('CA')[1]), auth=('username', 'password'), timeout=3)
sleep(3)
if r.status_code == 200 :
try:
EndDeviceList = r.text
doc = x2d.parse(EndDeviceList)
if int(doc['EndDeviceList']['@all'])==1:
if doc['EndDeviceList']['EndDevice']['sFDI'] == str(sFDI):
RegistrationLink = doc['EndDeviceList']['EndDevice']['RegistrationLink']['@href']
FunctionSetAssignmentsListLink = doc['EndDeviceList']['EndDevice']['FunctionSetAssignmentsListLink']['@href']
LoadShedAvailabilityLink = doc['EndDeviceList']['EndDevice']['LoadShedAvailabilityLink']['@href']
return RegistrationLink, FunctionSetAssignmentsListLink, LoadShedAvailabilityLink
else:
for i in range(int(doc['EndDeviceList']['@all'])):
if doc['EndDeviceList']['EndDevice'][i]['sFDI'] == str(sFDI):
RegistrationLink = doc['EndDeviceList']['EndDevice'][i]['RegistrationLink']['@href']
FunctionSetAssignmentsListLink = doc['EndDeviceList']['EndDevice'][i]['FunctionSetAssignmentsListLink']['@href']
LoadShedAvailabilityLink = doc['EndDeviceList']['EndDevice'][i]['LoadShedAvailabilityLink']['@href']
return RegistrationLink, FunctionSetAssignmentsListLink, LoadShedAvailabilityLink
except:
raise
print " server is not responding with given URI or sFDI is not correct"
def registery(ip, port, path, sFDI, RegistrationLink):
r = requests.get('https://' + ip + ':' + port + RegistrationLink, verify = certificate('TAserver'),\
cert = (ClientCert('CA')[0],ClientCert('CA')[1]), auth=('username', 'password'), timeout = 3)
sleep(3)
try:
if r.status_code == 200 :
Registration = r.text
doc = x2d.parse(Registration)
dateTimeRegistered = int(doc['Registration']['dateTimeRegistered'])
pIN = int(doc['Registration']['pIN'])
except:
print " server is not responding! "
return pIN
def AssignmentLink(mRID, FunctionSetAssignmentsListLink):
mRID = mRID.encode('hex')
mRID = mRID.upper()
try:
r = requests.get('https://' + ip + ':' + port + FunctionSetAssignmentsListLink, verify = certificate('TAserver'), \
cert = (ClientCert('CA')[0],ClientCert('CA')[1]), auth=('username', 'password'), timeout = 3)
sleep(3)
FSAL = r.text
doc = x2d.parse(FSAL)
if int(doc['FunctionSetAssignmentsList']['@all'])==1:
if doc['FunctionSetAssignmentsList']['FunctionSetAssignments']['mRID'] == mRID:
url = doc['FunctionSetAssignmentsList']['FunctionSetAssignments']['@href']
r = requests.get('https://' + ip + ':' + port + url, verify = certificate('TAserver'), \
cert = (ClientCert('CA')[0],ClientCert('CA')[1]), auth=('username', 'password'), timeout = 3)
sleep(3)
FSA = r.text
dox = x2d.parse(FSA)
DemandResponseProgramListLink = dox['FunctionSetAssignments']['DemandResponseProgramListLink']['@href']
DERProgramListLink = dox['FunctionSetAssignments']['DERProgramListLink']['@href']
ResponseSetListLink = dox['FunctionSetAssignments']['ResponseSetListLink']['@href']
UsagePointListLink = dox['FunctionSetAssignments']['UsagePointListLink']['@href']
else:
for i in range(int(doc['FunctionSetAssignmentsList']['@all'])):
if doc['FunctionSetAssignmentsList']['FunctionSetAssignments'][i]['mRID'] == mRID:
url = doc['FunctionSetAssignmentsList']['FunctionSetAssignments'][i]['@href']
r = requests.get('https://' + ip + ':' + port + url, verify = certificate('TAserver'), \
cert = (ClientCert('CA')[0],ClientCert('CA')[1]), auth=('username', 'password'), timeout = 3)
sleep(3)
FSA = r.text
dox = x2d.parse(FSA)
DemandResponseProgramListLink = dox['FunctionSetAssignments']['DemandResponseProgramListLink']['@href']
DERProgramListLink = dox['FunctionSetAssignments']['DERProgramListLink']['@href']
ResponseSetListLink = dox['FunctionSetAssignments']['ResponseSetListLink']['@href']
UsagePointListLink = dox['FunctionSetAssignments']['UsagePointListLink']['@href']
return url, DemandResponseProgramListLink, DERProgramListLink, ResponseSetListLink, UsagePointListLink
except:
print " Server is not running or mRID is wrong! "
pass
def ActiveEndDeviceLink(id1, id2):
_href = sp.Path3(append_path('Server'), 'dr', id1, 'actedc')[1]
_all = 1
ActiveEndDeviceControlListLink = sf.ActiveEndDeviceControlListLink_FUNC(_href, _all)
return ActiveEndDeviceControlListLink
def EndDeviceControlLL(id1, id2):
_href = sp.Path3(append_path('Server'), 'dr', id1, 'edc')[1]
_all = 1
EndDeviceControlListLink = sf.EndDeviceControlListLink_FUNC(_href, _all)
return EndDeviceControlListLink
def DemandResponseP(doc, mRID, id1, id2, i):
_href_Dr = sp.Path2(append_path('Server'), 'dr', id1)[1]
_mRID = mRID
_description = 'GREAT-DR round# ' + str(i)
_version = int(doc['DemandResponseProgram']['version'])
_ActiveEndDeviceControlListLink = ActiveEndDeviceLink(id1, id2)
_availabilityUpdatePercentChangeThreshold = int(doc['DemandResponseProgram']['availabilityUpdatePercentChangeThreshold'])
_multiplier = int(doc['DemandResponseProgram']['availabilityUpdatePowerChangeThreshold']['multiplier'])
_value = int(doc['DemandResponseProgram']['availabilityUpdatePowerChangeThreshold']['value'])
_availabilityUpdatePowerChangeThreshold = 1 # TODO: wipe
_EndDeviceControlListLink = EndDeviceControlLL(id1, id2)
_primacy = int(doc['DemandResponseProgram']['primacy'])
DemandResponseProgram = sf.DemandResponseProgram_FUNC(_href_Dr, _mRID, _description, _version, _ActiveEndDeviceControlListLink, _availabilityUpdatePercentChangeThreshold, _multiplier, _value, _availabilityUpdatePowerChangeThreshold, _EndDeviceControlListLink, _primacy)
#print (DemandResponseProgram.toxml(element_name='DemandResponseProgram'))
with open(pjoin(sp.Path2(append_path('Server'), 'dr', id1)[0], 'DemandResponseProgram.xml'), 'w') as f:
f.write(DemandResponseProgram.toDOM().toprettyxml())
JAVApath = 'java' #'/usr/java/jdk1.8.0_171/bin/java'
Enginepathe = pjoin(append_path('Server'), 'ExiProcessor\ExiProcessor.jar')
XMLpath = pjoin(sp.Path2(append_path('Server'), 'dr', id1)[0], 'DemandResponseProgram.xml')
EXIpath = pjoin(sp.Path2(append_path('Server'), 'dr', id1)[0], 'DemandResponseProgram.exi')
XSDpath = pjoin(append_path('Server'), 'sep.xsd')
#XML2EXI(JAVApath, Enginepathe, XMLpath, EXIpath, XSDpath)
return DemandResponseProgram
def EndDeviceC(doc, mRID, id1, id2, i):
_href = sp.Path4(append_path('Server'), 'dr', id1, 'edc', id2)[1]
_replyTo = sp.Path4(append_path('Server'), 'rsps', id1, 'rsp', id2)[1]
ReplyTo = doc['EndDeviceControl']['@replyTo']
_responseRequired = doc['EndDeviceControl']['@responseRequired']
_mRID = mRID
_description = 'GREAT-DR round# ' + str(i)
_version = 0
_subscribable = int(doc['EndDeviceControl']['@subscribable'])
_currentStatus = int(doc['EndDeviceControl']['EventStatus']['currentStatus']) # 0 = Scheduled, 1 = Active, 2 = Cancelled, 3 = Cancelled with Randomization, 4 = Superseded
_dateTime = int(time.mktime(datetime.now().timetuple()))
if doc['EndDeviceControl']['EventStatus']['potentiallySuperseded'] == 'false':
_potentiallySuperseded = 0
else:
_potentiallySuperseded = 1
_potentiallySupersededTime = int(doc['EndDeviceControl']['EventStatus']['potentiallySupersededTime'])
_reason = doc['EndDeviceControl']['EventStatus']['reason']
_creationTime = int(doc['EndDeviceControl']['creationTime'])
_duration = int(doc['EndDeviceControl']['interval']['duration'])
_start = int(doc['EndDeviceControl']['interval']['start'])
_randomizeDuration = int(doc['EndDeviceControl']['randomizeDuration'])
_randomizeStart = int(doc['EndDeviceControl']['randomizeStart'])
_ApplianceLoadReductionType = int(doc['EndDeviceControl']['ApplianceLoadReduction']['type'])
_DeviceCategoryType = '\x00\x00\x00\x00'
if doc['EndDeviceControl']['drProgramMandatory'] == 'false':
_drProgramMandatory = 0
else:
_drProgramMandatory = 1
_DutyCycleValue = int(doc['EndDeviceControl']['DutyCycle']['normalValue'])
if doc['EndDeviceControl']['loadShiftForward']== 'false':
_loadShiftForward = 0
else:
_loadShiftForward = 1
_coolingOffset = int(doc['EndDeviceControl']['Offset']['coolingOffset'])
_heatingOffset = int(doc['EndDeviceControl']['Offset']['heatingOffset'])
_loadAdjustmentPercentageOffset = int(doc['EndDeviceControl']['Offset']['loadAdjustmentPercentageOffset'])
_overrideDuration = int(doc['EndDeviceControl']['overrideDuration'])
_coolingSetpoint = int(doc['EndDeviceControl']['SetPoint']['coolingSetpoint'])
_heatingSetpoint = int(doc['EndDeviceControl']['SetPoint']['heatingSetpoint'])
_ReductionType = int(doc['EndDeviceControl']['TargetReduction']['type'])
_ReductionValue = int(doc['EndDeviceControl']['TargetReduction']['value'])
EndDeviceControl = sf.EndDeviceControl_FUNC(_href, _replyTo, _responseRequired, _mRID, _description, _version, _subscribable, _currentStatus, _dateTime, _potentiallySuperseded, _potentiallySupersededTime,\
_reason, _creationTime, _duration, _start, _randomizeDuration, _randomizeStart, _ApplianceLoadReductionType, _DeviceCategoryType, _drProgramMandatory, _DutyCycleValue, _loadShiftForward,\
_coolingOffset, _heatingOffset, _loadAdjustmentPercentageOffset, _overrideDuration, _coolingSetpoint, _heatingSetpoint, _ReductionType, _ReductionValue)
#print (EndDeviceControl.toxml(element_name='EndDeviceControl'))
with open(pjoin(sp.Path4(append_path('Server'), 'dr', id1, 'edc', id2)[0], 'EndDeviceControl.xml'), 'w') as f:
f.write(EndDeviceControl.toDOM(parent=None, element_name='EndDeviceControl').toprettyxml())
JAVApath = 'java' #'/usr/java/jdk1.8.0_171/bin/java'
Enginepathe = pjoin(append_path('Server'),'ExiProcessor\ExiProcessor.jar')
XMLpath = pjoin(sp.Path4(append_path('Server'), 'dr', id1, 'edc', id2)[0], 'EndDeviceControl.xml')
EXIpath = pjoin(sp.Path4(append_path('Server'), 'dr', id1, 'edc', id2)[0], 'EndDeviceControl.exi')
XSDpath = pjoin(append_path('Server'), 'sep.xsd')
#XML2EXI(JAVApath, Enginepathe, XMLpath, EXIpath, XSDpath)
return _href, _replyTo, ReplyTo
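# Poll the DemandResponseProgramList, find the program whose mRID matches,
# mirror it locally via DemandResponseP(), then walk its EndDeviceControlList
# and create a local EndDeviceControl for every event whose creationTime
# differs from the last seen timestamp (EDCtlTimeStamp).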
def GetDR(ip, port, DemandResponseProgramListLink, mRID, id1, id2, EDCtlTimeStamp, HEMSCmRID, DrNum, EdevNum, itt):
mRIDX = mRID.encode('hex')
mRIDX = mRIDX.upper()
r = requests.get('https://' + ip + ':' + port + DemandResponseProgramListLink, verify = certificate('TAserver'), \
cert = (ClientCert('CA')[0],ClientCert('CA')[1]), auth=('username', 'password'), timeout = 3)
sleep(3)
DemandResponseProgramList = r.text
doc = x2d.parse(DemandResponseProgramList)
DemandResponseLinks = []
if int(doc['DemandResponseProgramList']['@all']) == 1:
for item in doc['DemandResponseProgramList']['DemandResponseProgram']:
mrid = doc['DemandResponseProgramList']['DemandResponseProgram']['mRID']
if mrid == mRIDX:
url = ((doc['DemandResponseProgramList']['DemandResponseProgram'])['@href']).replace('\\','/')
r = requests.get('https://' + ip + ':' + port + url, verify = certificate('TAserver'), \
cert = (ClientCert('CA')[0],ClientCert('CA')[1]), auth=('username', 'password'), timeout = 3)
sleep(3)
DemandResponseProgram = r.text
DRP = x2d.parse(DemandResponseProgram)
DemandResponseProgram = DemandResponseP(DRP, HEMSCmRID, DrNum, EdevNum, itt)
url = ((doc['DemandResponseProgramList']['DemandResponseProgram'])['EndDeviceControlListLink']['@href']).replace('\\','/')
r = requests.get('https://' + ip + ':' + port + url, verify = certificate('TAserver'), \
cert = (ClientCert('CA')[0],ClientCert('CA')[1]), auth=('username', 'password'), timeout = 3)
sleep(3)
EndDeviceControlList = r.text
EndDeviceCtlList = x2d.parse(EndDeviceControlList)
if int(EndDeviceCtlList['EndDeviceControlList']['@all']) == 1:
url = (EndDeviceCtlList['EndDeviceControlList']['EndDeviceControl']['@href']).replace('\\','/')
r = requests.get('https://' + ip + ':' + port + url , verify = certificate('TAserver'), \
cert = (ClientCert('CA')[0],ClientCert('CA')[1]), auth=('username', 'password'), timeout = 3)
sleep(3)
EndDeviceCtl = r.text
EndDeviceCtl = x2d.parse(EndDeviceCtl)
creationTime = int(EndDeviceCtl['EndDeviceControl']['creationTime'])
if creationTime != EDCtlTimeStamp:
_href, _replyTo, ReplyTo = EndDeviceC(EndDeviceCtl, HEMSCmRID, DrNum, EdevNum, itt)
logger.info("New EndDevice is created at " + _href )
print "New EndDevice is created at " , _href
DemandResponseLinks.append((_href, _replyTo, ReplyTo))
else:
print "No new EndDeviceControl! "
EDCtlTimeStamp = creationTime
else:
for j in range(int(EndDeviceCtlList['EndDeviceControlList']['@all'])):
url = ((EndDeviceCtlList['EndDeviceControlList']['EndDeviceControl'])[j]['@href']).replace('\\','/')
r = requests.get('https://' + ip + ':' + port + url, verify = certificate('TAserver'), \
cert = (ClientCert('CA')[0],ClientCert('CA')[1]), auth=('username', 'password'), timeout = 3)
sleep(3)
EndDeviceCtl = r.text
EndDeviceCtl = x2d.parse(EndDeviceCtl)
creationTime = int(EndDeviceCtl['EndDeviceControl']['creationTime'])
if creationTime != EDCtlTimeStamp:
_href, _replyTo, ReplyTo = EndDeviceC(EndDeviceCtl, HEMSCmRID, DrNum, EdevNum, itt)
logger.info("New EndDevice is created at " + _href )
print "New EndDevice is created at " , _href
DemandResponseLinks.append((_href, _replyTo, ReplyTo))
else:
print "No new EndDeviceControl! "
EDCtlTimeStamp = creationTime
else:
for j in range(int(doc['DemandResponseProgramList']['@all'])):
mrid = (doc['DemandResponseProgramList']['DemandResponseProgram'])[j]['mRID']
if mrid == mRIDX:
url = ((doc['DemandResponseProgramList']['DemandResponseProgram'])[j]['@href']).replace('\\','/')
r = requests.get('https://' + ip + ':' + port + url, verify = certificate('TAserver'), \
cert = (ClientCert('CA')[0],ClientCert('CA')[1]), auth=('username', 'password'), timeout = 3)
sleep(3)
DemandResponseProgram = r.text
DRP = x2d.parse(DemandResponseProgram)
DemandResponseProgram = DemandResponseP(DRP, HEMSCmRID, DrNum, EdevNum, itt)
url = ((doc['DemandResponseProgramList']['DemandResponseProgram'])[j]['EndDeviceControlListLink']['@href']).replace('\\','/')
r = requests.get('https://' + ip + ':' + port + url, verify = certificate('TAserver'), \
cert = (ClientCert('CA')[0],ClientCert('CA')[1]), auth=('username', 'password'), timeout = 3)
sleep(3)
EndDeviceControlList = r.text
EndDeviceCtlList = x2d.parse(EndDeviceControlList)
if int(EndDeviceCtlList['EndDeviceControlList']['@all']) == 1:
url = (EndDeviceCtlList['EndDeviceControlList']['EndDeviceControl']['@href']).replace('\\','/')
r = requests.get('https://' + ip + ':' + port +url, verify = certificate('TAserver'), \
cert = (ClientCert('CA')[0],ClientCert('CA')[1]), auth=('username', 'password'), timeout = 3)
sleep(3)
EndDeviceCtl = r.text
EndDeviceCtl = x2d.parse(EndDeviceCtl)
creationTime = int(EndDeviceCtl['EndDeviceControl']['creationTime'])
if creationTime != EDCtlTimeStamp:
_href, _replyTo, ReplyTo = EndDeviceC(EndDeviceCtl, HEMSCmRID, DrNum, EdevNum, itt)
logger.info("New EndDevice is created at " + _href )
# Built-in
import os
import warnings
# Common
import numpy as np
import scipy.constants as scpct
from scipy.interpolate import BSpline
import matplotlib.pyplot as plt
# specific
from . import _fit12d_funccostjac as _funccostjac
__all__ = [
'fit1d_extract',
'fit2d_extract',
]
# Think this through again:
# automatically load all ?
# width => Ti?
# shift => vi?
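# _D3 maps each quantity that can be extracted from a fit to the categories it
# can be returned as ('x' and/or 'lines'), its unit, and the fitted field
# ('amp', 'width', 'shift', ...) it is derived from.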
_D3 = {
'bck_amp': {
'types': ['x'],
'unit': 'a.u.',
'field': 'bck_amp',
},
'bck_rate': {
'types': ['x'],
'unit': 'a.u.',
'field': 'bck_rate',
},
'amp': {
'types': ['x', 'lines'],
'units': 'a.u.',
'field': 'amp',
},
'width': {
'types': ['x', 'lines'],
'units': 'a.u.',
'field': 'width',
},
'shift': {
'types': ['x', 'lines'],
'units': 'a.u.',
'field': 'shift',
},
'ratio': {
'types': ['lines'],
'units': 'a.u.',
'field': 'amp',
},
'Ti': {
'types': ['lines'],
'units': 'eV',
'field': 'width',
},
'vi': {
'types': ['x'],
'units': 'm.s^-1',
'field': 'shift',
},
'dratio': {
'types': ['x'],
'units': 'a.u.',
'field': 'dratio',
},
'dshift': {
'types': ['x'],
'units': 'a.u.',
'field': 'dshift',
},
}
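# Note (descriptive comment added for clarity): each _D3 entry describes one
# extractable quantity. 'types' lists how it can be returned ('x' = one value
# per unique fitted variable, 'lines' = one value per spectral line), 'units'
# gives the display unit, and 'field' names the fitted variable it derives
# from (e.g. 'Ti' comes from the fitted 'width', 'vi' from the fitted 'shift').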
_ALLOW_PICKLE = True
###########################################################
###########################################################
#
# Extract data from pre-computed dict of fitted results
#
###########################################################
###########################################################
def fit12d_get_data_checkformat(
dfit=None,
bck=None,
amp=None, ratio=None,
Ti=None, width=None,
vi=None, shift=None,
sol_total=None,
sol_detail=None,
sol_pts=None,
phi_prof=None,
phi_npts=None,
vs_nbs=None,
allow_pickle=None,
):
# ----------------
# load file if str
if isinstance(dfit, str):
if not os.path.isfile(dfit) or not dfit[-4:] == '.npz':
msg = ("Provided dfit must be either a dict or "
+ "the absolute path to a saved .npz\n"
+ " You provided: {}".format(dfit))
raise Exception(msg)
if allow_pickle is None:
allow_pickle = _ALLOW_PICKLE
dfit = dict(np.load(dfit, allow_pickle=allow_pickle))
_rebuild_dict(dfit)
# ----------------
# check dfit basic structure
lk = ['dprepare', 'dinput', 'dind', 'sol_x', 'jac', 'scales']
c0 = isinstance(dfit, dict) and all([ss in dfit.keys() for ss in lk])
    if not c0:
msg = ("\ndfit must be a dict with at least the following keys:\n"
+ "\t- {}\n".format(lk)
+ "\t- provided: {}".format(dfit))
raise Exception(msg)
# ----------------
# Identify if fit1d or fit2d
is2d = 'nbs' in dfit['dinput'].keys()
if is2d is True:
if 'symmetry' not in dfit['dinput'].keys():
msg = "dfit is a fit2d but does not have key 'symmetry'!"
raise Exception(msg)
if dfit['dinput']['symmetry']:
c0 = dfit['dinput'].get('symmetry_axis', False) is False
if c0:
msg = "dfit is a fit2d but does not have key 'symmetry_axis'!"
raise Exception(msg)
else:
phi_prof = False
# ----------------
# Extract dinput and dprepare (more readable)
dinput = dfit['dinput']
dprepare = dfit['dinput']['dprepare']
# ----------------
# ratio
if ratio is None:
ratio = False
if ratio is not False:
amp = ['lines', 'x']
if ratio is not False:
lkeys = dfit['dinput']['keys']
if isinstance(ratio, tuple):
ratio = [ratio]
lc = [
isinstance(ratio, list)
and all([isinstance(tt, tuple) and len(tt) == 2 for tt in ratio]),
isinstance(ratio, np.ndarray)
and ratio.ndim == 2
and ratio.shape[0] == 2
]
msg = (
"\nArg ratio (spectral lines magnitude ratio) must be either:\n"
"\t- False: no line ration computed\n"
"\t- tuple of len=2: upper and lower keys of the lines\n"
"\t- list of tuple of len=2: upper and lower keys pairs\n"
"\t- np.ndarray of shape (2, N): upper keys and lower keys\n"
f" Available keys: {lkeys}\n"
f" Provided: {ratio}\n"
)
if not any(lc):
warnings.warn(msg)
ratio = False
elif lc[0]:
ratio = np.atleast_2d(ratio).T
            # Remove ratios using non-available lines
indokratio = np.array([
ratio[0, ii] in lkeys and ratio[1, ii] in lkeys
for ii in range(ratio.shape[1])
])
if np.any(indokratio):
ratio = ratio[:, indokratio]
else:
ratio = False
# ----------------
# Check / format amp, Ti, vi
# check if double
isdouble = dfit['dinput']['double']
d3 = {k0: dict(v0) for k0, v0 in _D3.items()}
lval = [
[bck, 'bck_amp'], [bck, 'bck_rate'],
[amp, 'amp'], [width, 'width'], [shift, 'shift'],
[ratio, 'ratio'], [Ti, 'Ti'], [vi, 'vi'],
[isdouble, 'dratio'], [isdouble, 'dshift'],
]
for (v0, k0) in lval:
if v0 is None or v0 is True:
d3[k0]['requested'] = _D3[k0]['types']
else:
d3[k0]['requested'] = v0
# remove non-requested
lout = [k0 for k0, v0 in d3.items() if v0['requested'] is False]
for k0 in lout:
del d3[k0]
# ----------------
# amp, Ti, vi from d3
lkkeys = ['amp', 'width', 'shift', 'Ti', 'vi']
for k0 in d3.keys():
if k0 == 'ratio':
v0 = d3[k0]['types']
else:
v0 = d3[k0]['requested']
# basic conformity check
if isinstance(v0, str):
v0 = [v0]
d3[k0]['requested'] = v0
c0 = (
k0 != 'ratio'
and isinstance(v0, list)
and all([isinstance(ss, str) for ss in v0])
)
if not (k0 == 'ratio' or c0):
msg = (
f"Arg {k0} must be a list of str!\n"
f"Provided: {v0}"
)
raise Exception(msg)
        # check if trying to get all/some lines and / or all/some x
        ltypes = d3[k0]['types']
        c0 = all([ss in ltypes for ss in v0])   # all lines/x
        c1 = (
            not c0
            and 'lines' in ltypes
            and all([ss in dinput['keys'] for ss in v0])    # some lines
        )
        c2 = (
            not c0
            and not c1
            and 'x' in ltypes
            and all([ss in dinput[d3[k0]['field']]['keys'] for ss in v0])  # some x
        )
if not any([c0, c1, c2]):
msg = (
f"Arg {k0} elements must be either:\n"
f"\t- 'x': return all unique {k0}\n"
f"\t- 'lines': return {k0} for all lines (inc. duplicates)\n"
"\t- str: a key in:\n"
f"\t\t lines: {dinput['keys']}\n"
f"\t\t variables: {dinput[d3[k0][1]]['keys']}\n\n"
f"Provided: {d3[k0][0]}"
)
raise Exception(msg)
if c0:
# 'lines' and/or 'x'
for k1 in v0:
if k0 in lkkeys:
if k1 == 'lines':
keys = dinput['keys']
else:
keys = dinput[d3[k0]['field']]['keys']
d3[k0][k1] = {
'keys': keys,
'ind': np.arange(0, len(keys)),
}
elif k0 != 'ratio':
d3[k0][k1] = {
'ind': np.r_[0],
}
else:
d3[k0][k1] = {}
else:
if c1:
# a selection of lines
typ = 'lines'
keysok = dinput['keys']
keys = v0
if k0 == 'amp' and ratio is not False:
for rr in set(ratio.ravel().tolist()):
if rr not in keys:
keys.append(rr)
elif c2:
# a selection of variables 'x'
typ = 'x'
                keysok = dinput[d3[k0]['field']]['keys']
keys = v0
d3[k0][typ] = {
'keys': keys,
'ind': np.array(
[(keysok == ss).nonzero()[0][0] for ss in keys],
dtype=int,
)
}
# ----------------
# phi_prof, phi_npts
if is2d is True:
c1 = [phi_prof is not None, phi_npts is not None]
if all(c1):
msg = "Arg phi_prof and phi_npts cannot be both provided!"
raise Exception(msg)
if phi_npts is False or phi_prof is False:
phi_prof = False
else:
if not any(c1):
phi_npts = (2*dinput['deg']-1)*(dinput['knots'].size-1) + 1
if phi_npts is not None:
phi_npts = int(phi_npts)
if dfit['dinput']['symmetry'] is True:
phimin = (
np.mean(dfit['dinput']['symmetry_axis'])
- dprepare['domain']['phi']['minmax'][1]
)
else:
phimin = dprepare['domain']['phi']['minmax'][0]
phi_prof = np.linspace(
phimin,
dprepare['domain']['phi']['minmax'][1],
phi_npts,
)
else:
phi_prof = np.atleast_1d(phi_prof).ravel()
# vs_nbs
if vs_nbs is None:
vs_nbs = True
if not isinstance(vs_nbs, bool):
msg = "Arg vs_nbs must be a bool!"
raise Exception(msg)
# ----------------
# sol_total, sol_detail, sol_pts
if sol_pts is not None:
if is2d is True:
c0 = (
isinstance(sol_pts, (list, np.ndarray))
and len(sol_pts) == 2
and all([isinstance(ss, np.ndarray) for ss in sol_pts])
and sol_pts[0].shape == sol_pts[1].shape
)
if not c0:
msg = (
"Arg sol_lamb_phi must be a tuple of 2 np.ndarray"
" of same shape!"
)
raise Exception(msg)
else:
c0 = isinstance(sol_pts, np.ndarray)
if not c0:
msg = "Arg sol_lamb must be a np.ndarray!"
raise Exception(msg)
if sol_total is None:
sol_total = sol_pts is not None
if sol_detail is None:
sol_detail = False
if not isinstance(sol_total, bool):
msg = f"Arg sol_total must be a bool!\nProvided: {sol_total}"
raise Exception(msg)
if not isinstance(sol_detail, bool):
msg = f"Arg sol_detail must be a bool!\nProvided: {sol_detail}"
raise Exception(msg)
c0 = (sol_total is True or sol_detail is True) and sol_pts is None
if c0:
if dprepare is None:
sol_pts = False
else:
if is2d is True:
sol_pts = [dprepare['lamb'], dprepare['phi']]
else:
sol_pts = dprepare['lamb']
if any([sol_total, sol_detail]):
assert sol_pts is not None
return dfit, d3, sol_total, sol_detail, sol_pts, phi_prof, vs_nbs
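# Illustrative call (not from the original source), assuming a fit result was
# saved to a hypothetical 'fit1d_result.npz' file by the fitting routine:
#   out = fit12d_get_data_checkformat(dfit='fit1d_result.npz', amp=True, Ti=True)
#   dfit, d3, sol_total, sol_detail, sol_pts, phi_prof, vs_nbs = out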
def fit1d_extract(
dfit1d=None,
bck=None,
amp=None, ratio=None,
Ti=None, width=None,
vi=None, shift=None,
sol_total=None,
sol_detail=None,
sol_lamb=None,
):
"""
    Return a dict with extracted data of interest
bck_amp: (nt,) array
bck_rate: (nt,) array
amp: (nt, namp) array
coefs: (nt, nlines) array
ratio: (nt, nratio) array
width: (nt, nwidth) array
Ti: (nt, nlines) array
shift: (nt, nshift) array
vi: (nt, nlines) array
"""
# -------------------
# Check format input
(
dfit1d, d3,
sol_total, sol_detail, sol_lamb,
_, _,
) | |
potentially needs a replica
:param nc_x: naming context (x) that we are testing if it
"should be present" on the local DC
:param gc_only: Boolean - only consider global catalog servers
:param detect_stale: Boolean - check whether links seems down
:return: None
"""
# We're using the MS notation names here to allow
# correlation back to the published algorithm.
#
# nc_x - naming context (x) that we are testing if it
# "should be present" on the local DC
# f_of_x - replica (f) found on a DC (s) for NC (x)
# dc_s - DC where f_of_x replica was found
# dc_local - local DC that potentially needs a replica
# (f_of_x)
# r_list - replica list R
# p_of_x - replica (p) is partial and found on a DC (s)
# for NC (x)
# l_of_x - replica (l) is the local replica for NC (x)
# that should appear on the local DC
# r_len = is length of replica list |R|
#
# If the DSA doesn't need a replica for this
# partition (NC x) then continue
needed, ro, partial = nc_x.should_be_present(dc_local)
debug.DEBUG_YELLOW("construct_intrasite_graph(): enter" +
"\n\tgc_only=%d" % gc_only +
"\n\tdetect_stale=%d" % detect_stale +
"\n\tneeded=%s" % needed +
"\n\tro=%s" % ro +
"\n\tpartial=%s" % partial +
"\n%s" % nc_x)
if not needed:
debug.DEBUG_RED("%s lacks 'should be present' status, "
"aborting construct_intersite_graph!" %
nc_x.nc_dnstr)
return
# Create a NCReplica that matches what the local replica
# should say. We'll use this below in our r_list
l_of_x = NCReplica(dc_local.dsa_dnstr, dc_local.dsa_guid,
nc_x.nc_dnstr)
l_of_x.identify_by_basedn(self.samdb)
l_of_x.rep_partial = partial
l_of_x.rep_ro = ro
# Add this replica that "should be present" to the
# needed replica table for this DSA
dc_local.add_needed_replica(l_of_x)
# Replica list
#
# Let R be a sequence containing each writable replica f of x
# such that f "is present" on a DC s satisfying the following
# criteria:
#
# * s is a writable DC other than the local DC.
#
# * s is in the same site as the local DC.
#
# * If x is a read-only full replica and x is a domain NC,
# then the DC's functional level is at least
# DS_BEHAVIOR_WIN2008.
#
# * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set
# in the options attribute of the site settings object for
# the local DC's site, or no tuple z exists in the
# kCCFailedLinks or kCCFailedConnections variables such
# that z.UUIDDsa is the objectGUID of the nTDSDSA object
# for s, z.FailureCount > 0, and the current time -
# z.TimeFirstFailure > 2 hours.
r_list = []
# We'll loop thru all the DSAs looking for
# writeable NC replicas that match the naming
# context dn for (nc_x)
#
for dc_s in self.my_site.dsa_table.values():
# If this partition (nc_x) doesn't appear as a
# replica (f_of_x) on (dc_s) then continue
            if nc_x.nc_dnstr not in dc_s.current_rep_table:
continue
# Pull out the NCReplica (f) of (x) with the dn
# that matches NC (x) we are examining.
f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
# Replica (f) of NC (x) must be writable
if f_of_x.is_ro():
continue
# Replica (f) of NC (x) must satisfy the
# "is present" criteria for DC (s) that
# it was found on
if not f_of_x.is_present():
continue
# DC (s) must be a writable DSA other than
# my local DC. In other words we'd only replicate
# from other writable DC
if dc_s.is_ro() or dc_s is dc_local:
continue
# Certain replica graphs are produced only
# for global catalogs, so test against
# method input parameter
if gc_only and not dc_s.is_gc():
continue
# DC (s) must be in the same site as the local DC
# as this is the intra-site algorithm. This is
# handled by virtue of placing DSAs in per
# site objects (see enclosing for() loop)
# If NC (x) is intended to be read-only full replica
# for a domain NC on the target DC then the source
# DC should have functional level at minimum WIN2008
#
# Effectively we're saying that in order to replicate
# to a targeted RODC (which was introduced in Windows 2008)
# then we have to replicate from a DC that is also minimally
# at that level.
#
# You can also see this requirement in the MS special
# considerations for RODC which state that to deploy
# an RODC, at least one writable domain controller in
# the domain must be running Windows Server 2008
if ro and not partial and nc_x.nc_type == NCType.domain:
if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
continue
# If we haven't been told to turn off stale connection
# detection and this dsa has a stale connection then
# continue
if detect_stale and self.is_stale_link_connection(dc_s):
continue
# Replica meets criteria. Add it to table indexed
# by the GUID of the DC that it appears on
r_list.append(f_of_x)
# If a partial (not full) replica of NC (x) "should be present"
# on the local DC, append to R each partial replica (p of x)
# such that p "is present" on a DC satisfying the same
# criteria defined above for full replica DCs.
#
# XXX This loop and the previous one differ only in whether
        # the replica is partial or not. Here we only accept partial
        # (because we're partial); before we only accepted full. Order
        # doesn't matter (the list is sorted a few lines down) so these
# loops could easily be merged. Or this could be a helper
# function.
if partial:
# Now we loop thru all the DSAs looking for
# partial NC replicas that match the naming
# context dn for (NC x)
for dc_s in self.my_site.dsa_table.values():
# If this partition NC (x) doesn't appear as a
# replica (p) of NC (x) on the dsa DC (s) then
# continue
                if nc_x.nc_dnstr not in dc_s.current_rep_table:
continue
# Pull out the NCReplica with the dn that
# matches NC (x) we are examining.
p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
# Replica (p) of NC (x) must be partial
if not p_of_x.is_partial():
continue
# Replica (p) of NC (x) must satisfy the
# "is present" criteria for DC (s) that
# it was found on
if not p_of_x.is_present():
continue
# DC (s) must be a writable DSA other than
# my DSA. In other words we'd only replicate
# from other writable DSA
if dc_s.is_ro() or dc_s is dc_local:
continue
# Certain replica graphs are produced only
# for global catalogs, so test against
# method input parameter
if gc_only and not dc_s.is_gc():
continue
# If we haven't been told to turn off stale connection
# detection and this dsa has a stale connection then
# continue
if detect_stale and self.is_stale_link_connection(dc_s):
continue
# Replica meets criteria. Add it to table indexed
# by the GUID of the DSA that it appears on
r_list.append(p_of_x)
# Append to R the NC replica that "should be present"
# on the local DC
r_list.append(l_of_x)
r_list.sort(sort_replica_by_dsa_guid)
r_len = len(r_list)
max_node_edges = self.intrasite_max_node_edges(r_len)
# Add a node for each r_list element to the replica graph
graph_list = []
for rep in r_list:
node = GraphNode(rep.rep_dsa_dnstr, max_node_edges)
graph_list.append(node)
# For each r(i) from (0 <= i < |R|-1)
i = 0
while i < (r_len-1):
# Add an edge from r(i) to r(i+1) if r(i) is a full
# replica or r(i+1) is a partial replica
if not r_list[i].is_partial() or r_list[i+1].is_partial():
graph_list[i+1].add_edge_from(r_list[i].rep_dsa_dnstr)
# Add an edge from r(i+1) to r(i) if r(i+1) is a full
# replica or ri is a partial replica.
if not r_list[i+1].is_partial() or r_list[i].is_partial():
graph_list[i].add_edge_from(r_list[i+1].rep_dsa_dnstr)
i = i + 1
# Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
# or r0 is a partial | |
def valid(input):
lines = input.split("\n")
count = 0
for line in lines:
words = ["".join(sorted(word)) for word in line.split()]
word_set = set(words)
if len(word_set) == len(words):
count += 1
return count
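# Example: in the line "abcde xyz ecdab", the words "abcde" and "ecdab" both
# sort to "abcde", so the set of sorted words is smaller than the word list and
# that passphrase is not counted as valid.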
if __name__ == "__main__":
test_input = """abcde fghij
abcde xyz ecdab
a ab abc abd abf abj
iiii oiii ooii oooi oooo
oiii ioii iioi iiio"""
c_val = valid(test_input)
print('test1', c_val == 3)
real_input="""bdwdjjo avricm cjbmj ran lmfsom ivsof
mxonybc fndyzzi gmdp gdfyoi inrvhr kpuueel wdpga vkq
bneh ylltsc vhryov lsd hmruxy ebnh pdln vdprrky
fumay zbccai qymavw zwoove hqpd rcxyvy
bcuo khhkkro mpt dxrebym qwum zqp lhmbma esmr qiyomu
qjs giedut mzsubkn rcbugk voxk yrlp rqxfvz kspz vxg zskp
srceh xdwao reshc shecr
dcao isz wwse bbdgn ewsw qkze pwu
lbnvl lviftmr zqiv iadanl fdhrldn dlaani lxy dhfndrl fkoukx
raovmz pdysjsw hqps orsyqw rrwnzcz vrzoam jjljt
wgt gzi icpwp qeefgbe msadakj jbbrma sbj dufuujx zex
cfzx bvyu eswr hafcfy klw bgnhynv qrf aop
rzlq atrzcpb hpl pajjw cdxep ean aptzcrb rzcbapt
xogpf ucc nsukoz umtfbw xfvth ozusnk fopxg ubp iflb
xot nqcdyu kpwix szo cyxv hpmp hwtrc zso nyuqdc aha
mkzf cat tkjprc izxdggf obspan lmlbg bsyspf twox
lfmfrd ooclx tcl clt
dxvnyd nxwojj arutn eyqocj swzao tmh juvpezm
teu eman rlmdmk xkbodv fvrcm zorgy wmwe
hmo fdayx duciqf cgt duciqf
imjnv vfmsha cyrusow xjswoq nclrmjy sjxowq ynjrcml
rwbsay alsi bmzpvw ozq aduui nihwx glwdiz ixmkgfx
vtjzc ntkh zekj qrbkjhn zekj lyfnbg
afaig jqhli oie lhwyduh kqfnraz nfrzaqk mayfg iljqh
inb zum zmu dnl zjxg vrdziq ypdnsvt
uhbzmre mpdxm alkbmsq aopjmkl mqxenry ayvkrf zxvs qkfqva
fimjr ccv cnug crdsv
bqyve lhxdj ydu qbyve vihwjr vyodhc
vmng dyttyf noagpji tdtyfy ewiest ogg
kgscfj klmsv vmksl vmlks
qlvh veo wruft wtm fbrlvjr evo wvwmny dhp bvrlrfj lvt vgzuyyw
mxuro orxmu tivu tjdq ojjvju cdd
kjexme gxbysxp yxrum hinrklv fxehytd qkt tqk umryx nim
kywnux wab tzrft dsaz jgwuw dubarmi fle wjguvr umjp uzncwj mzz
qokwh zrda xywufk tbxhhj eejqaoa hwoqk zer hwt hbjxth xyf hmh
eregs qdx tdequa agrlrg mwwpba qjie yrjvhr txujk
iyot fxwdcb zvwfv vfzwv wvkw ncwbr wdejrr ltcdza
waix eza znmonya ldfghws ialwfvc dey ubsz uhbnh svgekg nonzyam
bryz tfbqn xznfmw xiplgww wwxigpl jxzcgxl rzyb
cqvl rrcoqxs staeuqr hzzow cwv tsvol dio coc ddavii uuojy
txbn qvkkyh gbqnjtq ndpkqr srt bkpqfmm ytycev ypcv bpqmmfk
uqkjmul dour zgq ztango yrrjhrg ufxnmuw
ekxbcv vkxbec xbcevk jiq bar
wczff qdu cwffz hhk wlvyg
zjlconc osgsro dajzo hqih ehml
hnio shccluw cpu ivaby tormn vkef abv vkef ivaby
xgbdeso xiizs omqwy sbtnnt khago evviclw xyu dtvg wsyxfuc humewp
cnzu bia vdyqrf wwb qveum hmh ouupgc owli
pjpmfxa dvd lxgh ndy gwph oebfkqv vtlxdg efl ekj dyn
mvan nmdkc ucyshia mavn ecst poo
oybm pjwm bmyo wovgu xykziaq obmy eiirhqd
xkvomx yxvv oxxpth elh vxvy lhe ycn
okxglw gmaangx gnxaamg yduzrr nzwxtnd rcxcu xjjvno yat cin gaxnamg yss
oicgs rrol zvnbna rrol
abb edpnxuo peoudxn bab ceay
ncpkfz gvwunb fckpzn caafx pkcfzn tsfl
fnrt ymenkpq wodubcm niv nvi ziluu cuowbdm zocg pdakwt mlzxkex nuxqclo
uouxcgl stgua otadr ideannq wizxunv iqsdpj mxddt ldst ucxogul
rbrwyhk wqoz zqwo ikwgexl atpu iza
smo yolp pcahlu muljxkq cbkljmz zlbcmkj zvbmgz eaiv ncv zplifm yplo
ocutdhz zmnaap llgv llzpl loavju guzkfq saay rxyhng cwxzx lcv anrnzs
etyzx tcm upxrtvd imyoiu rdpj fed dmm
gonqa szteh szteh razdqh phyff upf knfqfaf knfqfaf fpsgl kakag
mcju mixskop isrwat lcr nfyi lcr aaevr nfyi pqrbk gnful
xfmr fkmnq fbnhd mxrf litniid xbae frxm zcenf
yuh lzojtj rqsh hyu
vbjgql yeshsuv lokt efqota wpwjfu ykyq rxc fxxh ycqfkk gndts vdf
wnylmr kkuruxm azr xukrkum dohkwx dmdb
bjiyrwf dvf fdv vdf gnokekr
jsaq hcww iayqtu llv gdpxdrd hwlo nosjit wpm lcab fcgwr
fxjp bys nnf xzllckh bys hvojw zcwtgwz wye ccyvjv
grafa hbb ghk wkdpsf ufa uoqmysd
yvacf kssbff iovrm mvrio cfbpb avh zzje
gqd qmsen wkvrfz vhtsa zrwfkv gul zkvwrf
hrbi svaogb aogsvb bgrk hibr jbtkr
ljl ryc mrewrge yky
fcqyymt emk qcmyytf mcfvusb luy qany cbsvumf
oknt mcozuc ccmuoz uoccmz
uziil xobutwf acnof iqgwl diso
sekq fxbtsuv ddnnqg rnemlt dngnqd hhgjfus stxvubf
lajcp qgiw fyz blrlcd pviwv buh wnkk
wolqfk nvpapfc rwcqxfz xobno yzjfz irpj wolqfk wbnwjt
vmabj noiljne hhqf holxkbk swwzx ylgj lnmxy lqodhk abjvm bmsrf
bpnp yrz pjepxxs jlmhuy vihlx zacm inuazhc xsxjepp
tryl kryh eonvaad ucevssk umkxg lqej nswopjj svkeucs bmh stosxxz
cfdwd dmfdrvm ibuhsz nwtgmb pjt dmfdrvm cqdcm fzjjz afa ibuhsz
erwp abn jwx ynmkkj rhgg abn epd atqhs rst rhgg
jtnp cegdsoy gfuvfbg gdmn ahlsc
jgrp diu jrgp onjnml nojmnl vxockc
lakqyuw khq dcpiwt ykwlqua hkq plklx ujbckec hjcvur jnp pvyf
usuvoo jkih ylafyy yhio jureyj
uazisdf cnwlfnf ewodatr woaddkd wbla qmn atdrowe
bnyepaa ntqh xppe ydtsw ppex
yewjwsp jxylmtk ijese ewry ijese kbja nfml zeuwcsh juimz
qbvmf nca zsfreo uurgaiz twe fbqmv ncwi etdcsk atowfp
jeotslx kgdpzp wxlcww pdd dcn ddp
macllv ldl kyluine lbt hbxbr wxcaspp ezwvc qxkeu
ivg gxv zsf ucr uff yrz
tdlwbny bqlrlz tbynwdl lwtbdny
tnekq pdaievs ttwpfh xfm fcaa
zqqhl zbf fbz uqrv bfz ffwavhk foccg
vcw ebqdd cwv eddbq nrmq
hpiusz sizphu xzq sgyehk wgagkv hsygek
vagkxa iou frqdnnr ipcg uxvh vvh eskf katgpiq aqktigp gzvseyi
xkwgd kzfxk pgdy fmtvq ngf rshx zti pamviob ely knz
hwo rteohu qzwoe rotuhe wzb
bsqgg tid dti gtats dit
sjtux djwxv dljwjq xwvjd xnqfvx veqdrtl uxtsj nnkjn wnhilaf unirrp
fruuqjk gtote gooklg bzwhim zfnccmm ezipnf cxwdxa wfu fdca
zcyxb byzxc cxbyz pgcqco ivlxz
wrjh zfdinsf ihw xwosiah hdg xpiabno bilyy azdeczg javuwa
rinlv dcpt qhencba mmb njxw gadc
qwcpua qzyzt cxjsgh kumh byiimas qhsgf qytzz rqqruwp ismyiba xydcxz rwkscqa
xbzefi hltca ibzxfe fkx xizbfe wvaynts
oyuce vzk ouxvj gfh efgbv ubc nyb bxnbhd mtwboe whksy ovmrt
ljrebp tacn bpjler utphw wmfw rcnha
drdnic eyodes rcnidd yseeod
umxmsf kfroz ukhjin awpnnnu ooyyohh tuv rafano jze
bakz lfzpjyg gfkqcgn kzh zwpvk gqfngck
jpaony ojpnya hmro xaaz tovary aaxz iel pbg
swvbgc bbhjp yvrcddd rhj clfu eao afrkegn qvvb yvcx nxjmdo rcvtx
conbjy jeqtri wvujt jeqtri rkhllgw tsdt zowreo qxr qbpragn kuzmplw wvujt
jrpxyp hchljy rkowqb eeaf ltllebb gtksrwx iazx vnsfmc zzrxw hlcjyh
piehb cjdzt eqn kuje rls oaewoz lrqwt lcrrq
hdjowxv uknhlv hluknv pokxg
txiqxfr fyyp pyyf xfxtrqi tvm rtvby cfx trx nwrf kqrxtat alwot
wdaadr stexpow ardawd uejqxc
wwgwjel wwgwjel mtjt wwgwjel
mczx uua lgceb dqru vkcea tcet ruz
jkt yroojr qdrtdu wze ovwz fdmqnr xxsyfd kchytwl hctlkwy gyd
eif irnrce iamhxgh bmis uxye azrwdi sznv yuowb vdlqqxu
dxdjyj hngqwzs yhwku qhsctfe rhbc rchb tqhcfse
fxyxnzs qtxevin rvtxtc iqnxtve
zgbpk mwzxx bgpkz wkpkn
rjiym iub lcyw agbtlb bzhx osv rbtf
emmyu uoflio tinih skpqaj rbor gezbhhv ine mij qlqte uuj ycns
owmwc uhxv pyho ftjh jzsg blqn bszyo bob trbycy mkru
mwgz bbqsmpp fgzs bihhg bbn pjxxexs qrqmt htsxfwo qltqp vqqaxi
lpr wcvy sxjqq ltd rftdgv pdnog ymu
qhcos shuy icdhucu lrikh rwslv yxbgibl rcomhn wakirz
civdmee owlzocl vedecim rogmjnn pix pohcmk dsjm yworm
vzdpxp lvt inufv yofqt omm qfoty qrlseqy amkt kjcvg vgkjc
huhq quhh levzsws sjuun ofgqr cjhp nfxbbft rnt wtbd tbzab
tjftkx xpfcv hvftvhw lpypbjg batrn fhwhtvv uthl arbtn brb sthv
ogr uyuxdco bpjgir edztxv sxtgu jzfmx ihnauz zwegqkr kvkw
mhxthf pervvn gshy jig ezjteq ckkcpy gww
tiljyki rpe prcojy tjkylii moxu
pjsdqc lgqydfd lohck emrtejw axwmo wuuv rfi qzyncmw gjijdfb bljfd xrs
ywjab gynzi relf kziy xmsby izyk ocwoho kqnyh bwayj
bhjlz uonz jhmzuq eiajoos zjnbj tomj bmyv hjlbz fgw jjbnz
kszz xzw xzw prtznyb
ghzk vxhwt thxwv slwpayp qxegmi dawdwo kgzh
ibpcvuf wnuwxu sbf jsj bfjynl cdp jbylnf
epaxr vfhf hvff azepadz pwf sbo pgfzya hslyo rqqj rmklw hwtta
yyolko pwbvxvg xdwl yfje hftep kzzsr kho jeyf yvslxpw kfyv
xmk juyjxy eqno mdwklum reg dgn cirh wmxfyj bnxlgo dlobk
oyv gshqyot jgcqe dsf gyohqst gqgeojo egoogjq dmqpyp
sypianq yss lmhu ulmh itilh ndkda lhiit
qbxxl bxxql ikald nfap qixwbqq
jtqhqty ljysnl nwoj toa bmmyj pal
ahktew sxody nkvsf pbxyt baws wgwfwej bevgzm jus hcvajfy kzrb jwgwewf
jzsb szbj ujngwf nfuuf lfiuxdu uufnf orsy
vgo hto isstyul gau wsmxoqw
uxw itwf epaw hec wape hemol rpwyosc xzxmrll eetz zui kagca
mjncux muv rygdeis rygdeis
qgkqjvf iprzibd fkvqqgj llcrl vbh vlf lllrc zwrunt
dslsa wvoex eqbwj tjem gbx ayn xcan fnacl xggxon gnwjlh
yzosv hcxjiz yvon gcgd
bixpny ecln sda eymt bjiwk
rlcad lrdca adoqfzs rgty mds pwb kmwj
wkai pmryffq rrdmodc wgyx taz yxwg nkap
auynzwc vzg uapdv qkrh
ldmuysp oyu kpn ejbl mfifa bzs hwyn brlw qpzqx uyilao ysdumpl
czoxoj pwnultl wezolbw lyk aonesgb
nqy nhb nle yycp lgtbo ojf dytwyh ufa
rwr eph obg peh pejret prjtee ovgz
vdqf vdqf ycjrg ovzl lelbe vdqf
gvagdqm gvdgqam dmb zaxe nepzwn
emwh bkkbgec qwdgk mhvfsrf wmdfpp ekzuua
mbqw lgkyazt ckyhvnq uladwo owt
qwiwd pbo tkjoqda zapo dygqopv zzdlwfn
qty dhb iinncba ytq kvh idgoevt chx waq
ulffsvk vplsz ulffsvk uxsh cpwgxd ikgcacx nrirke uowcjvn
gknmxr grkxnm fco dilyyj grmxkn
saqxkh uhue nvu fef xsuxq ekyyoc bcaavd
qltwqa vrmpv vhra nof yprauc vkreojm eaq igiy mec
wvheiyg uthy gpvcs nhnjrne mqaejr tfnsly zfbhn entcc nystfl | |
(i<nsym)) i++;
# to=i;
# # draw all tenuto strokes between these chords
# if (to<nsym) { // tenuto ends in same line
# for (m1=0, m2=0; m1<sym[from].npitch; m1++) {
# if (sym[from].ten1[m1]) {
# sym[from].ten1[m1] = 0; #mark as done
# for (m=m2; m<sym[to].npitch; m++)
# if (sym[to].ten2[m]) {
# m2=m;
# sym[to].ten2[m] = 0; #mark as done
# break;
# }
# # no further matching tenuto end?
# if (m2 >= sym[to].npitch) break;
# #
# * get positions for tenuto stroke
# * when no fret sign (i.e. 'y') include chord into tenuto stroke,
# * otherwise exclude it
#
# x1 = sym[from].x;
# if (sym[from].accs[m1] == 'y') x1 -= tabfont.size/2.0;
# else x1 += tabfont.size;
# hc1 = tabline(sym[from].pits[m1]);
# y1 = (6-hc1) * tabfont.size + 1.5;
# x2 = sym[to].x;
# if (sym[to].accs[m2] == 'y') x2 += tabfont.size/2.0;
# else x2 -= tabfont.size;
# hc2 = tabline(sym[to].pits[m2]);
# y2 = (6-hc2+1) * tabfont.size - 1.5;
# //output_slur (x1,y1,x2,y2,direction,height,shift);
# PUT4("%.1f %.1f %.1f %.1f tabten ", x1, y1, x2, y2);
# }
# }
# }
# }
# }
# }
#
# #*************************************************************************
# * tab_nplets - draw nplets in current line
# *************************************************************************
# void draw_tabnplets (void)
# {
# int i,j,k,p,r,c;
#
# #find start of nplets
# for (i=0;i<nsym;i++) {
# if ((sym[i].type==NOTE) || (sym[i].type==REST)) {
# if (sym[i].p_plet>0) {
# p=sym[i].p_plet;
# r=sym[i].r_plet;
# c=r;
# k=i;
# #find end of nplet
# for (j=i;j<nsym;j++) {
# if ((sym[j].type==NOTE) || (sym[j].type==REST)) {
# c--;
# k=j;
# if (c==0) break;
# }
# }
# #draw nplet
# output_slur (sym[i].x+2, 6*tabfont.size,
# sym[k].x, 6*tabfont.size,
# 1, 10, 0);
# PUT3("%.1f %.1f (%d) bnum\n",
# 0.5*(sym[i].x+sym[k].x), 6.0*tabfont.size+5, p);
# }
# }
# }
# }
#
# #*************************************************************************
# * tabline - returns tablature line corresponding to given course
# *************************************************************************
# int tabline (int course)
# {
# # all bourdons are printed on 7th line
# if (voice[ivc].key.ktype == ITALIAN8TAB) {
# if (course>8) course=7;
# } else {
# if (course>7) course=7;
# }
#
# switch (voice[ivc].key.ktype) {
# case ITALIANTAB:
# case ITALIAN7TAB:
# case ITALIAN8TAB:
# case ITALIAN5TAB:
# case ITALIAN4TAB:
# return (7-course);
# break;
# case FRENCH5TAB:
# case SPANISH5TAB:
# return (course+1);
# break;
# case FRENCH4TAB:
# case SPANISH4TAB:
# return (course+2);
# break;
# default:
# return course;
# break;
# }
# }
#
# #*************************************************************************
# * lowest_course - returns lowest course in chord
# *************************************************************************
# int lowest_course (struct SYMBOL *s)
# {
# int i,onlyy;
# int lowc=0;
#
# // y (invisible symbol) may only be ignored
# // when other symbols are present => check for this
# onlyy = 1;
# for (i=0;i<s->npitch;i++) {
# if (s->accs[i] != 'y') {
# onlyy = 0; break;
# }
# }
#
# if (voice[ivc].key.ktype==GERMANTAB) {
# for (i=s->npitch-1;i>=0;i--)
# lowc++;
#
# } else {
# for (i=s->npitch-1;i>=0;i--) {
# if (s->pits[i]>lowc && (s->accs[i] != 'y' || onlyy))
# lowc=s->pits[i];
# }
# if (lowc>7)
# lowc=7;
# }
#
# return lowc;
# }
#
# #*************************************************************************
# * highest_course - returns highest course in chord
# *************************************************************************
# int highest_course (struct SYMBOL *s)
# {
# int i,onlyy;
# int highc=10;
#
# if (voice[ivc].key.ktype==GERMANTAB)
# return 1;
#
# // y (invisible symbol) may only be ignored
# // when other symbols are present => check for this
# onlyy = 1;
# for (i=0;i<s->npitch;i++) {
# if (s->accs[i] != 'y') {
# onlyy = 0; break;
# }
# }
#
# for (i=0;i<s->npitch;i++) {
# if (s->pits[i]<highc && (s->accs[i] != 'y' || onlyy))
# highc=s->pits[i];
# }
# if (highc>7)
# highc=7;
#
# return highc;
# }
#
# #*************************************************************************
# * next_line - returns next used tabline tablature line lower than
# * hc in chord. If next line is not found, 0 is returned
# *************************************************************************
# int next_line (struct SYMBOL *s, int hc)
# {
# int i,nextc;
#
# if (voice[ivc].key.ktype==FRENCHTAB ||
# voice[ivc].key.ktype==FRENCH5TAB ||
# voice[ivc].key.ktype==FRENCH4TAB ||
# voice[ivc].key.ktype==SPANISHTAB ||
# voice[ivc].key.ktype==SPANISH5TAB ||
# voice[ivc].key.ktype==SPANISH4TAB) {
# # search larger course numbers
# nextc=99;
# for (i=0;i<s->npitch;i++) {
# if ((s->pits[i]<nextc) && (s->pits[i]>hc))
# nextc=s->pits[i];
# }
# if (nextc==99)
# return 0;
# if (nextc>7)
# nextc=7;
# } else {
# # italiantab: search smaller course numbers
# hc = 6-hc; #translate line to course
# nextc = 0;
# for (i=0;i<s->npitch;i++) {
# if ((s->pits[i]>nextc) && (s->pits[i]<hc))
# nextc=s->pits[i];
# }
# if (nextc>=6)
# return 0;
# nextc = 6-nextc; #translate course to line
# }
#
# return nextc;
# }
#
def open_tabfontfile(self, basename):
"""
find first fontfile in directorylist $ABCTABFONTS
and open it. If $ABCTABFONTS is unset, standard directories are
searched. If the file cannot be found, NULL is returned.
:param self:
:param basename:
:return:
"""
        # get font directory list (the env var may hold several directories
        # separated by the platform path separator)
        font_dirs = os.environ.get('ABCTABFONTS', None)
        if font_dirs:
            font_dirs = font_dirs.split(os.pathsep)
        else:
            font_dirs = TABFONTDIRS
        font_file = '.'.join([basename, 'ps'])
        for font_dir in font_dirs:
            candidate = os.path.join(font_dir, font_file)
            if os.path.isdir(font_dir) and os.path.isfile(candidate):
                return open(candidate)
        return None
def def_tabfonts(self, fp):
"""
writes font definitions into outfile
        newlines from the fontfile are removed by getline() and rewritten,
        to achieve native line breaks on each platform
It is not possible to decide automatically, whether tab
is needed, because only one tune is accessible at a time
- load french font (can be suppressed with %%tabfontfrench none)
- load italian font (can be suppressed with %%tabfontitalian none)
- load german font (can be suppressed with %%tabfontgerman none)
:param fp:
:return:
"""
        if args.notab:  # when global flag set
            return
        font_list = [self.frfont, self.itfont, self.defont]
        for tab_font in font_list:
            if tab_font == "none":
                fp.write('\n')
                continue
            fontfp = self.open_tabfontfile(tab_font)
            if not fontfp:
                log.error('Please check the environment variable '
                          'ABCTABFONTS\n'
                          f'Cannot find tablature font file: {tab_font}')
                continue
            with fontfp:
                lines = fontfp.readlines()
            for line in lines:
                if line.startswith('%!'):
                    fp.write(line)
def def_tabsyms(self, fp, cfmt):
"""
writes PS macros for tablature into outfile
:param fp:
:param cfmt:
:return:
"""
# when global flag set
if args.notab:
return
# It is not possible to decide automatically, whether tab is needed,
# because only one tune is accessible at a time
# tablature system
fp.write("\n/tabN { %% l n tabN - n line tab\n"
" gsave %3.2f setlinewidth 0 0 moveto\n"
" {dup 0 rlineto dup neg %d rmoveto} repeat\n"
" pop stroke grestore\n"
"} bind def\n" % (cfmt.stafflinethickness, self.size))
fp.write("\n/tab1 { %% l y tab1 - tab separator line\n"
" gsave %3.2f setlinewidth 0 exch moveto 0 rlineto\n"
" stroke grestore\n"
"} bind def\n" % cfmt.stafflinethickness)
# time signatures
fp.write("\n/tabcsig { %% x n tabcsig - C time sig\n"
" gsave 1.2 setlinewidth\n"
" 6 sub %.1f mul %.1f add moveto currentpoint\n"
" -10 0 -10 %d 0 %d rcurveto\n"
" -15 0 -15 -%d 0 -%d rcurveto\n"
" fill moveto 7 3.5 rmoveto\n"
" -3.5 -3.5 -3.5 -3.5 -7 -3.5 rcurveto\n"
" -15 0 -15 %d 0 %d rcurveto\n"
" 3.5 0 3.5 0 7 -3.5 rcurveto\n"
" currentpoint stroke moveto 0.5 0 rmoveto\n"
" currentpoint exch -2 add exch -2 add 2 0 360 arc\n"
" fill grestore\n"
"} bind def\n" % (self.size * 0.5, self.size * 1.5,
self.size * 2, self.size * 2,
self.size * 2, self.size * 2,
self.size * 2, self.size * 2))
fp.write("\n/tabctsig { %% x n tabctsig - C| timesig\n"
" 2 copy tabcsig\n"
" 6 sub %.1f mul %.1f add moveto 0 %d rlineto stroke\n"
"} bind def\n" % (self.size * 0.5,
self.size * 0.5,
self.size * 4))
# beware that tab is higher than music
fp.write("\n/tabtsig { %% x n (top) (bot) tabtsig - time signature\n"
" 4 -2 roll -5 add %d mul moveto /bx false def\n"
" gsave /NewCenturySchlbk-Bold 16 selectfont %.1f %.1f scale\n"
" 0 1.0 rmoveto currentpoint 3 -1 roll cshow\n"
" moveto 0 %d %.1f div rmoveto cshow grestore\n"
"} bind def\n" % (self.size,
3 * self.size / STAFFHEIGHT,
3 * self.size / STAFFHEIGHT,
2 * self.size,
3 * self.size / STAFFHEIGHT))
fp.write(
"\n/tabt1sig { %% x n (top) tabt1sig - timesig without denominator\n"
" 3 1 roll 6 sub %.1f mul %.1f add moveto /bx false def\n"
" gsave /NewCenturySchlbk-Bold 16 selectfont %.1f %.1f scale\n"
" cshow grestore\n"
"} bind def\n" % (self.size * 0.5,
self.size * 1.8,
3 * self.size / STAFFHEIGHT,
3 * self.size / STAFFHEIGHT))
# tablature letter
fp.write(
"\n/tabfshow { %% x n (c) tabfshow - french tabletter c on course n\n"
" 3 1 roll\n"
# raise one point so that line is not touched
" -6 add -%d mul 1 add moveto\n"
# ^^^^^
" gsave /FrenchTabfont %.1f selectfont\n"
" /bx false def cshow grestore\n"
"} bind def\n" % (self.size, self.size * self.scale))
fp.write(
"\n/tabsshow { %% x n (c) tabsshow - spanish tabletter c on course n\n"
" 3 1 roll\n"
" -5.5 add -%d mul moveto\n"
| |
"""
test_lex
~~~~~~~~
Unit tests for the dice notation lexer.
"""
import unittest as ut
from tests.common import BaseTests
from yadr import lex
from yadr import model as m
# Symbol test cases.
class ASOperatorTestCase(BaseTests.LexTokenTestCase):
token = m.Token.AS_OPERATOR
allowed = [
m.Token.GROUP_OPEN,
m.Token.NEGATIVE_SIGN,
m.Token.NUMBER,
m.Token.U_POOL_DEGEN_OPERATOR,
m.Token.WHITESPACE,
]
def test_basic_addition(self):
"""Given a basic addition equation, return the tokens that
represent the equation.
"""
exp = (
(lex.Token.NUMBER, 15),
(lex.Token.AS_OPERATOR, '+'),
(lex.Token.NUMBER, 3),
)
data = '15+3'
self.lex_test(exp, data)
def test_basic_addition_with_spaces(self):
"""Given a basic addition equation containing whitespace,
return the tokens that represent the equation.
"""
exp = (
(lex.Token.NUMBER, 15),
(lex.Token.AS_OPERATOR, '+'),
(lex.Token.NUMBER, 3),
)
data = ' 15 + 3 '
self.lex_test(exp, data)
def test_basic_subtraction(self):
"""Given a basic subtraction equation, return the tokens that
represent the equation.
"""
exp = (
(lex.Token.NUMBER, 200),
(lex.Token.AS_OPERATOR, '-'),
(lex.Token.NUMBER, 10),
)
data = '200-10'
self.lex_test(exp, data)
class BooleanTestCase(BaseTests.LexTokenTestCase):
token = m.Token.BOOLEAN
allowed = [
m.Token.CHOICE_OPERATOR,
m.Token.WHITESPACE,
]
def test_boolean_true(self):
"""Lex a boolean."""
exp = (
(lex.Token.BOOLEAN, True),
)
data = 'T'
self.lex_test(exp, data)
    def test_boolean_false(self):
        """Lex a false boolean."""
        exp = (
            (lex.Token.BOOLEAN, False),
        )
        data = 'F'
        self.lex_test(exp, data)
class ChoiceTestCase(BaseTests.LexTokenTestCase):
token = m.Token.CHOICE_OPERATOR
allowed = [
m.Token.QUALIFIER,
m.Token.QUALIFIER_DELIMITER,
m.Token.WHITESPACE,
]
def test_choice(self):
"""Lex a choice operator."""
exp = (
(lex.Token.BOOLEAN, True),
(lex.Token.CHOICE_OPERATOR, '?'),
(lex.Token.QUALIFIER, 'spam'),
(lex.Token.OPTIONS_OPERATOR, ':'),
(lex.Token.QUALIFIER, 'eggs'),
)
data = 'T?"spam":"eggs"'
self.lex_test(exp, data)
class ComparisonOperatorTestCase(BaseTests.LexTokenTestCase):
token = m.Token.COMPARISON_OPERATOR
allowed = [
m.Token.GROUP_OPEN,
m.Token.NEGATIVE_SIGN,
m.Token.NUMBER,
m.Token.U_POOL_DEGEN_OPERATOR,
m.Token.WHITESPACE,
]
def test_basic_equal(self):
"""Lex equal."""
exp = (
(lex.Token.NUMBER, 21),
(lex.Token.COMPARISON_OPERATOR, '=='),
(lex.Token.NUMBER, 20),
)
data = '21==20'
self.lex_test(exp, data)
def test_basic_greater_than(self):
"""Lex greater than."""
exp = (
(lex.Token.NUMBER, 21),
(lex.Token.COMPARISON_OPERATOR, '>'),
(lex.Token.NUMBER, 20),
)
data = '21>20'
self.lex_test(exp, data)
def test_basic_greater_than_or_equal(self):
"""Lex greater than or equal."""
exp = (
(lex.Token.NUMBER, 21),
(lex.Token.COMPARISON_OPERATOR, '>='),
(lex.Token.NUMBER, 20),
)
data = '21>=20'
self.lex_test(exp, data)
    def test_basic_greater_than_whitespace(self):
        """Lex greater than with whitespace."""
exp = (
(lex.Token.NUMBER, 21),
(lex.Token.COMPARISON_OPERATOR, '>'),
(lex.Token.NUMBER, 20),
)
data = '21 > 20'
self.lex_test(exp, data)
def test_basic_less_than(self):
"""Lex greater than."""
exp = (
(lex.Token.NUMBER, 21),
(lex.Token.COMPARISON_OPERATOR, '<'),
(lex.Token.NUMBER, 20),
)
data = '21<20'
self.lex_test(exp, data)
def test_basic_less_than_or_equal(self):
"""Lex less than or equal."""
exp = (
(lex.Token.NUMBER, 21),
(lex.Token.COMPARISON_OPERATOR, '<='),
(lex.Token.NUMBER, 20),
)
data = '21<=20'
self.lex_test(exp, data)
def test_basic_not_equal(self):
"""Lex not equal."""
exp = (
(lex.Token.NUMBER, 21),
(lex.Token.COMPARISON_OPERATOR, '!='),
(lex.Token.NUMBER, 20),
)
data = '21!=20'
self.lex_test(exp, data)
class DiceOperatorTestCase(BaseTests.LexTokenTestCase):
token = m.Token.DICE_OPERATOR
allowed = [
m.Token.GROUP_OPEN,
m.Token.NEGATIVE_SIGN,
m.Token.NUMBER,
m.Token.U_POOL_DEGEN_OPERATOR,
m.Token.WHITESPACE,
]
def test_basic_concat(self):
"""Given a basic concat equation, return the tokens that
represent the equation.
"""
exp = (
(lex.Token.NUMBER, 20),
(lex.Token.DICE_OPERATOR, 'dc'),
(lex.Token.NUMBER, 10),
)
data = '20dc10'
self.lex_test(exp, data)
def test_basic_die(self):
"""Given a basic die equation, return the tokens that
represent the equation.
"""
exp = (
(lex.Token.NUMBER, 20),
(lex.Token.DICE_OPERATOR, 'd'),
(lex.Token.NUMBER, 10),
)
data = '20d10'
self.lex_test(exp, data)
def test_basic_exploding_die(self):
"""Given a basic exploding die equation, return the tokens that
represent the equation.
"""
exp = (
(lex.Token.NUMBER, 20),
(lex.Token.DICE_OPERATOR, 'd!'),
(lex.Token.NUMBER, 10),
)
data = '20d!10'
self.lex_test(exp, data)
def test_basic_keep_high_die(self):
"""Given a basic die equation, return the tokens that
represent the equation.
"""
exp = (
(lex.Token.NUMBER, 20),
(lex.Token.DICE_OPERATOR, 'dh'),
(lex.Token.NUMBER, 10),
)
data = '20dh10'
self.lex_test(exp, data)
def test_basic_keep_low_die(self):
"""Given a basic die equation, return the tokens that
represent the equation.
"""
exp = (
(lex.Token.NUMBER, 20),
(lex.Token.DICE_OPERATOR, 'dl'),
(lex.Token.NUMBER, 10),
)
data = '20dl10'
self.lex_test(exp, data)
def test_basic_wild_die(self):
"""Given a basic die equation, return the tokens that
represent the equation.
"""
exp = (
(lex.Token.NUMBER, 20),
(lex.Token.DICE_OPERATOR, 'dw'),
(lex.Token.NUMBER, 10),
)
data = '20dw10'
self.lex_test(exp, data)
class ExOperatorTestCase(BaseTests.LexTokenTestCase):
token = m.Token.EX_OPERATOR
allowed = [
m.Token.GROUP_OPEN,
m.Token.NEGATIVE_SIGN,
m.Token.NUMBER,
m.Token.U_POOL_DEGEN_OPERATOR,
m.Token.WHITESPACE,
]
def test_basic_exponentiation(self):
"""Given a basic exponentiation equation, return the tokens that
represent the equation.
"""
exp = (
(lex.Token.NUMBER, 20),
(lex.Token.EX_OPERATOR, '^'),
(lex.Token.NUMBER, 10),
)
data = '20^10'
self.lex_test(exp, data)
class GroupingTestCase(BaseTests.LexTokenTestCase):
token = m.Token.GROUP_CLOSE
allowed = [
m.Token.AS_OPERATOR,
m.Token.MD_OPERATOR,
m.Token.EX_OPERATOR,
m.Token.DICE_OPERATOR,
m.Token.GROUP_CLOSE,
m.Token.POOL_OPERATOR,
m.Token.ROLL_DELIMITER,
m.Token.POOL_GEN_OPERATOR,
m.Token.WHITESPACE,
]
def test_parentheses(self):
"""Given a statement containing parenthesis, return the
tokenized equation.
"""
exp = (
(lex.Token.GROUP_OPEN, '('),
(lex.Token.NUMBER, 32),
(lex.Token.AS_OPERATOR, '-'),
(lex.Token.NUMBER, 5),
(lex.Token.GROUP_CLOSE, ')'),
(lex.Token.MD_OPERATOR, '*'),
(lex.Token.NUMBER, 21),
)
data = '(32-5)*21'
self.lex_test(exp, data)
def test_parentheses_with_whitespace(self):
"""Given a statement containing parenthesis and whitespace,
return the tokenized equation.
"""
exp = (
(lex.Token.GROUP_OPEN, '('),
(lex.Token.NUMBER, 32),
(lex.Token.AS_OPERATOR, '-'),
(lex.Token.NUMBER, 5),
(lex.Token.GROUP_CLOSE, ')'),
(lex.Token.MD_OPERATOR, '*'),
(lex.Token.NUMBER, 21),
)
data = '( 32 - 5 ) * 21'
self.lex_test(exp, data)
class GroupOpenTestCase(BaseTests.LexTokenTestCase):
token = m.Token.GROUP_OPEN
allowed = [
m.Token.GROUP_OPEN,
m.Token.NEGATIVE_SIGN,
m.Token.NUMBER,
m.Token.POOL,
m.Token.POOL_OPEN,
m.Token.U_POOL_DEGEN_OPERATOR,
m.Token.WHITESPACE,
]
class MapTestCase(BaseTests.LexTokenTestCase):
token = m.Token.MAP
allowed = [
m.Token.ROLL_DELIMITER,
m.Token.WHITESPACE,
]
def test_map(self):
"""Given a statement containing a map, return the tokenized
dice mapping.
"""
exp = ((
m.Token.MAP,
(
'name',
{
1: "none",
2: "success",
3: "success",
4: "success success",
}
)
),)
yadn = '{"name"=1:"none",2:"success",3:"success",4:"success success"}'
self.lex_test(exp, yadn)
class MappingOperatorTestCase(BaseTests.LexTokenTestCase):
token = m.Token.MAPPING_OPERATOR
allowed = [
m.Token.QUALIFIER,
m.Token.QUALIFIER_DELIMITER,
m.Token.WHITESPACE,
]
def test_mapping_operator(self):
"""Lex a mapping operator."""
exp = (
(lex.Token.NUMBER, 3),
(lex.Token.MAPPING_OPERATOR, 'm'),
(lex.Token.QUALIFIER, 'spam'),
)
data = '3m"spam"'
self.lex_test(exp, data)
class MDOperatorTestCase(BaseTests.LexTokenTestCase):
token = m.Token.MD_OPERATOR
allowed = [
m.Token.GROUP_OPEN,
m.Token.NEGATIVE_SIGN,
m.Token.NUMBER,
m.Token.U_POOL_DEGEN_OPERATOR,
m.Token.WHITESPACE,
]
def test_basic_division(self):
"""Given a basic division equation, return the tokens that
represent the equation.
"""
exp = (
(lex.Token.NUMBER, 20),
(lex.Token.MD_OPERATOR, '/'),
(lex.Token.NUMBER, 10),
)
data = '20/10'
self.lex_test(exp, data)
def test_basic_exponentiation(self):
"""Given a basic exponentiation equation, return the tokens that
represent the equation.
"""
exp = (
(lex.Token.NUMBER, 20),
(lex.Token.EX_OPERATOR, '^'),
(lex.Token.NUMBER, 10),
)
data = '20^10'
self.lex_test(exp, data)
def test_basic_modulo(self):
"""Given a basic modulo equation, return the tokens
that represent the equation.
"""
exp = (
(lex.Token.NUMBER, 2),
(lex.Token.MD_OPERATOR, '%'),
(lex.Token.NUMBER, 10),
)
data = '2%10'
self.lex_test(exp, data)
def test_basic_multiplication(self):
"""Given a basic multiplication equation, return the tokens
that represent the equation.
"""
exp = (
(lex.Token.NUMBER, 2),
(lex.Token.MD_OPERATOR, '*'),
(lex.Token.NUMBER, 10),
)
data = '2*10'
self.lex_test(exp, data)
class NumberTestCase(BaseTests.LexTokenTestCase):
token = m.Token.NUMBER
allowed = [
m.Token.AS_OPERATOR,
m.Token.COMPARISON_OPERATOR,
m.Token.EX_OPERATOR,
m.Token.DICE_OPERATOR,
m.Token.GROUP_CLOSE,
m.Token.MAPPING_OPERATOR,
m.Token.MD_OPERATOR,
m.Token.POOL_GEN_OPERATOR,
m.Token.ROLL_DELIMITER,
m.Token.WHITESPACE,
]
# Allowed next symbol.
def test_number_cannot_follow_number(self):
"""Numbers cannot follow numbers."""
# Expected values.
exp_ex = ValueError
exp_msg = '4 cannot follow a NUMBER.'
# Test data and state.
data = '3 4'
lexer = lex.Lexer()
# Run test and determine the result.
with self.assertRaisesRegex(exp_ex, exp_msg):
_ = lexer.lex(data)
class OptionsOperatorTestCase(BaseTests.LexTokenTestCase):
token = m.Token.OPTIONS_OPERATOR
allowed = [
m.Token.QUALIFIER,
m.Token.QUALIFIER_DELIMITER,
m.Token.WHITESPACE,
]
def test_basic_options_operator(self):
"""Lex choice options."""
exp = (
(lex.Token.QUALIFIER, 'spam'),
(lex.Token.OPTIONS_OPERATOR, ':'),
(lex.Token.QUALIFIER, 'eggs'),
)
data = '"spam":"eggs"'
self.lex_test(exp, data)
class PoolDegenerationOperatorTestCase(BaseTests.LexTokenTestCase):
token = m.Token.POOL_DEGEN_OPERATOR
allowed = [
m.Token.GROUP_OPEN,
m.Token.NEGATIVE_SIGN,
m.Token.NUMBER,
m.Token.U_POOL_DEGEN_OPERATOR,
m.Token.WHITESPACE,
]
def test_basic_count_successes(self):
"""Given a basic count successes statement, return the tokens
in the statement.
"""
exp = (
(lex.Token.POOL, (5, 1, 9)),
(lex.Token.POOL_DEGEN_OPERATOR, 'ns'),
(lex.Token.NUMBER, 5),
)
data = '[5,1,9]ns5'
self.lex_test(exp, data)
def test_basic_count_successes_with_botch(self):
"""Given a basic count successes with botches statement, return
the tokens in the statement.
"""
exp = (
(lex.Token.POOL, (5, 1, 9)),
(lex.Token.POOL_DEGEN_OPERATOR, 'nb'),
(lex.Token.NUMBER, 5),
)
data = '[5,1,9]nb5'
self.lex_test(exp, data)
def test_count_successes_before_group(self):
"""Groups can follow pool degeneration operators."""
exp = (
(lex.Token.POOL, (5, 1, 9)),
(lex.Token.POOL_DEGEN_OPERATOR, 'ns'),
(lex.Token.GROUP_OPEN, '('),
(lex.Token.NUMBER, 3),
(lex.Token.AS_OPERATOR, '+'),
(lex.Token.NUMBER, 2),
(lex.Token.GROUP_CLOSE, ')'),
)
data = '[5,1,9]ns(3+2)'
self.lex_test(exp, data)
def test_count_successes_before_unary_pool_degen(self):
"""Unary pool degens can follow pool degeneration operators."""
exp = (
(lex.Token.POOL, (5, 1, 9)),
(lex.Token.POOL_DEGEN_OPERATOR, 'ns'),
(lex.Token.U_POOL_DEGEN_OPERATOR, 'N'),
(lex.Token.POOL, (5, 1, 9)),
)
data = '[5,1,9]nsN[5,1,9]'
self.lex_test(exp, data)
def test_count_successes_before_operator(self):
"""Operators cannot occur after pool degen operators."""
# Expected values.
exp_ex = ValueError
exp_msg = '\\+ cannot follow a POOL_DEGEN_OPERATOR.'
# Test data and state.
data = '[5,1,9]ns+'
lexer = lex.Lexer()
# Run test and determine results.
with self.assertRaisesRegex(exp_ex, exp_msg):
_ = lexer.lex(data)
class PoolGenerationOperatorTestCase(BaseTests.LexTokenTestCase):
token = m.Token.POOL_GEN_OPERATOR
allowed = [
m.Token.GROUP_OPEN,
m.Token.NEGATIVE_SIGN,
m.Token.NUMBER,
m.Token.U_POOL_DEGEN_OPERATOR,
m.Token.WHITESPACE,
]
def test_basic_dice_pool(self):
"""Given a basic die equation, return the tokens that
represent the equation.
"""
exp = (
(lex.Token.NUMBER, 20),
(lex.Token.POOL_GEN_OPERATOR, 'g'),
(lex.Token.NUMBER, 10),
)
data = '20g10'
self.lex_test(exp, data)
    def test_basic_exploding_pool(self):
"""Given a basic pool generation, return the tokens that
represent the generation.
"""
exp = (
(lex.Token.NUMBER, 20),
(lex.Token.POOL_GEN_OPERATOR, 'g!'),
| |
from .now import Now
class SetPaths(Now):
# the class Now IS NOT large
def __get_atual_competencia_file(self):
import os
f = '\\get_atual_competencia.txt'
dir_only = os.path.dirname(__file__)
project_dir = '\\'.join(dir_only.split('\\')[:-2])
        # Count from the back, in case one day this gets moved into a root directory
tot = project_dir + f
return tot
def files_get_anexos(self, client, file_type='pdf', upload=False):
"""
        :param client: name of the folder where the files are organized by date dd-mm-yyyy
:param file_type: file annexed type
:param upload: False -> email it! True: upload it!
:return: pdf_files or whatever
# _files_path
"""
import os
from email.mime.application import MIMEApplication
# compt, excel_file_name = self.compt_and_filename()
compt_and_file = self.compt_and_filename()
path = self._files_path_v3(client, wexplorer_tup=compt_and_file)
# print(path, '\nPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATH', year)
volta = os.getcwd()
os.chdir(path)
list_returned = os.listdir()
pdf_files = list()
for fname in list_returned:
if fname.lower().endswith(f'.{file_type}'):
if not upload:
file_opened = MIMEApplication(open(fname, 'rb').read())
file_opened.add_header('Content-Disposition', 'attachment', filename=fname)
pdf_files.append(file_opened)
else:
pdf_files.append(f'{os.getcwd()}\\{fname}')
os.chdir(volta)
print(os.getcwd())
return pdf_files
def files_get_anexos_v2(self, client, file_type='pdf', wexplorer_tup=None, upload=False):
"""
        :param client: name of the folder where the files are organized by date dd-mm-yyyy
:param file_type: file annexed type
:param wexplorer_tup: ... ctrl+F me
:param upload: False -> email it! True: upload it!
:return: pdf_files or whatever
# _files_path
"""
import os
from email.mime.application import MIMEApplication
# compt, excel_file_name = self.compt_and_filename()
if wexplorer_tup is None:
compt_and_file_anexos = self.compt_and_filename()
else:
compt_and_file_anexos = wexplorer_tup
path = self._files_path_v3(client, wexplorer_tup=compt_and_file_anexos)
# print(path, '\nPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATH', year)
volta = os.getcwd()
os.chdir(path)
list_returned = os.listdir()
pdf_files = list()
for fname in list_returned:
if fname.lower().endswith(f'.{file_type}'):
if upload:
file_opened = MIMEApplication(open(fname, 'rb').read())
file_opened.add_header('Content-Disposition', 'attachment', filename=fname)
pdf_files.append(file_opened)
else:
pdf_files.append(f'{os.getcwd()}\\{fname}')
os.chdir(volta)
print(os.getcwd())
return pdf_files
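    # Illustrative usage (hypothetical client folder): with the default flags,
    #   paths = self.files_get_anexos_v2('Client X', file_type='pdf')
    # returns the absolute paths of the PDF files found in that client's folder
    # for the currently selected competência.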
def compt_and_filename(self):
"""
        :return: already set compt and file_name, previously stored in a file
##########################################################################
"""
from time import sleep
# compt, excel_file_name = 'biri', 'biri'
try:
sleep(1)
with open(self.__get_atual_competencia_file(), 'r') as f:
compt, excel_file_name = f.read().splitlines()
except FileNotFoundError:
# raise FileNotFoundError('\033[1;31mfile not existence\033[m')
return self.set_get_compt_file(m_cont=-1)
else:
return compt, excel_file_name
def file_wtp_only1(self):
import os
filepath = os.path.realpath(__file__)
os.path.join('\\'.join(filepath.split('\\')[:-1]))
file_with_name = 'with_titlePATH.txt'
try:
f = open(f'{file_with_name}', 'r')
a = f.read()
a = a.split('/')
a = '/'.join(a)
returned = a
f.close()
except FileNotFoundError:
FileExistsError('WITH TITLE PATH NOT EXISTENTE ')
returned = self.select_sheets_path_if_not_exists()
return returned
def select_sheets_path_if_not_exists(self):
from tkinter import Tk, filedialog, messagebox
root = Tk()
root.withdraw()
root = Tk()
root.withdraw()
file_with_name = 'with_titlePATH.txt'
# sh_management = SheetPathManager(file_with_name)
way = None
while way is None:
way = filedialog.askdirectory(title='SELECIONE ONDE ESTÃO SUAS PLANILHAS')
if len(way) <= 0:
way = None
resp = messagebox.askokcancel('ATENÇÃO!', message='Favor, selecione uma pasta ou clique em CANCELAR.')
if not resp:
break
else:
wf = open(file_with_name, 'w')
wf.write(way)
root.quit()
return way
def set_get_compt_file(self=None, m_cont=0, y_cont=0, past_only=True, file_type='xlsx', open_excel=False):
"""
        :param int m_cont: how many months back? (0 = current)
        :param int y_cont: how many years back? (0 = current)
        :param bool past_only: True -> past only (multiplies by -1), False: no multiplication
:param any file_type: None -> (((DOES NOTHING))) update self.__get_atual_competencia_file
:param open_excel: if True => OPENS EXCEL FILE
:return: competencia & excel_path
        # responsive: also returns the path and the competência for the PATH variable of self._files_path_v2
"""
compt = self.get_compt_only(m_cont, y_cont, past_only)
path = self.file_wtp_only1()
        # same as above, but still trying
if file_type:
excel_file_path_updated = r'{}/{}.{}'.format(path, compt, file_type)
with open(self.__get_atual_competencia_file(), 'w') as f:
for line in [compt, excel_file_path_updated]:
# print(compt)
f.write(line + '\n')
if open_excel:
from pgdas_fiscal_oesk.main_excel_manager.main_excel_manager import SheetPathManager
spm = SheetPathManager()
spm.new_xlsxcompt_from_padrao_if_not_exists((compt, excel_file_path_updated))
spm.save_after_changes((compt, excel_file_path_updated))
return compt, excel_file_path_updated
return compt
def get_compt_only(self, m_cont=-1, y_cont=0, past_only=True, sep='-'):
from datetime import datetime as dt
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta
month = dt.now().month
year = dt.now().year
now_date = date(year, month, 1)
if past_only:
m_cont = m_cont * (-1) if m_cont > 0 else m_cont
y_cont = y_cont * (-1) if y_cont > 0 else y_cont
# force to be negative
now_date = now_date + relativedelta(months=m_cont)
now_date = now_date + relativedelta(years=y_cont)
month, year = now_date.month, now_date.year
compt = f'{month:02d}{sep}{year}'
return compt
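    # Illustrative behaviour (derived from the code above): with the default
    # m_cont=-1, calling self.get_compt_only() in March 2021 would return
    # '02-2021', i.e. the previous month's competência as 'MM-YYYY'.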
# @staticmethod
def get_last_business_day_of_month(self, month=None, year=None):
from calendar import monthrange
from datetime import datetime
if month is None:
month = datetime.now().month
if year is None:
year = datetime.now().year
init = monthrange(year, month)
ultimo_day = init[1]
business_date = datetime(year, month, ultimo_day)
weekday = business_date.weekday()
while weekday > 4:
now_day = business_date.day
business_date = business_date.replace(day=now_day - 1)
weekday = business_date.weekday()
returned = business_date.day
returned -= 1 if month == 12 else returned
return returned
def first_and_last_day_compt(self, sep='/'):
"""
        IT ALREADY PICKS THE CLOSEST PREVIOUS ONE
        # will_be is needed because before it the date is written backwards
        # e.g.: 20200430
        # year 2020, month 04, day 30... (example)
        :return: first and last day of the month (dd/mm/yyyy strings)
"""
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta
compt, file_name = self.compt_and_filename()
        mes, ano = compt.split('-') if '-' in compt else compt.split('/')
mes, ano = int(mes), int(ano)
# - timedelta(days=1)
# + relativedelta(months=1)
last_now = date(ano, mes, 1) + relativedelta(months=1)
last_now -= timedelta(days=1)
first_now = date(ano, mes, 1)
z, a = last_now, first_now
br1st = f'{a.day:02d}{sep}{a.month:02d}{sep}{a.year}'
brlast = f'{z.day:02d}{sep}{z.month:02d}{sep}{z.year}'
print(br1st, brlast)
return br1st, brlast
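    # Illustrative behaviour (derived from the code above): if the stored
    # competência is '04-2020', this returns ('01/04/2020', '30/04/2020'),
    # i.e. the first and last day of that month as dd/mm/yyyy strings.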
def _files_path_v3(self, pasta_client, wexplorer_tup=None):
"""
:param pasta_client: client_name
:param wexplorer_tup: the tuple containing the self.compt_and_file_name()
:return: salva_path (save_path)
"""
import os
if wexplorer_tup is None:
compt, excel_file_name = self.compt_and_filename()
else:
compt, excel_file_name = wexplorer_tup
ano = [compt.split(v)[-1] for v in compt if not v.isdigit()]
ano = ano[0]
possible_folders = ['_Dívidas']
        # needs improvement, maybe make it more responsive; but it already handles debts and is responsive across years
PATH = '/'.join(excel_file_name.split('/')[:-2])
pasta_client = pasta_client.strip()
volta = os.getcwd()
for acp in [PATH, ano, compt, pasta_client]:
try:
os.chdir(acp)
except FileNotFoundError:
os.mkdir(acp)
os.chdir(acp)
salva_path = os.getcwd()
# print(salva_path)
os.chdir(volta)
return salva_path
def _files_path_defis(self, pasta_client, tup_path, ano=None):
"""
:param pasta_client: client name
:param tup_path: like wexplorer_tup
:return:
# THE FUTURE OF THE SOFTWARE
"""
# insyear: inside this year...
import os
from pathlib import Path
if ano is None:
ano = str(self.y())
insyear, excel_file_name = tup_path
defis_path = Path(os.path.dirname(excel_file_name))
defis_path = defis_path.parent
defis_path_final = [defis_path, ano, insyear, pasta_client]
volta = os.getcwd()
for acp in defis_path_final:
try:
os.chdir(acp)
except FileNotFoundError:
os.mkdir(acp)
os.chdir(acp)
salva_path = os.getcwd()
os.chdir(volta)
return salva_path
def mkdir_hoje(self, und_disco, relative_path=None):
"""
:param und_disco: A, B, C, D, E, F, G, H, I, J, K ... ETC
:param relative_path: path/b_path/c_path
        :return: today's folder created under the relative path without overlapping
"""
date_folder = f'{self.hj()}-{self.m():02d}-{self.y()}'
if len(und_disco) > 1:
print('Digite somente a letra da unidade de disco')
raise AttributeError
if relative_path is not None:
if '/' == relative_path[0] or '/' == relative_path[-1]:
print('Não use "/" nem "\\" em path[0] or path[-1]')
raise AttributeError
res = self.__new_path_set(f'{und_disco}:/{relative_path}/{date_folder}')
else:
res = self.__new_path_set(f'{und_disco}:/{date_folder}')
return res
def move_file(self, where_from, destiny):
"""
:param where_from: where the files come from
:param destiny: where they're going to
:return: File moved from a place[where_from] to another[destiny]
"""
from shutil import move
move(where_from, destiny)
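    # Illustrative usage (hypothetical paths):
    #   self.move_file('C:/Users/me/Downloads/guia.pdf', 'D:/clients/04-2021')
    # This simply wraps shutil.move, so the destination may be a folder or a
    # full file path.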
def __new_path_set(self, path=''):
"""
        :param path: current default path (downloads)
        :return: creates the path if it does not exist yet
        # so far only called by mkdir_hoje
"""
import os
volta = os.getcwd()
if '/' in path:
spliter = '/'
elif '\\' in path:
spliter = '\\'
else:
spliter = ''
print(path)
raise AttributeError
try:
und_disco = path.split('/')[0]
except (IndexError, AttributeError) as e:
raise e
else:
os.chdir(und_disco)
pnow = os.getcwd()[:-1]
for folder in path.split(spliter)[1:]:
pnow = f'{pnow}/{folder}'
if not os.path.exists(pnow):
os.mkdir(folder)
os.chdir(folder)
# print('NOTHING went wrong')
os.chdir(volta)
return path
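    # Illustrative behaviour (hypothetical path): __new_path_set('D:/reports/2021')
    # changes to drive D:, creates and enters 'reports' and '2021' if they do
    # not exist yet, restores the previous working directory and returns the
    # given path string.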
def certif_feito(self, save_path, add=''):
"""
        Certificate (proof) that the job is done.
        :param save_path: folder name coming from _files_path_v2
        :param add: extra text appended to the file name
        :return: path + file name of the generated image (png)
"""
client_name = save_path[save_path.index('-')-2: save_path.index('-')+2]
type_arquivo = 'png'
try:
save = r'{}\\{}-SimplesNacionalDeclarado.{}'.format(save_path, add, type_arquivo)
print(save, '---------> SAVE')
return save
except FileNotFoundError:
print('NÃO CONSEGUI RETORNAR SAVE')
def unzipe_file(self, full_path, rm_zip=True):
"""
        :param full_path: path to the zip file
        :param rm_zip: True -> remove the zip afterwards; False -> keep it
        :return: extracted file, with the zip deleted when rm_zip is True
"name": "<NAME>",
"hex": "F08080",
"r": 240,
"g": 128,
"b": 128
},
"F0E68C": {
"name": "Khaki",
"hex": "F0E68C",
"r": 240,
"g": 230,
"b": 140
},
"F0F8FF": {
"name": "<NAME>",
"hex": "F0F8FF",
"r": 240,
"g": 248,
"b": 255
},
"F0FFF0": {
"name": "<NAME>",
"hex": "F0FFF0",
"r": 240,
"g": 255,
"b": 240
},
"F0FFFF": {
"name": "Azure",
"hex": "F0FFFF",
"r": 240,
"g": 255,
"b": 255
},
"F4A460": {
"name": "<NAME>",
"hex": "F4A460",
"r": 244,
"g": 164,
"b": 96
},
"F5DEB3": {
"name": "Wheat",
"hex": "F5DEB3",
"r": 245,
"g": 222,
"b": 179
},
"F5F5DC": {
"name": "Beige",
"hex": "F5F5DC",
"r": 245,
"g": 245,
"b": 220
},
"F5F5F5": {
"name": "<NAME>",
"hex": "F5F5F5",
"r": 245,
"g": 245,
"b": 245
},
"F5FFFA": {
"name": "<NAME>",
"hex": "F5FFFA",
"r": 245,
"g": 255,
"b": 250
},
"F8F8FF": {
"name": "<NAME>",
"hex": "F8F8FF",
"r": 248,
"g": 248,
"b": 255
},
"FA8072": {
"name": "Salmon",
"hex": "FA8072",
"r": 250,
"g": 128,
"b": 114
},
"FAEBD7": {
"name": "<NAME>",
"hex": "FAEBD7",
"r": 250,
"g": 235,
"b": 215
},
"FAF0E6": {
"name": "Linen",
"hex": "FAF0E6",
"r": 250,
"g": 240,
"b": 230
},
"FAFAD2": {
"name": "Light Golden Rod Yellow",
"hex": "FAFAD2",
"r": 250,
"g": 250,
"b": 210
},
"FDF5E6": {
"name": "<NAME>",
"hex": "FDF5E6",
"r": 253,
"g": 245,
"b": 230
},
"FF0000": {
"name": "Red",
"hex": "FF0000",
"r": 255,
"g": 0,
"b": 0
},
"FF00FF": {
"name": "Magenta",
"hex": "FF00FF",
"r": 255,
"g": 0,
"b": 255
},
"FF1493": {
"name": "Deep Pink",
"hex": "FF1493",
"r": 255,
"g": 20,
"b": 147
},
"FF4500": {
"name": "Orange Red",
"hex": "FF4500",
"r": 255,
"g": 69,
"b": 0
},
"FF6347": {
"name": "Tomato",
"hex": "FF6347",
"r": 255,
"g": 99,
"b": 71
},
"FF69B4": {
"name": "Hot Pink",
"hex": "FF69B4",
"r": 255,
"g": 105,
"b": 180
},
"FF7F50": {
"name": "Coral",
"hex": "FF7F50",
"r": 255,
"g": 127,
"b": 80
},
"FF8C00": {
"name": "Dark Orange",
"hex": "FF8C00",
"r": 255,
"g": 140,
"b": 0
},
"FFA07A": {
"name": "Light Salmon",
"hex": "FFA07A",
"r": 255,
"g": 160,
"b": 122
},
"FFA500": {
"name": "Orange",
"hex": "FFA500",
"r": 255,
"g": 165,
"b": 0
},
"FFB6C1": {
"name": "<NAME>",
"hex": "FFB6C1",
"r": 255,
"g": 182,
"b": 193
},
"FFC0CB": {
"name": "Pink",
"hex": "FFC0CB",
"r": 255,
"g": 192,
"b": 203
},
"FFD700": {
"name": "Gold",
"hex": "FFD700",
"r": 255,
"g": 215,
"b": 0
},
"FFDAB9": {
"name": "<NAME>",
"hex": "FFDAB9",
"r": 255,
"g": 218,
"b": 185
},
"FFDEAD": {
"name": "<NAME>",
"hex": "FFDEAD",
"r": 255,
"g": 222,
"b": 173
},
"FFE4B5": {
"name": "Moccasin",
"hex": "FFE4B5",
"r": 255,
"g": 228,
"b": 181
},
"FFE4C4": {
"name": "Bisque",
"hex": "FFE4C4",
"r": 255,
"g": 228,
"b": 196
},
"FFE4E1": {
"name": "<NAME>",
"hex": "FFE4E1",
"r": 255,
"g": 228,
"b": 225
},
"FFEBCD": {
"name": "<NAME>",
"hex": "FFEBCD",
"r": 255,
"g": 235,
"b": 205
},
"FFEFD5": {
"name": "Papaya Whip",
"hex": "FFEFD5",
"r": 255,
"g": 239,
"b": 213
},
"FFF0F5": {
"name": "Lavender Blush",
"hex": "FFF0F5",
"r": 255,
"g": 240,
"b": 245
},
"FFF5EE": {
"name": "Sea Shell",
"hex": "FFF5EE",
"r": 255,
"g": 245,
"b": 238
},
"FFF8DC": {
"name": "Cornsilk",
"hex": "FFF8DC",
"r": 255,
"g": 248,
"b": 220
},
"FFFACD": {
"name": "<NAME>",
"hex": "FFFACD",
"r": 255,
"g": 250,
"b": 205
},
"FFFAF0": {
"name": "<NAME>",
"hex": "FFFAF0",
"r": 255,
"g": 250,
"b": 240
},
"FFFAFA": {
"name": "Snow",
"hex": "FFFAFA",
"r": 255,
"g": 250,
"b": 250
},
"FFFF00": {
"name": "Yellow",
"hex": "FFFF00",
"r": 255,
"g": 255,
"b": 0
},
"FFFFE0": {
"name": "Light Yellow",
"hex": "FFFFE0",
"r": 255,
"g": 255,
"b": 224
},
"FFFFF0": {
"name": "Ivory",
"hex": "FFFFF0",
"r": 255,
"g": 255,
"b": 240
},
"FFFFFF": {
"name": "White",
"hex": "FFFFFF",
"r": 255,
"g": 255,
"b": 255
}
}
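# Illustrative lookup sketch (editorial addition): the mapping that closes above is
# keyed by hex code; assuming it is bound to a name such as `html_colors` (hypothetical,
# the real name is assigned earlier in the file), an entry can be resolved like this:
#   entry = html_colors.get("FFD700")
#   if entry:
#       rgb = (entry["r"], entry["g"], entry["b"])  # -> (255, 215, 0)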
html_colors_list = [
{
"name": "Midnight Blue",
"hex": "191970",
"r": 25,
"g": 25,
"b": 112
},
{
"name": "<NAME>",
"hex": "663399",
"r": 102,
"g": 51,
"b": 153
},
{
"name": "Dim Gray",
"hex": "696969",
"r": 105,
"g": 105,
"b": 105
},
{
"name": "Slate Gray",
"hex": "708090",
"r": 112,
"g": 128,
"b": 144
},
{
"name": "Light Slate Gray",
"hex": "778899",
"r": 119,
"g": 136,
"b": 153
},
{
"name": "Maroon",
"hex": "800000",
"r": 128,
"g": 0,
"b": 0
},
{
"name": "Purple",
"hex": "800080",
"r": 128,
"g": 0,
"b": 128
},
{
"name": "Olive",
"hex": "808000",
"r": 128,
"g": 128,
"b": 0
},
{
"name": "Gray",
"hex": "808080",
"r": 128,
"g": 128,
"b": 128
},
{
"name": "Black",
"hex": "000000",
"r": 0,
"g": 0,
"b": 0
},
{
"name": "Navy",
"hex": "000080",
"r": 0,
"g": 0,
"b": 128
},
{
"name": "Dark Blue",
"hex": "00008B",
"r": 0,
"g": 0,
"b": 139
},
{
"name": "Medium Blue",
"hex": "0000CD",
"r": 0,
"g": 0,
"b": 205
},
{
"name": "Blue",
"hex": "0000FF",
"r": 0,
"g": 0,
"b": 255
},
{
"name": "Dark Green",
"hex": "006400",
"r": 0,
"g": 100,
"b": 0
},
{
"name": "Green",
"hex": "008000",
"r": 0,
"g": 128,
"b": 0
},
{
"name": "Teal",
"hex": "008080",
"r": 0,
"g": 128,
"b": 128
},
{
"name": "Dark Cyan",
"hex": "008B8B",
"r": 0,
"g": 139,
"b": 139
},
{
"name": "Deep Sky Blue",
"hex": "00BFFF",
"r": 0,
"g": 191,
"b": 255
},
{
"name": "Dark Turquoise",
"hex": "00CED1",
"r": 0,
"g": 206,
"b": 209
},
{
"name": "Medium Spring Green",
"hex": "00FA9A",
"r": 0,
"g": 250,
"b": 154
},
{
"name": "Lime",
"hex": "00FF00",
"r": 0,
"g": 255,
"b": 0
},
{
"name": "Spring Green",
"hex": "00FF7F",
"r": 0,
"g": 255,
"b": 127
},
{
"name": "Cyan",
"hex": "00FFFF",
"r": 0,
"g": 255,
"b": 255
},
{
"name": "Dodger Blue",
"hex": "1E90FF",
"r": 30,
"g": 144,
"b": 255
},
{
"name": "Light Sea Green",
"hex": "20B2AA",
"r": 32,
"g": 178,
"b": 170
},
{
"name": "Forest Green",
"hex": "228B22",
"r": 34,
"g": 139,
"b": 34
},
{
"name": "Sea Green",
"hex": "2E8B57",
"r": 46,
"g": 139,
"b": 87
},
{
"name": "Dark Slate Gray",
"hex": "2F4F4F",
"r": 47,
"g": 79,
"b": 79
},
{
"name": "Lime Green",
"hex": "32CD32",
"r": 50,
"g": 205,
"b": 50
},
{
"name": "Medium Sea Green",
"hex": "3CB371",
"r": 60,
"g": 179,
"b": 113
},
{
"name": "Turquoise",
"hex": "40E0D0",
"r": 64,
"g": 224,
"b": 208
},
{
"name": "Royal Blue",
"hex": "4169E1",
"r": 65,
"g": 105,
"b": 225
},
{
"name": "Steel Blue",
"hex": "4682B4",
"r": 70,
"g": 130,
"b": 180
},
{
"name": "Dark Slate Blue",
"hex": "483D8B",
"r": 72,
"g": 61,
"b": 139
},
{
"name": "Medium Turquoise",
"hex": "48D1CC",
"r": 72,
"g": 209,
"b": 204
},
{
"name": "Indigo",
"hex": "4B0082",
"r": 75,
"g": 0,
"b": 130
},
{
"name": "Dark Olive Green",
"hex": "556B2F",
"r": 85,
"g": 107,
"b": 47
},
{
"name": "Cadet Blue",
"hex": "5F9EA0",
"r": 95,
"g": 158,
"b": 160
},
{
"name": "Cornflower Blue",
"hex": "6495ED",
"r": 100,
"g": 149,
"b": 237
},
{
"name": "Medium Aqua Marine",
"hex": "66CDAA",
"r": 102,
"g": 205,
"b": 170
},
{
"name": "Slate Blue",
"hex": "6A5ACD",
"r": 106,
"g": 90,
"b": 205
},
{
"name": "<NAME>",
"hex": "6B8E23",
"r": 107,
"g": 142,
"b": 35
},
{
"name": "Medium Slate Blue",
"hex": "7B68EE",
"r": 123,
"g": 104,
"b": 238
},
{
"name": "<NAME>",
"hex": "7CFC00",
"r": 124,
"g": 252,
"b": 0
},
{
"name": "Chartreuse",
"hex": "7FFF00",
"r": 127,
"g": 255,
"b": 0
},
{
"name": "Aquamarine",
"hex": "7FFFD4",
"r": 127,
"g": 255,
"b": 212
},
{
"name": "Sky Blue",
"hex": "87CEEB",
"r": 135,
"g": 206,
"b": 235
},
{
"name": "Light Sky Blue",
"hex": "87CEFA",
"r": 135,
"g": 206,
"b": 250
},
{
"name": "Blue Violet",
"hex": "8A2BE2",
"r": 138,
"g": 43,
"b": 226
},
{
"name": "Dark Red",
"hex": "8B0000",
"r": 139,
"g": 0,
"b": 0
},
{
"name": "Dark Magenta",
"hex": "8B008B",
"r": 139,
"g": 0,
"b": 139
},
{
else:
our_move = white_blocks[self.snake.direction][0]
move_direction = self.snake.direction
del(white_blocks[move_direction])
#If there are any other white or blue block moves we add them to current_grid's list in self.decision_points
if len(white_blocks) > 0:
if self.decision_points.has_key(self.snake.current_grid) is False:
self.decision_points[self.snake.current_grid] = []
for direction, grid in white_blocks.iteritems():
self.decision_points[self.snake.current_grid].append(grid[0])
if len(blue_blocks) > 0:
if self.decision_points.has_key(self.snake.current_grid) is False:
self.decision_points[self.snake.current_grid] = []
for grid in blue_blocks:
self.decision_points[self.snake.current_grid].append(grid[0][0])
#Similarly, remove our current move from the list of decision_points if it exists
        for decision_point, grid_list in self.decision_points.items():
if our_move in grid_list:
self.decision_points[decision_point].pop(grid_list.index(our_move))
#If that was the last move for this decision point, remove it from the dict
if len(self.decision_points[decision_point]) == 0:
del(self.decision_points[decision_point])
#Make our move and update our direction of travel
self.snake.direction = move_direction
self.snake.moveSnake(our_move)
#Helper functions for pathfinder
def hl_move(self, grid):
old_grid_item = self.snake.game_grid.item(*self.last_hl_grid)
new_grid_item = self.snake.game_grid.item(*grid)
new_grid_item.setBackground(self.snake.grid_item_states[5])
old_grid_item.setBackground(self.snake.grid_item_states[0])
self.last_hl_grid = grid
def go_to_last_decision_point(self):
#Pull the last decision point.
if len(self.decision_points) > 0:
#We must remember to reinsert this data later if there are still more decision points
dp_grid, dpoints = self.decision_points.pop(0)
# print "%s Going back to previous decision point at %s..." % (str(self.snake.current_grid), dp_grid)
#Cut our current path chain just after that point
try:
dp_grid_index = self.current_path_chain.index(dp_grid)
            except ValueError:
print "==================================="
print "this error again..."
print self.current_path_chain
print self.decision_points
print self.snake.current_grid
print dp_grid
self.quitting = True
print "-------------------------------------"
return
self.current_path_chain = self.current_path_chain[:dp_grid_index+1]
#Take the next decision point or closer if metric is enabled
if self.metric is True:
#For brevity's sake
g2 = self.snake.context.finish_grid
self.snake_direction = sorted(dpoints.items(), key=lambda (_,g1): sqrt(( (g2[0]-g1[0])**2 )+( (g2[1]-g1[1])**2) ))[0][0]
self.snake.current_grid = dpoints.pop(self.snake_direction)
else:
self.snake_direction, self.snake.current_grid = dpoints.popitem()
#Reinsert the data if there are still more decision points
if len(dpoints) > 0:
self.decision_points.insert(0, (dp_grid, dpoints))
else:
#If this was the last possible move in this decision point we go back one step with our safe bookmark
if len(self.decision_points) > 0:
self.last_decision_point = self.decision_points[0]
return True
else:
print "%s No more decision points, hit the end." % str(self.snake.current_grid)
return False
#Only for start-to-finish grids
#Metric=True will use follow the path that gets closer to the finish
#hl mode will show where the algo is currently at by highlighting its current grid
#This slows down the search considerably however
def pathfinder_1(self, metric=False, hlmode=False):
self.snake_direction = "START"
self.successful_paths = []
self.current_path_chain = []
self.decision_points = []
self.max_chain_length = 1000 #How long before we just stop searching this main branch?
self.last_decision_point = None
self.last_hl_grid = self.snake.current_grid
self.metric = metric
print "%s Testing possible paths..." % str(self.snake.current_grid)
while self.quitting is False:
self.current_path_chain.append(self.snake.current_grid)
raw_possible_moves = self.snake.getMoves()
white_blocks = {}
finish_blocks = {}
if len(self.current_path_chain) > self.max_chain_length:
if self.go_to_last_decision_point() is True:
continue
else:
break
for direction, move in raw_possible_moves.iteritems():
if move[0] is not None:
if move[0] in self.current_path_chain:
continue
#Is this the finish?
if move[1] == 6:
finish_blocks[direction] = move[0]
elif move[1] == 0:
white_blocks[direction] = move[0]
#Remove moves that reverse our direction
if len(self.current_path_chain)> 0 and len(white_blocks) > 0:
for direction, grid in white_blocks.iteritems():
if grid == self.current_path_chain[-1]:
del(white_blocks[direction])
break
#We hit a dead end (wall or our own path)
#We just go back to the last decision point and explore that
if (len(white_blocks) == 0 or len(finish_blocks) > 0):
#Did we finish at least?
if len(finish_blocks) > 0:
#DONE!
self.current_path_chain.append(self.snake.current_grid)
self.current_path_chain.append(finish_blocks.values()[0])
if self.current_path_chain not in self.successful_paths:
#See if its any better than our current path
if len(self.successful_paths) > 0 and len(self.current_path_chain) > len(sorted(self.successful_paths, key=lambda path: len(path))[0])-1:
pass
else:
print "%s Found the finish with chain size %s" % (str(self.snake.current_grid), len(self.current_path_chain)-1)
self.successful_paths.append(self.current_path_chain)
#Set a bookmark here as we branch out to find shorter paths
if len(self.decision_points) > 0:
self.last_decision_point = self.decision_points[0]
else:
print "Already found this path? How the heck."
# else:
# print "%s Hit a dead end!" % str(self.snake.current_grid)
#now go back to last decision point
if self.go_to_last_decision_point() is True:
continue
else:
break
#Current path exceeds smallest successful path (if there is one)
elif len(self.successful_paths) > 0 and len(self.current_path_chain) >= len(sorted(self.successful_paths, key=lambda path: len(path))[0])-1:
#print "Current chain length longer than previously successful chains. Stopping search in this branch."
#Need to delete all decision points made since our last bookmark
if len(self.decision_points) > 0 and self.last_decision_point in self.decision_points:
last_dp_index = self.decision_points.index(self.last_decision_point)
self.decision_points = self.decision_points[last_dp_index:]
if self.go_to_last_decision_point() is True:
continue
else:
break
elif metric is True:
g2 = self.snake.finish_grid
self.snake_direction = sorted(white_blocks.items(), key=lambda (_,g1): sqrt(( (g2[0]-g1[0])**2 )+( (g2[1]-g1[1])**2) ))[0][0]
#Starting off or hit a wall
elif self.snake_direction == "START" or white_blocks.has_key(self.snake_direction) is False:
self.snake_direction = white_blocks.keys()[0] # Could instead pull the val that is in our intended direction
# print "%s Changing direction to %s" % (str(self.snake.current_grid), self.snake_direction)
our_move = white_blocks.pop(self.snake_direction)
#if we have any excess possible move choices we create a decision point
if len(white_blocks) > 0:
dp = white_blocks #Might need to copy instead of assign. We'll see how it goes.
self.decision_points.insert(0, (self.snake.current_grid, dp))
self.snake.current_grid = our_move
#If we haven't found the finish, keep setting a bookmark
if len(self.successful_paths) == 0 and len(self.decision_points) > 0:
self.last_decision_point = self.decision_points[0]
if hlmode:
self.hl_move(our_move)
sleep(float(TICKRATE_MS/float(1000)))
if len(self.successful_paths) > 0:
if hlmode: self.hl_move(self.snake.current_grid)
shortest_path = sorted(self.successful_paths, key=lambda chain: len(chain))[0]
self.snake.current_grid = shortest_path.pop(0)
print "Done searching paths, found %s paths total." % len(self.successful_paths)
print "Shortest path was %s in length. Running that path now..." % len(shortest_path)
self.quitting = False
for grid in shortest_path:
if self.quitting is True:
print "Quit called"
break
self.snake.moveSnake(grid)
Qt.QApplication.processEvents()
sleep(float(TICKRATE_MS/float(1000)))
else:
print "Could not find any solutions to the finish starting from %s" % str(self.snake.start_grid)
print "Pathfinder Finished!"
def a_star(self):
#number of paths to test each round
SAMPLE_SIZE = 5
#Going to use a reverse singly linked list with these grid objects
class Grid(object):
def __init__(gself, coords, prev=None):
gself.coords = coords
gself.prev = prev
#Estimate our path cost: f(x) = g(x) + h(x)
#Shortest distance to finish from this grid: h(x)
distance = sqrt( ((gself.coords[0]-self.snake.finish_grid[0])**2) + ((gself.coords[1]-self.snake.finish_grid[1])**2) )
gself.cost = gself.getLength() + distance
def __eq__(gself, othergrid):
if isinstance(othergrid, Grid):
if othergrid.coords == gself.coords: return True
else: return False
elif isinstance(othergrid, tuple):
if othergrid == gself.coords: return True
else: return False
def extend(gself, newgridcoords):
return Grid(newgridcoords, prev=gself)
def getLength(gself):
length = 1
prev = gself.prev
while prev is not None:
prev = prev.prev
length += 1
return length
#Wrapper around Snake.getMoves() to make the code cleaner
def getMoves(gself):
                #We don't care about the direction or grid type, just the coords and filter out 'None' moves
self.snake.current_grid = gself.coords
return [grid for grid, _ in self.snake.getMoves().values() if grid is not None]
fringe_set = []
explored_set = []
finish_set = []
start_grid = Grid(self.snake.start_grid)
finish_grid = Grid(self.snake.finish_grid)
#Populate our set of possible path chains from the start grid
fringe_set = [start_grid]
#Loop through our fringe_set until empty and extend the top SAMPLE_SIZE sets with the lowest path cost
while len(fringe_set) > 0 and self.quitting is False:
fringe_set = sorted(fringe_set, key=lambda path: path.cost)
#Slice off the lowest SAMPLE_SIZE # of sets
test_set = fringe_set[:SAMPLE_SIZE]
fringe_set = fringe_set[SAMPLE_SIZE:]
for testgrid in test_set:
if self.quitting is True:
break
#Add this grid's possible moves to the fringe set if we haven't explored them already
for coords in testgrid.getMoves():
if coords in explored_set or coords in fringe_set:
continue
newgrid = Grid(coords, prev=testgrid)
#check if this path is worse than any solutions we currently have
if len(finish_set) > 0 and newgrid.cost > sorted(finish_set, key=lambda p: p.getLength())[0].cost:
continue
if coords == finish_grid.coords:
finish_set.append(newgrid)
finish_length = newgrid.getLength()-1
print "Found finish at size %s" % finish_length
                        #Filter out any fringe path ends that are already longer than this successful path
fringe_set = list(filter(lambda p: p.getLength() < finish_length, fringe_set))
else:
fringe_set.append(newgrid)
explored_set.append(testgrid)
#A* is finished! Lets show the shortest successful path if we have one.
if len(finish_set) > 0:
shortest_path_end_node = sorted(finish_set, key=lambda path: path.getLength())[0]
shortest_path = [shortest_path_end_node.coords]
prev = shortest_path_end_node.prev
while prev is not None:
shortest_path.insert(0, prev.coords)
prev = prev.prev
self.snake.current_grid = shortest_path.pop(0)
            print "A* Finished! Found %s complete paths after exploring %s grids (Efficiency rating with shortest path (%s long): %.2f%%)!"
1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
assert not index.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index = MultiIndex(levels=levels, labels=labels)
else:
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
assert index.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
for keep in ['first', 'last', False]:
left = mi.duplicated(keep=keep)
right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep)
tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
lab = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
labels=np.random.permutation(list(lab)).T)
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
index = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [index,
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_get_unique_index(self):
idx = self.index[[0, 1, 0, 1, 1, 0, 0]]
expected = self.index._shallow_copy(idx[[0, 1]])
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
assert result.unique
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(self, names):
mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')],
names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(self, level):
# GH #17896 - with level= argument
result = self.index.unique(level=level)
expected = self.index.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
        expected = mi.get_level_values(level)
        tm.assert_index_equal(result, expected)
def test_unique_datetimelike(self):
idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = pd.MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
def test_tolist(self):
result = self.index.tolist()
exp = list(self.index.values)
assert result == exp
def test_repr_with_unicode_data(self):
with pd.core.config.option_context("display.encoding", 'UTF-8'):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\u" not in repr(index) # we don't want unicode-escaped
def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list('ab'), range(3)],
names=['first', 'second'])
str(mi)
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
mi_u = MultiIndex.from_product(
[list(u'ab'), range(3)], names=['first', 'second'])
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
# formatting
if PY3:
str(mi)
else:
compat.text_type(mi)
# long format
mi = MultiIndex.from_product([list('abcdefg'), range(10)],
names=['first', 'second'])
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
def test_str(self):
# tested elsewhere
pass
def test_unicode_string_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],
names=['x', 'y'])
assert x[1:].names == x.names
def test_isna_behavior(self):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
with pytest.raises(NotImplementedError):
pd.isna(self.index)
def test_level_setting_resets_attributes(self):
ind = pd.MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert ind.is_monotonic
ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_is_monotonic_increasing(self):
i = MultiIndex.from_product([np.arange(10),
np.arange(10)], names=['one', 'two'])
assert i.is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
# string ordering
i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic
assert not Index(i.values).is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'],
['mom', 'next', 'zenith']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[1, 2, 3, 4], ['gb00b03mlx29', 'lu0197800237',
'nl0000289783',
'nl0000289965', 'nl0000301109']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
def test_is_monotonic_decreasing(self):
i = MultiIndex.from_product([np.arange(9, -1, -1),
np.arange(9, -1, -1)],
names=['one', 'two'])
assert i.is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([[2.0, np.nan, 1.0], ['c', 'b', 'a']])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
# string ordering
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['three', 'two', 'one']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['zenith', 'next', 'mom']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values)._is_strictly_monotonic_decreasing
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[4, 3, 2, 1], ['nl0000301109', 'nl0000289965',
'nl0000289783', 'lu0197800237',
'gb00b03mlx29']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values)._is_strictly_monotonic_decreasing
def test_is_strictly_monotonic_increasing(self):
idx = pd.MultiIndex(levels=[['bar', 'baz'], ['mom', 'next']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_increasing
assert not idx._is_strictly_monotonic_increasing
def test_is_strictly_monotonic_decreasing(self):
idx = pd.MultiIndex(levels=[['baz', 'bar'], ['next', 'mom']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
def test_reconstruct_sort(self):
# starts off lexsorted & monotonic
mi = MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert mi.is_lexsorted()
assert mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert recons.is_lexsorted()
assert recons.is_monotonic
assert mi is recons
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
# cannot convert to lexsorted
mi = pd.MultiIndex.from_tuples([('z', 'a'), ('x', 'a'), ('y', 'b'),
('x', 'b'), ('y', 'a'), ('z', 'b')],
names=['one', 'two'])
assert not mi.is_lexsorted()
assert not mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert not recons.is_lexsorted()
assert not recons.is_monotonic
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
        # cannot convert to lexsorted
    def remove_all_vertices(self):
''' Removes all vertices from the Stroke.
'''
pass
def remove_vertex(self, vertex: 'StrokeVertex'):
''' Removes the StrokeVertex given as argument from the Stroke. The length and curvilinear abscissa are updated consequently.
:param vertex: the StrokeVertex to remove from the Stroke.
:type vertex: 'StrokeVertex'
'''
pass
def resample(self, n: int):
''' resample(sampling) Resamples the stroke so using one of two methods with the goal of creating a stroke with fewer points and the same shape.
:param n: Resamples the stroke so that it eventually has N points. That means it is going to add N-vertices_size, where vertices_size is the number of points we already have. If vertices_size >= N, no resampling is done.
:type n: int
:param sampling: Resamples the stroke with a given sampling value. If the sampling is smaller than the actual sampling value, no resampling is done.
:type sampling: float
'''
pass
def stroke_vertices_begin(self, t: float = 0.0) -> 'StrokeVertexIterator':
''' Returns a StrokeVertexIterator pointing on the first StrokeVertex of the Stroke. One can specify a sampling value to re-sample the Stroke on the fly if needed.
:param t: The resampling value with which we want our Stroke to be resampled. If 0 is specified, no resampling is done.
:type t: float
:rtype: 'StrokeVertexIterator'
:return: A StrokeVertexIterator pointing on the first StrokeVertex.
'''
pass
def stroke_vertices_end(self) -> 'StrokeVertexIterator':
''' Returns a StrokeVertexIterator pointing after the last StrokeVertex of the Stroke.
:rtype: 'StrokeVertexIterator'
:return: A StrokeVertexIterator pointing after the last StrokeVertex.
'''
pass
def stroke_vertices_size(self) -> int:
''' Returns the number of StrokeVertex constituting the Stroke.
:rtype: int
:return: The number of stroke vertices.
'''
pass
def update_length(self):
''' Updates the 2D length of the Stroke.
'''
pass
class StrokeAttribute:
''' Class to define a set of attributes associated with a StrokeVertex . The attribute set stores the color, alpha and thickness values for a Stroke Vertex.
'''
alpha: float = None
''' Alpha component of the stroke color.
:type: float
'''
color: 'mathutils.Color' = None
''' RGB components of the stroke color.
:type: 'mathutils.Color'
'''
thickness: 'mathutils.Vector' = None
''' Right and left components of the stroke thickness. The right (left) component is the thickness on the right (left) of the vertex when following the stroke.
:type: 'mathutils.Vector'
'''
visible: bool = None
''' The visibility flag. True if the StrokeVertex is visible.
:type: bool
'''
def __init__(self):
''' __init__(brother) __init__(red, green, blue, alpha, thickness_right, thickness_left) __init__(attribute1, attribute2, t) Creates a StrokeAttribute object using either a default constructor, copy constructor, overloaded constructor, or and interpolation constructor to interpolate between two StrokeAttribute objects.
:param brother: A StrokeAttribute object to be used as a copy constructor.
:type brother: 'StrokeAttribute'
:param red: Red component of a stroke color.
:type red: float
:param green: Green component of a stroke color.
:type green: float
:param blue: Blue component of a stroke color.
:type blue: float
:param alpha: Alpha component of a stroke color.
:type alpha: float
:param thickness_right: Stroke thickness on the right.
:type thickness_right: float
:param thickness_left: Stroke thickness on the left.
:type thickness_left: float
:param attribute1: The first StrokeAttribute object.
:type attribute1: 'StrokeAttribute'
:param attribute2: The second StrokeAttribute object.
:type attribute2: 'StrokeAttribute'
:param t: The interpolation parameter (0 <= t <= 1).
:type t: float
'''
pass
def get_attribute_real(self, name: str) -> float:
''' Returns an attribute of float type.
:param name: The name of the attribute.
:type name: str
:rtype: float
:return: The attribute value.
'''
pass
def get_attribute_vec2(self, name: str) -> 'mathutils.Vector':
''' Returns an attribute of two-dimensional vector type.
:param name: The name of the attribute.
:type name: str
:rtype: 'mathutils.Vector'
:return: The attribute value.
'''
pass
def get_attribute_vec3(self, name: str) -> 'mathutils.Vector':
''' Returns an attribute of three-dimensional vector type.
:param name: The name of the attribute.
:type name: str
:rtype: 'mathutils.Vector'
:return: The attribute value.
'''
pass
def has_attribute_real(self, name: str) -> bool:
''' Checks whether the attribute name of float type is available.
:param name: The name of the attribute.
:type name: str
:rtype: bool
:return: True if the attribute is available.
'''
pass
def has_attribute_vec2(self, name: str) -> bool:
''' Checks whether the attribute name of two-dimensional vector type is available.
:param name: The name of the attribute.
:type name: str
:rtype: bool
:return: True if the attribute is available.
'''
pass
def has_attribute_vec3(self, name: str) -> bool:
''' Checks whether the attribute name of three-dimensional vector type is available.
:param name: The name of the attribute.
:type name: str
:rtype: bool
:return: True if the attribute is available.
'''
pass
def set_attribute_real(self, name: str, value: float):
''' Adds a user-defined attribute of float type. If there is no attribute of the given name, it is added. Otherwise, the new value replaces the old one.
:param name: The name of the attribute.
:type name: str
:param value: The attribute value.
:type value: float
'''
pass
def set_attribute_vec2(self, name: str,
value: typing.List['mathutils.Vector']):
''' Adds a user-defined attribute of two-dimensional vector type. If there is no attribute of the given name, it is added. Otherwise, the new value replaces the old one.
:param name: The name of the attribute.
:type name: str
:param value: The attribute value.
:type value: typing.List['mathutils.Vector']
'''
pass
def set_attribute_vec3(self, name: str,
value: typing.List['mathutils.Vector']):
''' Adds a user-defined attribute of three-dimensional vector type. If there is no attribute of the given name, it is added. Otherwise, the new value replaces the old one.
:param name: The name of the attribute.
:type name: str
:param value: The attribute value.
:type value: typing.List['mathutils.Vector']
'''
pass
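# Usage sketch (editorial illustration, not part of the stub): interpolating halfway
# between two attributes with the interpolation constructor documented above; the
# RGB/thickness values are arbitrary example inputs.
#   a = StrokeAttribute(1.0, 0.0, 0.0, 1.0, 2.0, 2.0)   # red, thickness 2/2
#   b = StrokeAttribute(0.0, 0.0, 1.0, 1.0, 4.0, 4.0)   # blue, thickness 4/4
#   halfway = StrokeAttribute(a, b, 0.5)                # blended colour and thickness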
class StrokeShader:
''' Base class for stroke shaders. Any stroke shader must inherit from this class and overload the shade() method. A StrokeShader is designed to modify stroke attributes such as thickness, color, geometry, texture, blending mode, and so on. The basic way for this operation is to iterate over the stroke vertices of the Stroke and to modify the StrokeAttribute of each vertex. Here is a code example of such an iteration:: it = ioStroke.strokeVerticesBegin() while not it.is_end: att = it.object.attribute ## perform here any attribute modification it.increment()
'''
name: str = None
''' The name of the stroke shader.
:type: str
'''
def __init__(self):
''' Default constructor.
'''
pass
def shade(self, stroke: 'Stroke'):
''' The shading method. Must be overloaded by inherited classes.
:param stroke: A Stroke object.
:type stroke: 'Stroke'
'''
pass
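# Editorial example (not part of the stub): a minimal StrokeShader subclass that follows
# the iteration pattern described in the StrokeShader docstring above. It scales the
# right/left thickness of every stroke vertex by a constant factor; the class name and
# the default factor are illustrative choices, not part of the Freestyle API.
class _ExampleThicknessScaler(StrokeShader):
    def __init__(self, factor: float = 2.0):
        StrokeShader.__init__(self)
        self.factor = factor

    def shade(self, stroke: 'Stroke'):
        it = stroke.stroke_vertices_begin()
        while not it.is_end:
            attr = it.object.attribute
            right, left = attr.thickness
            attr.thickness = (right * self.factor, left * self.factor)
            it.increment()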
class StrokeVertex:
''' Class hierarchy: Interface0D > CurvePoint > StrokeVertex Class to define a stroke vertex.
'''
attribute: 'StrokeAttribute' = None
''' StrokeAttribute for this StrokeVertex.
:type: 'StrokeAttribute'
'''
curvilinear_abscissa: float = None
''' Curvilinear abscissa of this StrokeVertex in the Stroke.
:type: float
'''
point: 'mathutils.Vector' = None
''' 2D point coordinates.
:type: 'mathutils.Vector'
'''
stroke_length: float = None
''' Stroke length (it is only a value retained by the StrokeVertex, and it won't change the real stroke length).
:type: float
'''
u: float = None
''' Curvilinear abscissa of this StrokeVertex in the Stroke.
:type: float
'''
def __init__(self):
''' __init__(brother) __init__(first_vertex, second_vertex, t3d) __init__(point) __init__(svertex) __init__(svertex, attribute) Builds a StrokeVertex using the default constructor, copy constructor, from 2 StrokeVertex and an interpolation parameter, from a CurvePoint, from a SVertex, or a SVertex and a StrokeAttribute object.
:param brother: A StrokeVertex object.
:type brother: 'StrokeVertex'
:param first_vertex: The first StrokeVertex.
:type first_vertex: 'StrokeVertex'
:param second_vertex: The second StrokeVertex.
:type second_vertex: 'StrokeVertex'
:param t3d: An interpolation parameter.
:type t3d: float
:param point: A CurvePoint object.
:type point: 'CurvePoint'
:param svertex: An SVertex object. An SVertex object.
:type svertex: 'SVertex'
:param svertex: An SVertex object. An SVertex object.
:type svertex: 'SVertex'
:param attribute: A StrokeAttribute object.
:type attribute: 'StrokeAttribute'
'''
pass
class StrokeVertexIterator:
    ''' Class hierarchy: Iterator > StrokeVertexIterator Class defining an iterator designed to iterate over the StrokeVertex of a Stroke . An instance of a