| Column | Dtype | Lengths / values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 to 972 |
| max_stars_repo_name | string | lengths 6 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | sequence | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_path | string | lengths 3 to 972 |
| max_issues_repo_name | string | lengths 6 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | sequence | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_path | string | lengths 3 to 972 |
| max_forks_repo_name | string | lengths 6 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | sequence | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| content | string | lengths 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
89bda989e539c7632adc7d04344b9f91d361ac15 | 24,644 | py | Python | psx/_dump_/10/_dump_ida_/overlay_b/make_psx.py | maoa3/scalpel | 2e7381b516cded28996d290438acc618d00b2aa7 | [
"Unlicense"
] | 15 | 2018-06-28T01:11:25.000Z | 2021-09-27T15:57:18.000Z | psx/_dump_/10/_dump_ida_/overlay_b/make_psx.py | maoa3/scalpel | 2e7381b516cded28996d290438acc618d00b2aa7 | [
"Unlicense"
] | 7 | 2018-06-29T04:08:23.000Z | 2019-10-17T13:57:22.000Z | psx/_dump_/10/_dump_ida_/overlay_b/make_psx.py | maoa3/scalpel | 2e7381b516cded28996d290438acc618d00b2aa7 | [
"Unlicense"
] | 7 | 2018-06-28T01:11:34.000Z | 2020-05-23T09:21:48.000Z | set_name(0x80122CDC, "PreGameOnlyTestRoutine__Fv", SN_NOWARN)
set_name(0x80124DA0, "DRLG_PlaceDoor__Fii", SN_NOWARN)
set_name(0x80125274, "DRLG_L1Shadows__Fv", SN_NOWARN)
set_name(0x8012568C, "DRLG_PlaceMiniSet__FPCUciiiiiii", SN_NOWARN)
set_name(0x80125AF8, "DRLG_L1Floor__Fv", SN_NOWARN)
set_name(0x80125BE4, "StoreBlock__FPiii", SN_NOWARN)
set_name(0x80125C90, "DRLG_L1Pass3__Fv", SN_NOWARN)
set_name(0x80125E44, "DRLG_LoadL1SP__Fv", SN_NOWARN)
set_name(0x80125F20, "DRLG_FreeL1SP__Fv", SN_NOWARN)
set_name(0x80125F50, "DRLG_Init_Globals__Fv", SN_NOWARN)
set_name(0x80125FF4, "set_restore_lighting__Fv", SN_NOWARN)
set_name(0x80126084, "DRLG_InitL1Vals__Fv", SN_NOWARN)
set_name(0x8012608C, "LoadL1Dungeon__FPcii", SN_NOWARN)
set_name(0x80126258, "LoadPreL1Dungeon__FPcii", SN_NOWARN)
set_name(0x80126410, "InitL5Dungeon__Fv", SN_NOWARN)
set_name(0x80126470, "L5ClearFlags__Fv", SN_NOWARN)
set_name(0x801264BC, "L5drawRoom__Fiiii", SN_NOWARN)
set_name(0x80126528, "L5checkRoom__Fiiii", SN_NOWARN)
set_name(0x801265BC, "L5roomGen__Fiiiii", SN_NOWARN)
set_name(0x801268B8, "L5firstRoom__Fv", SN_NOWARN)
set_name(0x80126C74, "L5GetArea__Fv", SN_NOWARN)
set_name(0x80126CD4, "L5makeDungeon__Fv", SN_NOWARN)
set_name(0x80126D60, "L5makeDmt__Fv", SN_NOWARN)
set_name(0x80126E48, "L5HWallOk__Fii", SN_NOWARN)
set_name(0x80126F84, "L5VWallOk__Fii", SN_NOWARN)
set_name(0x801270D0, "L5HorizWall__Fiici", SN_NOWARN)
set_name(0x80127310, "L5VertWall__Fiici", SN_NOWARN)
set_name(0x80127544, "L5AddWall__Fv", SN_NOWARN)
set_name(0x801277B4, "DRLG_L5GChamber__Fiiiiii", SN_NOWARN)
set_name(0x80127A74, "DRLG_L5GHall__Fiiii", SN_NOWARN)
set_name(0x80127B28, "L5tileFix__Fv", SN_NOWARN)
set_name(0x801283EC, "DRLG_L5Subs__Fv", SN_NOWARN)
set_name(0x801285E4, "DRLG_L5SetRoom__Fii", SN_NOWARN)
set_name(0x801286E4, "L5FillChambers__Fv", SN_NOWARN)
set_name(0x80128DD0, "DRLG_L5FTVR__Fiiiii", SN_NOWARN)
set_name(0x80129320, "DRLG_L5FloodTVal__Fv", SN_NOWARN)
set_name(0x80129424, "DRLG_L5TransFix__Fv", SN_NOWARN)
set_name(0x80129634, "DRLG_L5DirtFix__Fv", SN_NOWARN)
set_name(0x80129790, "DRLG_L5CornerFix__Fv", SN_NOWARN)
set_name(0x801298A0, "DRLG_L5__Fi", SN_NOWARN)
set_name(0x80129DC0, "CreateL5Dungeon__FUii", SN_NOWARN)
set_name(0x8012C364, "DRLG_L2PlaceMiniSet__FPUciiiiii", SN_NOWARN)
set_name(0x8012C758, "DRLG_L2PlaceRndSet__FPUci", SN_NOWARN)
set_name(0x8012CA58, "DRLG_L2Subs__Fv", SN_NOWARN)
set_name(0x8012CC4C, "DRLG_L2Shadows__Fv", SN_NOWARN)
set_name(0x8012CE10, "InitDungeon__Fv", SN_NOWARN)
set_name(0x8012CE70, "DRLG_LoadL2SP__Fv", SN_NOWARN)
set_name(0x8012CF10, "DRLG_FreeL2SP__Fv", SN_NOWARN)
set_name(0x8012CF40, "DRLG_L2SetRoom__Fii", SN_NOWARN)
set_name(0x8012D040, "DefineRoom__Fiiiii", SN_NOWARN)
set_name(0x8012D24C, "CreateDoorType__Fii", SN_NOWARN)
set_name(0x8012D330, "PlaceHallExt__Fii", SN_NOWARN)
set_name(0x8012D368, "AddHall__Fiiiii", SN_NOWARN)
set_name(0x8012D440, "CreateRoom__Fiiiiiiiii", SN_NOWARN)
set_name(0x8012DAC8, "GetHall__FPiN40", SN_NOWARN)
set_name(0x8012DB60, "ConnectHall__Fiiiii", SN_NOWARN)
set_name(0x8012E1C8, "DoPatternCheck__Fii", SN_NOWARN)
set_name(0x8012E47C, "L2TileFix__Fv", SN_NOWARN)
set_name(0x8012E5A0, "DL2_Cont__FUcUcUcUc", SN_NOWARN)
set_name(0x8012E620, "DL2_NumNoChar__Fv", SN_NOWARN)
set_name(0x8012E67C, "DL2_DrawRoom__Fiiii", SN_NOWARN)
set_name(0x8012E780, "DL2_KnockWalls__Fiiii", SN_NOWARN)
set_name(0x8012E950, "DL2_FillVoids__Fv", SN_NOWARN)
set_name(0x8012F2D4, "CreateDungeon__Fv", SN_NOWARN)
set_name(0x8012F5E0, "DRLG_L2Pass3__Fv", SN_NOWARN)
set_name(0x8012F778, "DRLG_L2FTVR__Fiiiii", SN_NOWARN)
set_name(0x8012FCC0, "DRLG_L2FloodTVal__Fv", SN_NOWARN)
set_name(0x8012FDC4, "DRLG_L2TransFix__Fv", SN_NOWARN)
set_name(0x8012FFD4, "L2DirtFix__Fv", SN_NOWARN)
set_name(0x80130134, "L2LockoutFix__Fv", SN_NOWARN)
set_name(0x801304C0, "L2DoorFix__Fv", SN_NOWARN)
set_name(0x80130570, "DRLG_L2__Fi", SN_NOWARN)
set_name(0x80130FBC, "DRLG_InitL2Vals__Fv", SN_NOWARN)
set_name(0x80130FC4, "LoadL2Dungeon__FPcii", SN_NOWARN)
set_name(0x801311B4, "LoadPreL2Dungeon__FPcii", SN_NOWARN)
set_name(0x801313A0, "CreateL2Dungeon__FUii", SN_NOWARN)
set_name(0x80131D58, "InitL3Dungeon__Fv", SN_NOWARN)
set_name(0x80131DE0, "DRLG_L3FillRoom__Fiiii", SN_NOWARN)
set_name(0x8013203C, "DRLG_L3CreateBlock__Fiiii", SN_NOWARN)
set_name(0x801322D8, "DRLG_L3FloorArea__Fiiii", SN_NOWARN)
set_name(0x80132340, "DRLG_L3FillDiags__Fv", SN_NOWARN)
set_name(0x80132470, "DRLG_L3FillSingles__Fv", SN_NOWARN)
set_name(0x8013253C, "DRLG_L3FillStraights__Fv", SN_NOWARN)
set_name(0x80132900, "DRLG_L3Edges__Fv", SN_NOWARN)
set_name(0x80132940, "DRLG_L3GetFloorArea__Fv", SN_NOWARN)
set_name(0x80132990, "DRLG_L3MakeMegas__Fv", SN_NOWARN)
set_name(0x80132AD4, "DRLG_L3River__Fv", SN_NOWARN)
set_name(0x80133514, "DRLG_L3SpawnEdge__FiiPi", SN_NOWARN)
set_name(0x801337A0, "DRLG_L3Spawn__FiiPi", SN_NOWARN)
set_name(0x801339B4, "DRLG_L3Pool__Fv", SN_NOWARN)
set_name(0x80133C08, "DRLG_L3PoolFix__Fv", SN_NOWARN)
set_name(0x80133D3C, "DRLG_L3PlaceMiniSet__FPCUciiiiii", SN_NOWARN)
set_name(0x801340BC, "DRLG_L3PlaceRndSet__FPCUci", SN_NOWARN)
set_name(0x80134404, "WoodVertU__Fii", SN_NOWARN)
set_name(0x801344B0, "WoodVertD__Fii", SN_NOWARN)
set_name(0x8013454C, "WoodHorizL__Fii", SN_NOWARN)
set_name(0x801345E0, "WoodHorizR__Fii", SN_NOWARN)
set_name(0x80134664, "AddFenceDoors__Fv", SN_NOWARN)
set_name(0x80134748, "FenceDoorFix__Fv", SN_NOWARN)
set_name(0x8013493C, "DRLG_L3Wood__Fv", SN_NOWARN)
set_name(0x8013512C, "DRLG_L3Anvil__Fv", SN_NOWARN)
set_name(0x80135388, "FixL3Warp__Fv", SN_NOWARN)
set_name(0x80135470, "FixL3HallofHeroes__Fv", SN_NOWARN)
set_name(0x801355C4, "DRLG_L3LockRec__Fii", SN_NOWARN)
set_name(0x80135660, "DRLG_L3Lockout__Fv", SN_NOWARN)
set_name(0x80135720, "DRLG_L3__Fi", SN_NOWARN)
set_name(0x80135E40, "DRLG_L3Pass3__Fv", SN_NOWARN)
set_name(0x80135FE4, "CreateL3Dungeon__FUii", SN_NOWARN)
set_name(0x801360F8, "LoadL3Dungeon__FPcii", SN_NOWARN)
set_name(0x8013631C, "LoadPreL3Dungeon__FPcii", SN_NOWARN)
set_name(0x80138168, "DRLG_L4Shadows__Fv", SN_NOWARN)
set_name(0x8013822C, "InitL4Dungeon__Fv", SN_NOWARN)
set_name(0x801382C8, "DRLG_LoadL4SP__Fv", SN_NOWARN)
set_name(0x8013836C, "DRLG_FreeL4SP__Fv", SN_NOWARN)
set_name(0x80138394, "DRLG_L4SetSPRoom__Fii", SN_NOWARN)
set_name(0x80138494, "L4makeDmt__Fv", SN_NOWARN)
set_name(0x80138538, "L4HWallOk__Fii", SN_NOWARN)
set_name(0x80138688, "L4VWallOk__Fii", SN_NOWARN)
set_name(0x80138804, "L4HorizWall__Fiii", SN_NOWARN)
set_name(0x801389D4, "L4VertWall__Fiii", SN_NOWARN)
set_name(0x80138B9C, "L4AddWall__Fv", SN_NOWARN)
set_name(0x8013907C, "L4tileFix__Fv", SN_NOWARN)
set_name(0x8013B264, "DRLG_L4Subs__Fv", SN_NOWARN)
set_name(0x8013B43C, "L4makeDungeon__Fv", SN_NOWARN)
set_name(0x8013B674, "uShape__Fv", SN_NOWARN)
set_name(0x8013B918, "GetArea__Fv", SN_NOWARN)
set_name(0x8013B974, "L4drawRoom__Fiiii", SN_NOWARN)
set_name(0x8013B9DC, "L4checkRoom__Fiiii", SN_NOWARN)
set_name(0x8013BA78, "L4roomGen__Fiiiii", SN_NOWARN)
set_name(0x8013BD74, "L4firstRoom__Fv", SN_NOWARN)
set_name(0x8013BF90, "L4SaveQuads__Fv", SN_NOWARN)
set_name(0x8013C030, "DRLG_L4SetRoom__FPUcii", SN_NOWARN)
set_name(0x8013C104, "DRLG_LoadDiabQuads__FUc", SN_NOWARN)
set_name(0x8013C268, "DRLG_L4PlaceMiniSet__FPCUciiiiii", SN_NOWARN)
set_name(0x8013C680, "DRLG_L4FTVR__Fiiiii", SN_NOWARN)
set_name(0x8013CBC8, "DRLG_L4FloodTVal__Fv", SN_NOWARN)
set_name(0x8013CCCC, "IsDURWall__Fc", SN_NOWARN)
set_name(0x8013CCFC, "IsDLLWall__Fc", SN_NOWARN)
set_name(0x8013CD2C, "DRLG_L4TransFix__Fv", SN_NOWARN)
set_name(0x8013D084, "DRLG_L4Corners__Fv", SN_NOWARN)
set_name(0x8013D118, "L4FixRim__Fv", SN_NOWARN)
set_name(0x8013D154, "DRLG_L4GeneralFix__Fv", SN_NOWARN)
set_name(0x8013D1F8, "DRLG_L4__Fi", SN_NOWARN)
set_name(0x8013DAF4, "DRLG_L4Pass3__Fv", SN_NOWARN)
set_name(0x8013DC98, "CreateL4Dungeon__FUii", SN_NOWARN)
set_name(0x8013DD28, "ObjIndex__Fii", SN_NOWARN)
set_name(0x8013DDDC, "AddSKingObjs__Fv", SN_NOWARN)
set_name(0x8013DF0C, "AddSChamObjs__Fv", SN_NOWARN)
set_name(0x8013DF88, "AddVileObjs__Fv", SN_NOWARN)
set_name(0x8013E034, "DRLG_SetMapTrans__FPc", SN_NOWARN)
set_name(0x8013E0F8, "LoadSetMap__Fv", SN_NOWARN)
set_name(0x8013E400, "CM_QuestToBitPattern__Fi", SN_NOWARN)
set_name(0x8013E4D0, "CM_ShowMonsterList__Fii", SN_NOWARN)
set_name(0x8013E548, "CM_ChooseMonsterList__FiUl", SN_NOWARN)
set_name(0x8013E5E8, "NoUiListChoose__FiUl", SN_NOWARN)
set_name(0x8013E5F0, "ChooseTask__FP4TASK", SN_NOWARN)
set_name(0x8013E6F8, "ShowTask__FP4TASK", SN_NOWARN)
set_name(0x8013E914, "GetListsAvailable__FiUlPUc", SN_NOWARN)
set_name(0x8013EA38, "GetDown__C4CPad", SN_NOWARN)
set_name(0x8013EA60, "AddL1Door__Fiiii", SN_NOWARN)
set_name(0x8013EB98, "AddSCambBook__Fi", SN_NOWARN)
set_name(0x8013EC38, "AddChest__Fii", SN_NOWARN)
set_name(0x8013EE18, "AddL2Door__Fiiii", SN_NOWARN)
set_name(0x8013EF64, "AddL3Door__Fiiii", SN_NOWARN)
set_name(0x8013EFF8, "AddSarc__Fi", SN_NOWARN)
set_name(0x8013F0D4, "AddFlameTrap__Fi", SN_NOWARN)
set_name(0x8013F130, "AddTrap__Fii", SN_NOWARN)
set_name(0x8013F228, "AddObjLight__Fii", SN_NOWARN)
set_name(0x8013F2D0, "AddBarrel__Fii", SN_NOWARN)
set_name(0x8013F380, "AddShrine__Fi", SN_NOWARN)
set_name(0x8013F4D0, "AddBookcase__Fi", SN_NOWARN)
set_name(0x8013F528, "AddBookstand__Fi", SN_NOWARN)
set_name(0x8013F570, "AddBloodFtn__Fi", SN_NOWARN)
set_name(0x8013F5B8, "AddPurifyingFountain__Fi", SN_NOWARN)
set_name(0x8013F694, "AddArmorStand__Fi", SN_NOWARN)
set_name(0x8013F71C, "AddGoatShrine__Fi", SN_NOWARN)
set_name(0x8013F764, "AddCauldron__Fi", SN_NOWARN)
set_name(0x8013F7AC, "AddMurkyFountain__Fi", SN_NOWARN)
set_name(0x8013F888, "AddTearFountain__Fi", SN_NOWARN)
set_name(0x8013F8D0, "AddDecap__Fi", SN_NOWARN)
set_name(0x8013F94C, "AddVilebook__Fi", SN_NOWARN)
set_name(0x8013F99C, "AddMagicCircle__Fi", SN_NOWARN)
set_name(0x8013FA10, "AddBrnCross__Fi", SN_NOWARN)
set_name(0x8013FA58, "AddPedistal__Fi", SN_NOWARN)
set_name(0x8013FACC, "AddStoryBook__Fi", SN_NOWARN)
set_name(0x8013FC9C, "AddWeaponRack__Fi", SN_NOWARN)
set_name(0x8013FD24, "AddTorturedBody__Fi", SN_NOWARN)
set_name(0x8013FDA0, "AddFlameLvr__Fi", SN_NOWARN)
set_name(0x8013FDE0, "GetRndObjLoc__FiRiT1", SN_NOWARN)
set_name(0x8013FEEC, "AddMushPatch__Fv", SN_NOWARN)
set_name(0x80140010, "AddSlainHero__Fv", SN_NOWARN)
set_name(0x80140050, "RndLocOk__Fii", SN_NOWARN)
set_name(0x80140134, "TrapLocOk__Fii", SN_NOWARN)
set_name(0x8014019C, "RoomLocOk__Fii", SN_NOWARN)
set_name(0x80140234, "InitRndLocObj__Fiii", SN_NOWARN)
set_name(0x801403E0, "InitRndLocBigObj__Fiii", SN_NOWARN)
set_name(0x801405D8, "InitRndLocObj5x5__Fiii", SN_NOWARN)
set_name(0x80140700, "SetMapObjects__FPUcii", SN_NOWARN)
set_name(0x801409A0, "ClrAllObjects__Fv", SN_NOWARN)
set_name(0x80140A90, "AddTortures__Fv", SN_NOWARN)
set_name(0x80140C1C, "AddCandles__Fv", SN_NOWARN)
set_name(0x80140CA4, "AddTrapLine__Fiiii", SN_NOWARN)
set_name(0x80141040, "AddLeverObj__Fiiiiiiii", SN_NOWARN)
set_name(0x80141048, "AddBookLever__Fiiiiiiiii", SN_NOWARN)
set_name(0x8014125C, "InitRndBarrels__Fv", SN_NOWARN)
set_name(0x801413F8, "AddL1Objs__Fiiii", SN_NOWARN)
set_name(0x80141530, "AddL2Objs__Fiiii", SN_NOWARN)
set_name(0x80141644, "AddL3Objs__Fiiii", SN_NOWARN)
set_name(0x80141744, "TorchLocOK__Fii", SN_NOWARN)
set_name(0x80141784, "AddL2Torches__Fv", SN_NOWARN)
set_name(0x80141938, "WallTrapLocOk__Fii", SN_NOWARN)
set_name(0x801419A0, "AddObjTraps__Fv", SN_NOWARN)
set_name(0x80141D18, "AddChestTraps__Fv", SN_NOWARN)
set_name(0x80141E68, "LoadMapObjects__FPUciiiiiii", SN_NOWARN)
set_name(0x80141FD4, "AddDiabObjs__Fv", SN_NOWARN)
set_name(0x80142128, "AddStoryBooks__Fv", SN_NOWARN)
set_name(0x80142278, "AddHookedBodies__Fi", SN_NOWARN)
set_name(0x80142470, "AddL4Goodies__Fv", SN_NOWARN)
set_name(0x80142520, "AddLazStand__Fv", SN_NOWARN)
set_name(0x801426B4, "InitObjects__Fv", SN_NOWARN)
set_name(0x80142D18, "PreObjObjAddSwitch__Fiiii", SN_NOWARN)
set_name(0x80143020, "FillSolidBlockTbls__Fv", SN_NOWARN)
set_name(0x801431CC, "SetDungeonMicros__Fv", SN_NOWARN)
set_name(0x801431D4, "DRLG_InitTrans__Fv", SN_NOWARN)
set_name(0x80143248, "DRLG_MRectTrans__Fiiii", SN_NOWARN)
set_name(0x801432E8, "DRLG_RectTrans__Fiiii", SN_NOWARN)
set_name(0x80143368, "DRLG_CopyTrans__Fiiii", SN_NOWARN)
set_name(0x801433D0, "DRLG_ListTrans__FiPUc", SN_NOWARN)
set_name(0x80143444, "DRLG_AreaTrans__FiPUc", SN_NOWARN)
set_name(0x801434D4, "DRLG_InitSetPC__Fv", SN_NOWARN)
set_name(0x801434EC, "DRLG_SetPC__Fv", SN_NOWARN)
set_name(0x8014359C, "Make_SetPC__Fiiii", SN_NOWARN)
set_name(0x8014363C, "DRLG_WillThemeRoomFit__FiiiiiPiT5", SN_NOWARN)
set_name(0x80143904, "DRLG_CreateThemeRoom__Fi", SN_NOWARN)
set_name(0x8014490C, "DRLG_PlaceThemeRooms__FiiiiUc", SN_NOWARN)
set_name(0x80144BB4, "DRLG_HoldThemeRooms__Fv", SN_NOWARN)
set_name(0x80144D68, "SkipThemeRoom__Fii", SN_NOWARN)
set_name(0x80144E34, "InitLevels__Fv", SN_NOWARN)
set_name(0x80144F38, "TFit_Shrine__Fi", SN_NOWARN)
set_name(0x801451A8, "TFit_Obj5__Fi", SN_NOWARN)
set_name(0x8014537C, "TFit_SkelRoom__Fi", SN_NOWARN)
set_name(0x8014542C, "TFit_GoatShrine__Fi", SN_NOWARN)
set_name(0x801454C4, "CheckThemeObj3__Fiiii", SN_NOWARN)
set_name(0x80145614, "TFit_Obj3__Fi", SN_NOWARN)
set_name(0x801456D4, "CheckThemeReqs__Fi", SN_NOWARN)
set_name(0x801457A0, "SpecialThemeFit__Fii", SN_NOWARN)
set_name(0x8014597C, "CheckThemeRoom__Fi", SN_NOWARN)
set_name(0x80145C28, "InitThemes__Fv", SN_NOWARN)
set_name(0x80145F74, "HoldThemeRooms__Fv", SN_NOWARN)
set_name(0x8014605C, "PlaceThemeMonsts__Fii", SN_NOWARN)
set_name(0x80146200, "Theme_Barrel__Fi", SN_NOWARN)
set_name(0x80146378, "Theme_Shrine__Fi", SN_NOWARN)
set_name(0x80146460, "Theme_MonstPit__Fi", SN_NOWARN)
set_name(0x8014658C, "Theme_SkelRoom__Fi", SN_NOWARN)
set_name(0x80146890, "Theme_Treasure__Fi", SN_NOWARN)
set_name(0x80146AF4, "Theme_Library__Fi", SN_NOWARN)
set_name(0x80146D64, "Theme_Torture__Fi", SN_NOWARN)
set_name(0x80146ED4, "Theme_BloodFountain__Fi", SN_NOWARN)
set_name(0x80146F48, "Theme_Decap__Fi", SN_NOWARN)
set_name(0x801470B8, "Theme_PurifyingFountain__Fi", SN_NOWARN)
set_name(0x8014712C, "Theme_ArmorStand__Fi", SN_NOWARN)
set_name(0x801472C4, "Theme_GoatShrine__Fi", SN_NOWARN)
set_name(0x80147414, "Theme_Cauldron__Fi", SN_NOWARN)
set_name(0x80147488, "Theme_MurkyFountain__Fi", SN_NOWARN)
set_name(0x801474FC, "Theme_TearFountain__Fi", SN_NOWARN)
set_name(0x80147570, "Theme_BrnCross__Fi", SN_NOWARN)
set_name(0x801476E8, "Theme_WeaponRack__Fi", SN_NOWARN)
set_name(0x80147880, "UpdateL4Trans__Fv", SN_NOWARN)
set_name(0x801478E0, "CreateThemeRooms__Fv", SN_NOWARN)
set_name(0x80147AC4, "InitPortals__Fv", SN_NOWARN)
set_name(0x80147B24, "InitQuests__Fv", SN_NOWARN)
set_name(0x80147F28, "DrawButcher__Fv", SN_NOWARN)
set_name(0x80147F6C, "DrawSkelKing__Fiii", SN_NOWARN)
set_name(0x80147FA8, "DrawWarLord__Fii", SN_NOWARN)
set_name(0x801480A4, "DrawSChamber__Fiii", SN_NOWARN)
set_name(0x801481E0, "DrawLTBanner__Fii", SN_NOWARN)
set_name(0x801482BC, "DrawBlind__Fii", SN_NOWARN)
set_name(0x80148398, "DrawBlood__Fii", SN_NOWARN)
set_name(0x80148478, "DRLG_CheckQuests__Fii", SN_NOWARN)
set_name(0x801485B4, "InitInv__Fv", SN_NOWARN)
set_name(0x80148614, "InitAutomap__Fv", SN_NOWARN)
set_name(0x801487D8, "InitAutomapOnce__Fv", SN_NOWARN)
set_name(0x801487E8, "MonstPlace__Fii", SN_NOWARN)
set_name(0x801488A4, "InitMonsterGFX__Fi", SN_NOWARN)
set_name(0x8014897C, "PlaceMonster__Fiiii", SN_NOWARN)
set_name(0x80148A1C, "AddMonsterType__Fii", SN_NOWARN)
set_name(0x80148B18, "GetMonsterTypes__FUl", SN_NOWARN)
set_name(0x80148BC8, "ClrAllMonsters__Fv", SN_NOWARN)
set_name(0x80148D08, "InitLevelMonsters__Fv", SN_NOWARN)
set_name(0x80148D8C, "GetLevelMTypes__Fv", SN_NOWARN)
set_name(0x801491F4, "PlaceQuestMonsters__Fv", SN_NOWARN)
set_name(0x801495B8, "LoadDiabMonsts__Fv", SN_NOWARN)
set_name(0x801496C8, "PlaceGroup__FiiUci", SN_NOWARN)
set_name(0x80149BFC, "SetMapMonsters__FPUcii", SN_NOWARN)
set_name(0x80149E20, "InitMonsters__Fv", SN_NOWARN)
set_name(0x8014A1D0, "PlaceUniqueMonst__Fiii", SN_NOWARN)
set_name(0x8014A93C, "PlaceUniques__Fv", SN_NOWARN)
set_name(0x8014AACC, "PreSpawnSkeleton__Fv", SN_NOWARN)
set_name(0x8014AC0C, "encode_enemy__Fi", SN_NOWARN)
set_name(0x8014AC64, "decode_enemy__Fii", SN_NOWARN)
set_name(0x8014AD7C, "IsGoat__Fi", SN_NOWARN)
set_name(0x8014ADA8, "InitMissiles__Fv", SN_NOWARN)
set_name(0x8014AF80, "InitNoTriggers__Fv", SN_NOWARN)
set_name(0x8014AFA4, "InitTownTriggers__Fv", SN_NOWARN)
set_name(0x8014B304, "InitL1Triggers__Fv", SN_NOWARN)
set_name(0x8014B418, "InitL2Triggers__Fv", SN_NOWARN)
set_name(0x8014B5A8, "InitL3Triggers__Fv", SN_NOWARN)
set_name(0x8014B704, "InitL4Triggers__Fv", SN_NOWARN)
set_name(0x8014B918, "InitSKingTriggers__Fv", SN_NOWARN)
set_name(0x8014B964, "InitSChambTriggers__Fv", SN_NOWARN)
set_name(0x8014B9B0, "InitPWaterTriggers__Fv", SN_NOWARN)
set_name(0x8014B9FC, "InitVPTriggers__Fv", SN_NOWARN)
set_name(0x8014BA48, "InitStores__Fv", SN_NOWARN)
set_name(0x8014BAC8, "SetupTownStores__Fv", SN_NOWARN)
set_name(0x8014BC78, "DeltaLoadLevel__Fv", SN_NOWARN)
set_name(0x8014C46C, "SmithItemOk__Fi", SN_NOWARN)
set_name(0x8014C4D0, "RndSmithItem__Fi", SN_NOWARN)
set_name(0x8014C5DC, "WitchItemOk__Fi", SN_NOWARN)
set_name(0x8014C71C, "RndWitchItem__Fi", SN_NOWARN)
set_name(0x8014C81C, "BubbleSwapItem__FP10ItemStructT0", SN_NOWARN)
set_name(0x8014C900, "SortWitch__Fv", SN_NOWARN)
set_name(0x8014CA20, "RndBoyItem__Fi", SN_NOWARN)
set_name(0x8014CB44, "HealerItemOk__Fi", SN_NOWARN)
set_name(0x8014CCF8, "RndHealerItem__Fi", SN_NOWARN)
set_name(0x8014CDF8, "RecreatePremiumItem__Fiiii", SN_NOWARN)
set_name(0x8014CEC0, "RecreateWitchItem__Fiiii", SN_NOWARN)
set_name(0x8014D018, "RecreateSmithItem__Fiiii", SN_NOWARN)
set_name(0x8014D0B4, "RecreateHealerItem__Fiiii", SN_NOWARN)
set_name(0x8014D174, "RecreateBoyItem__Fiiii", SN_NOWARN)
set_name(0x8014D238, "RecreateTownItem__FiiUsii", SN_NOWARN)
set_name(0x8014D2C4, "SpawnSmith__Fi", SN_NOWARN)
set_name(0x8014D460, "SpawnWitch__Fi", SN_NOWARN)
set_name(0x8014D7CC, "SpawnHealer__Fi", SN_NOWARN)
set_name(0x8014DAE8, "SpawnBoy__Fi", SN_NOWARN)
set_name(0x8014DC3C, "SortSmith__Fv", SN_NOWARN)
set_name(0x8014DD50, "SortHealer__Fv", SN_NOWARN)
set_name(0x8014DE70, "RecreateItem__FiiUsii", SN_NOWARN)
set_name(0x80122D48, "themeLoc", SN_NOWARN)
set_name(0x80123490, "OldBlock", SN_NOWARN)
set_name(0x801234A0, "L5dungeon", SN_NOWARN)
set_name(0x80123130, "SPATS", SN_NOWARN)
set_name(0x80123234, "BSTYPES", SN_NOWARN)
set_name(0x80123304, "L5BTYPES", SN_NOWARN)
set_name(0x801233D4, "STAIRSUP", SN_NOWARN)
set_name(0x801233F8, "L5STAIRSUP", SN_NOWARN)
set_name(0x8012341C, "STAIRSDOWN", SN_NOWARN)
set_name(0x80123438, "LAMPS", SN_NOWARN)
set_name(0x80123444, "PWATERIN", SN_NOWARN)
set_name(0x80122D38, "L5ConvTbl", SN_NOWARN)
set_name(0x8012B6D0, "RoomList", SN_NOWARN)
set_name(0x8012BD24, "predungeon", SN_NOWARN)
set_name(0x80129E60, "Dir_Xadd", SN_NOWARN)
set_name(0x80129E74, "Dir_Yadd", SN_NOWARN)
set_name(0x80129E88, "SPATSL2", SN_NOWARN)
set_name(0x80129E98, "BTYPESL2", SN_NOWARN)
set_name(0x80129F3C, "BSTYPESL2", SN_NOWARN)
set_name(0x80129FE0, "VARCH1", SN_NOWARN)
set_name(0x80129FF4, "VARCH2", SN_NOWARN)
set_name(0x8012A008, "VARCH3", SN_NOWARN)
set_name(0x8012A01C, "VARCH4", SN_NOWARN)
set_name(0x8012A030, "VARCH5", SN_NOWARN)
set_name(0x8012A044, "VARCH6", SN_NOWARN)
set_name(0x8012A058, "VARCH7", SN_NOWARN)
set_name(0x8012A06C, "VARCH8", SN_NOWARN)
set_name(0x8012A080, "VARCH9", SN_NOWARN)
set_name(0x8012A094, "VARCH10", SN_NOWARN)
set_name(0x8012A0A8, "VARCH11", SN_NOWARN)
set_name(0x8012A0BC, "VARCH12", SN_NOWARN)
set_name(0x8012A0D0, "VARCH13", SN_NOWARN)
set_name(0x8012A0E4, "VARCH14", SN_NOWARN)
set_name(0x8012A0F8, "VARCH15", SN_NOWARN)
set_name(0x8012A10C, "VARCH16", SN_NOWARN)
set_name(0x8012A120, "VARCH17", SN_NOWARN)
set_name(0x8012A130, "VARCH18", SN_NOWARN)
set_name(0x8012A140, "VARCH19", SN_NOWARN)
set_name(0x8012A150, "VARCH20", SN_NOWARN)
set_name(0x8012A160, "VARCH21", SN_NOWARN)
set_name(0x8012A170, "VARCH22", SN_NOWARN)
set_name(0x8012A180, "VARCH23", SN_NOWARN)
set_name(0x8012A190, "VARCH24", SN_NOWARN)
set_name(0x8012A1A0, "VARCH25", SN_NOWARN)
set_name(0x8012A1B4, "VARCH26", SN_NOWARN)
set_name(0x8012A1C8, "VARCH27", SN_NOWARN)
set_name(0x8012A1DC, "VARCH28", SN_NOWARN)
set_name(0x8012A1F0, "VARCH29", SN_NOWARN)
set_name(0x8012A204, "VARCH30", SN_NOWARN)
set_name(0x8012A218, "VARCH31", SN_NOWARN)
set_name(0x8012A22C, "VARCH32", SN_NOWARN)
set_name(0x8012A240, "VARCH33", SN_NOWARN)
set_name(0x8012A254, "VARCH34", SN_NOWARN)
set_name(0x8012A268, "VARCH35", SN_NOWARN)
set_name(0x8012A27C, "VARCH36", SN_NOWARN)
set_name(0x8012A290, "VARCH37", SN_NOWARN)
set_name(0x8012A2A4, "VARCH38", SN_NOWARN)
set_name(0x8012A2B8, "VARCH39", SN_NOWARN)
set_name(0x8012A2CC, "VARCH40", SN_NOWARN)
set_name(0x8012A2E0, "HARCH1", SN_NOWARN)
set_name(0x8012A2F0, "HARCH2", SN_NOWARN)
set_name(0x8012A300, "HARCH3", SN_NOWARN)
set_name(0x8012A310, "HARCH4", SN_NOWARN)
set_name(0x8012A320, "HARCH5", SN_NOWARN)
set_name(0x8012A330, "HARCH6", SN_NOWARN)
set_name(0x8012A340, "HARCH7", SN_NOWARN)
set_name(0x8012A350, "HARCH8", SN_NOWARN)
set_name(0x8012A360, "HARCH9", SN_NOWARN)
set_name(0x8012A370, "HARCH10", SN_NOWARN)
set_name(0x8012A380, "HARCH11", SN_NOWARN)
set_name(0x8012A390, "HARCH12", SN_NOWARN)
set_name(0x8012A3A0, "HARCH13", SN_NOWARN)
set_name(0x8012A3B0, "HARCH14", SN_NOWARN)
set_name(0x8012A3C0, "HARCH15", SN_NOWARN)
set_name(0x8012A3D0, "HARCH16", SN_NOWARN)
set_name(0x8012A3E0, "HARCH17", SN_NOWARN)
set_name(0x8012A3F0, "HARCH18", SN_NOWARN)
set_name(0x8012A400, "HARCH19", SN_NOWARN)
set_name(0x8012A410, "HARCH20", SN_NOWARN)
set_name(0x8012A420, "HARCH21", SN_NOWARN)
set_name(0x8012A430, "HARCH22", SN_NOWARN)
set_name(0x8012A440, "HARCH23", SN_NOWARN)
set_name(0x8012A450, "HARCH24", SN_NOWARN)
set_name(0x8012A460, "HARCH25", SN_NOWARN)
set_name(0x8012A470, "HARCH26", SN_NOWARN)
set_name(0x8012A480, "HARCH27", SN_NOWARN)
set_name(0x8012A490, "HARCH28", SN_NOWARN)
set_name(0x8012A4A0, "HARCH29", SN_NOWARN)
set_name(0x8012A4B0, "HARCH30", SN_NOWARN)
set_name(0x8012A4C0, "HARCH31", SN_NOWARN)
set_name(0x8012A4D0, "HARCH32", SN_NOWARN)
set_name(0x8012A4E0, "HARCH33", SN_NOWARN)
set_name(0x8012A4F0, "HARCH34", SN_NOWARN)
set_name(0x8012A500, "HARCH35", SN_NOWARN)
set_name(0x8012A510, "HARCH36", SN_NOWARN)
set_name(0x8012A520, "HARCH37", SN_NOWARN)
set_name(0x8012A530, "HARCH38", SN_NOWARN)
set_name(0x8012A540, "HARCH39", SN_NOWARN)
set_name(0x8012A550, "HARCH40", SN_NOWARN)
set_name(0x8012A560, "USTAIRS", SN_NOWARN)
set_name(0x8012A584, "DSTAIRS", SN_NOWARN)
set_name(0x8012A5A8, "WARPSTAIRS", SN_NOWARN)
set_name(0x8012A5CC, "CRUSHCOL", SN_NOWARN)
set_name(0x8012A5E0, "BIG1", SN_NOWARN)
set_name(0x8012A5EC, "BIG2", SN_NOWARN)
set_name(0x8012A5F8, "BIG5", SN_NOWARN)
set_name(0x8012A604, "BIG8", SN_NOWARN)
set_name(0x8012A610, "BIG9", SN_NOWARN)
set_name(0x8012A61C, "BIG10", SN_NOWARN)
set_name(0x8012A628, "PANCREAS1", SN_NOWARN)
set_name(0x8012A648, "PANCREAS2", SN_NOWARN)
set_name(0x8012A668, "CTRDOOR1", SN_NOWARN)
set_name(0x8012A67C, "CTRDOOR2", SN_NOWARN)
set_name(0x8012A690, "CTRDOOR3", SN_NOWARN)
set_name(0x8012A6A4, "CTRDOOR4", SN_NOWARN)
set_name(0x8012A6B8, "CTRDOOR5", SN_NOWARN)
set_name(0x8012A6CC, "CTRDOOR6", SN_NOWARN)
set_name(0x8012A6E0, "CTRDOOR7", SN_NOWARN)
set_name(0x8012A6F4, "CTRDOOR8", SN_NOWARN)
set_name(0x8012A708, "Patterns", SN_NOWARN)
set_name(0x80131718, "lockout", SN_NOWARN)
set_name(0x80131478, "L3ConvTbl", SN_NOWARN)
set_name(0x80131488, "L3UP", SN_NOWARN)
set_name(0x8013149C, "L3DOWN", SN_NOWARN)
set_name(0x801314B0, "L3HOLDWARP", SN_NOWARN)
set_name(0x801314C4, "L3TITE1", SN_NOWARN)
set_name(0x801314E8, "L3TITE2", SN_NOWARN)
set_name(0x8013150C, "L3TITE3", SN_NOWARN)
set_name(0x80131530, "L3TITE6", SN_NOWARN)
set_name(0x8013155C, "L3TITE7", SN_NOWARN)
set_name(0x80131588, "L3TITE8", SN_NOWARN)
set_name(0x8013159C, "L3TITE9", SN_NOWARN)
set_name(0x801315B0, "L3TITE10", SN_NOWARN)
set_name(0x801315C4, "L3TITE11", SN_NOWARN)
set_name(0x801315D8, "L3ISLE1", SN_NOWARN)
set_name(0x801315E8, "L3ISLE2", SN_NOWARN)
set_name(0x801315F8, "L3ISLE3", SN_NOWARN)
set_name(0x80131608, "L3ISLE4", SN_NOWARN)
set_name(0x80131618, "L3ISLE5", SN_NOWARN)
set_name(0x80131624, "L3ANVIL", SN_NOWARN)
set_name(0x80136534, "dung", SN_NOWARN)
set_name(0x801366C4, "hallok", SN_NOWARN)
set_name(0x801366D8, "L4dungeon", SN_NOWARN)
set_name(0x80137FD8, "L4ConvTbl", SN_NOWARN)
set_name(0x80137FE8, "L4USTAIRS", SN_NOWARN)
set_name(0x80138014, "L4TWARP", SN_NOWARN)
set_name(0x80138040, "L4DSTAIRS", SN_NOWARN)
set_name(0x80138074, "L4PENTA", SN_NOWARN)
set_name(0x801380A8, "L4PENTA2", SN_NOWARN)
set_name(0x801380DC, "L4BTYPES", SN_NOWARN)
| 50.5 | 68 | 0.822147 |
c4312b8b9b3276cd82f195ec746b56497843ecbb | 9,085 | py | Python | route_report.py | cmosig/route-report | 35599caded6b78f665446dfb5cca4df3d8a86dbd | [
"MIT"
] | 5 | 2021-06-02T10:20:36.000Z | 2021-06-26T13:13:35.000Z | route_report.py | cmosig/route-report | 35599caded6b78f665446dfb5cca4df3d8a86dbd | [
"MIT"
] | 3 | 2021-06-02T10:37:49.000Z | 2021-06-03T21:04:11.000Z | route_report.py | cmosig/route-report | 35599caded6b78f665446dfb5cca4df3d8a86dbd | [
"MIT"
] | null | null | null | import osm_tags
import output
import utility as uti
import metadata
import country_detection
import gpxpy
import pandas as pd
import argparse
from termcolor import colored
# ------------------------------------------------------------
# INIT
# ------------------------------------------------------------
def setup_parser():
parser = argparse.ArgumentParser(
description='Finds stuff next to your route.')
# input file
parser.add_argument('-f',
'--input-file',
metavar='route.gpx',
required=True,
type=str,
nargs='?',
help='used to supply your gpx file',
dest="input-file")
# search distance
parser.add_argument(
'-d',
'--search-distance',
metavar='<distance>',
required=False,
type=float,
nargs='?',
help=
"defines approx. search radius around route in kilometers (default=1km)",
dest="search-distance",
default=1)
# list of countries
parser.add_argument(
'-c',
'--country-codes',
metavar='countries',
default="AUTO",
required=False,
type=str,
nargs='?',
help=
"comma separated list of country codes (ISO 3166-1 Alpha-2 --> see Wikipedia), e.g., DE,US,FR (default=AUTO --> autodetection)",
dest="country-codes")
# use cache or not
parser.add_argument(
'-r',
'--redownload-files',
action='store_true',
required=False,
help="""if you want to redownload the large file from the openstreetmap
repository. This does not include processing of the file. Regardless of
this option files will be downloaded automatically if they do not
exist.""",
dest="redownload-files")
# use cache or not
parser.add_argument(
'-m',
'--reprocess-files',
action='store_true',
required=False,
help=
"""if you wat to reprocess the large openstreetmap file into the metadata
file that is used for finding points of interest. Regardless of this
option files will be processed automatically if the processed file does not exist.""",
dest="reprocess-files")
# set output mode
parser.add_argument(
'-o',
'--output-modes',
required=False,
metavar="print|csv|google-sheets|pdf|html-map",
type=str,
default="csv,print,html-map",
help=
"comma separated list of output modes, e.g., print,csv (default=csv,print,html-map)",
dest="output-modes")
# choose points of interest
parser.add_argument(
'-p',
'--points-of-interest',
required=False,
metavar="|".join(osm_tags.get_osm_tag_mapping()
["route-report-group"].drop_duplicates().to_list()),
default="food-shop,water,petrol-station",
type=str,
help=
"""comma separated list of points-of-interest the program is supposed to
look for along the route (default=food-shop,water,petrol-station)""",
dest="points-of-interest")
return vars(parser.parse_args())
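# Illustrative invocation (not part of the original script), assuming it is run
# directly as `python route_report.py`; all options shown are defined above:
#
#   python route_report.py -f mytour.gpx -d 2 -c DE,FR -p food-shop,water -o print,csv
#
# Country detection defaults to AUTO and output defaults to csv,print,html-map.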
# ------------------------------------------------------------
# FILE INTERACTIONS
# ------------------------------------------------------------
def extract_points(filename):
gpx_file = open(filename, 'r')
gpx = gpxpy.parse(gpx_file)
all_points = []
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
all_points.append({
"latitude": point.latitude,
"longitude": point.longitude
})
route = pd.DataFrame(all_points)
# compute distances between points to get about the position/kilometer on
# the route where the point is
route["lat_next"] = route["latitude"].shift(1)
route["long_next"] = route["longitude"].shift(1)
route["diff_distance"] = route.apply(uti.metric_distance_between_latlong,
args=("latitude", "longitude",
"lat_next", "long_next"),
axis=1)
route["cum_distance_km"] = route["diff_distance"].cumsum().apply(int)
return route[["latitude", "longitude", "cum_distance_km"]]
# ------------------------------------------------------------
# MAIN STUFF
# ------------------------------------------------------------
def get_poi(ser):
"""gets points of interested around a given point (lat, long)"""
# TODO performance?
# TODO make nice and less hacky
lati = ser["latitude"]
long = ser["longitude"]
subset = poi[((poi["longitude"] - long).abs() < search_distance)
& ((poi["latitude"] - lati).abs() < search_distance)]
list_of_poi = [(row[1]["id"], row[1]["name"], row[1]["latitude"],
row[1]["longitude"], row[1]["poi_groups"])
for row in subset.iterrows()]
return list_of_poi
def postprocess_route_results(route):
# TODO remove duplicates by distance to each other --> check if there are
# duplicate supermarkets --> saved as way and node
# remove points without poi
route = route[route["poi"].apply(len) != 0]
# first get one pois per line
route = route.explode("poi")
# extract data for pois
route["poi_id"] = route["poi"].str[0]
route["poi_name"] = route["poi"].str[1]
route["poi_lat"] = route["poi"].str[2]
route["poi_long"] = route["poi"].str[3]
route["poi_groups"] = route["poi"].str[4]
del route["poi"]
# compute distance between route point and poi
route["poi_distance_to_route"] = route.apply(
uti.metric_distance_between_latlong,
args=["latitude", "longitude", "poi_lat", "poi_long"],
axis=1)
# if poi is listed multiple times, then keep the one with the closest
# distance to route
route = route.sort_values(by="poi_distance_to_route")
route = route.drop_duplicates(subset=["poi_id"], keep="first")
# sort list by cum. km and pois name
route = route.sort_values(by=["cum_distance_km", "poi_name"])
return route
def main(args):
global search_distance
if args["search-distance"] is None:
uti.log(
colored(
"If you use the -d option you also need to supply a distance!",
"red"))
exit()
search_distance = uti.convert_km_to_latlong(args["search-distance"])
# processing the gpx file
uti.log("reading and preprocessing route...", expect_more=True)
route = extract_points(args["input-file"])
orignal_route = route.copy(deep=True)
uti.log("DONE", append=True)
# detecting countries on route
if args["country-codes"] == "AUTO":
uti.log("detecting countries on route...", expect_more=True)
country_codes = country_detection.detect_country_for_points(
route[["latitude", "longitude"]])
uti.log(",".join(country_codes) + "...", append=True, expect_more=True)
uti.log("DONE", append=True)
else:
country_codes = args["country-codes"].split(',')
    # download metadata files if necessary
metadata.download_and_preprocess_metadata(country_codes,
args["redownload-files"],
args["reprocess-files"])
# read in metadata
uti.log("reading metadata...", expect_more=True)
global poi
poi = metadata.read_metadata(country_codes)
# only take poi the user wants
points_of_interest_group_filter = set(
args["points-of-interest"].split(','))
poi = poi[poi["poi_groups"].apply(lambda groups: any(
[group in points_of_interest_group_filter for group in groups]))]
uti.log("DONE", append=True)
# get the poi
uti.log("searching for points of interest...", expect_more=True)
route["poi"] = route.apply(get_poi, axis=1)
# the approach below using polygons is only twice as fast and still requires postprocessing and matching with route
# create route and add search distance area around it
# route_polygon = LineString(map(Point,zip(route["latitude"], route["longitude"]))).buffer(search_distance)
# filter poi that are in polygon
# poi = poi[poi[["latitude", "longitude"]].apply(lambda point: route_polygon.contains(Point(tuple(point))), axis=1)]
route = postprocess_route_results(route)
uti.log("DONE", append=True)
return route, orignal_route
if __name__ == "__main__":
args = setup_parser()
route_with_shops, orignal_route = main(args)
output.output_results(route_with_shops,
orignal_route,
modes=args["output-modes"].split(','),
original_filename=args["input-file"].replace(
".gpx", "").split('/')[-1])
| 35.07722 | 136 | 0.574353 |
932ee2092e9077898e421cc7f9287f8342e8ca96 | 2,111 | py | Python | iridium/inveniordm/models/base.py | chriz-uniba/iridium | 4d357dc9d61aebfedd3c3e6a6b6451798c2c7122 | [
"MIT"
] | 2 | 2022-01-21T14:00:31.000Z | 2022-03-29T13:47:20.000Z | iridium/inveniordm/models/base.py | chriz-uniba/iridium | 4d357dc9d61aebfedd3c3e6a6b6451798c2c7122 | [
"MIT"
] | 8 | 2022-01-21T10:18:09.000Z | 2022-03-25T13:11:21.000Z | iridium/inveniordm/models/base.py | chriz-uniba/iridium | 4d357dc9d61aebfedd3c3e6a6b6451798c2c7122 | [
"MIT"
] | 2 | 2022-02-15T16:48:38.000Z | 2022-02-16T14:58:24.000Z | """Modified base model with enhanced pretty-printing."""
import json
from typing import cast
from pydantic import BaseModel
from ...pprint import pp
class JSONModel(BaseModel):
"""
Subclass adding additional features to pydantic BaseModel for API responses.
Models deriving from this variant:
* automatically are pretty-printed as JSON (for user convenience)
* can have read-only attributes declared that prevent direct setting
* can be toggled to return original, raw JSON dict (for debugging)
Only use this for parsing JSON responses from API requests!
Otherwise these enhancements might lead to unintended consequences.
"""
@property
def _read_only(self):
return []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __setattr__(self, key, value):
if key in self._read_only:
raise ValueError(f"'{key}' is a read-only attribute!")
super().__setattr__(key, value)
_raw_json: bool = False
@classmethod
def raw_json(cls, val: bool):
cls._raw_json = val
def __repr__(self) -> str:
"""
Pretty-printed appropriate representation of JSON-based objects.
In normal circumstances, this should be __str__ instead, because __repr__
is supposed to REPRoduce the object, i.e. be a Python expression yielding the
object.
But in our case the distinction between "user" and "developer" is not that
clear-cut and as users will use this in a Python interpreter context,
making this __repr__ seems to be the lesser evil for enhanced usability.
"""
return pp(json.loads(self.json(exclude_none=True)))
@classmethod
def parse_obj(cls, val, *args, **kwargs):
"""
If _raw_json is set, return back the raw JSON dict instead of parsed object.
NOTE: This is a DEBUGGING HACK and should only be used as such!
"""
if cls._raw_json:
return cast(cls, val)
else:
return cast(cls, super().parse_obj(val, *args, **kwargs))
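# Illustrative usage sketch (not part of the original module): a hypothetical
# subclass showing the read-only hook and the raw-JSON debugging toggle that the
# class docstring describes. The function is never called by the library.
def _example_jsonmodel_usage():
    class RecordSketch(JSONModel):
        id: str = ""
        title: str = ""
        @property
        def _read_only(self):
            return ["id"]  # `record.id = ...` now raises ValueError
    record = RecordSketch.parse_obj({"id": "abc", "title": "demo"})
    pretty = repr(record)      # __repr__ pretty-prints the record as JSON
    record.title = "renamed"   # allowed, not listed in _read_only
    JSONModel.raw_json(True)   # debugging hack: parse_obj() now returns the raw dict
    raw = RecordSketch.parse_obj({"id": "abc"})
    JSONModel.raw_json(False)
    return pretty, raw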
| 31.507463 | 85 | 0.663667 |
d1d29a0c496b91cbd883e24adeb75d8e67249bff | 1,100 | py | Python | jobs/types/feedback_validation_errors.py | Tim810306/oppia | 6f90044d12dbe0979c999265cbe46f267c4c592d | [
"Apache-2.0"
] | 2 | 2021-05-24T10:23:32.000Z | 2021-08-22T18:50:14.000Z | jobs/types/feedback_validation_errors.py | Tim810306/oppia | 6f90044d12dbe0979c999265cbe46f267c4c592d | [
"Apache-2.0"
] | 11 | 2021-03-03T07:21:27.000Z | 2022-03-12T01:03:44.000Z | jobs/types/feedback_validation_errors.py | Tim810306/oppia | 6f90044d12dbe0979c999265cbe46f267c4c592d | [
"Apache-2.0"
] | 1 | 2017-12-06T19:41:49.000Z | 2017-12-06T19:41:49.000Z | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error classes for feedback model audits."""
from __future__ import absolute_import
from __future__ import unicode_literals
from jobs.types import base_validation_errors
class InvalidEntityTypeError(base_validation_errors.BaseAuditError):
"""Error class for models that have invalid entity type."""
def __init__(self, model):
message = 'entity type %s is invalid.' % model.entity_type
super(InvalidEntityTypeError, self).__init__(message, model)
| 35.483871 | 74 | 0.761818 |
4066e3b597675f9403e717731bdd050d5683c108 | 1,257 | py | Python | core/logic.py | tfagerlind/verso | c2ecbce47180396e640e97450ee72aa034f704b0 | [
"MIT"
] | null | null | null | core/logic.py | tfagerlind/verso | c2ecbce47180396e640e97450ee72aa034f704b0 | [
"MIT"
] | 3 | 2022-01-21T21:52:52.000Z | 2022-01-21T22:03:26.000Z | core/logic.py | tfagerlind/verso | c2ecbce47180396e640e97450ee72aa034f704b0 | [
"MIT"
] | null | null | null | """Provides the main logic of the application"""
import logging
import semver
logger = logging.getLogger()
def get_current_version(tags):
"""Get the current version.
Args:
tags(list): list of git tags
Returns:
string: A version that corresponds to the tag that represents
the last version.
"""
versions = [tag[1:] for tag in tags]
valid_versions = [version
for version
in versions
if semver.VersionInfo.isvalid(version)]
semver_versions = [semver.VersionInfo.parse(version)
for version
in valid_versions]
return str(max(semver_versions)) if semver_versions else "0.0.0"
def get_next_version(tags):
"""Get the next version."""
versions = [tag[1:] for tag in tags]
valid_versions = [version
for version
in versions
if semver.VersionInfo.isvalid(version)]
semver_versions = [semver.VersionInfo.parse(version)
for version
in valid_versions]
return (str(max(semver_versions).bump_patch())
if semver_versions
else "0.1.0")
| 29.232558 | 69 | 0.568019 |
13820e0f0290b16fcc6bb034e6e8b87cd7825b40 | 4,116 | py | Python | test/system/anarchism/molecule/default/tests/test_testaid_system_anarchism_project_templates.py | RebelCodeBase/testaid | 998c827b826fe4374ecf0a234fef61a975e2fcd7 | [
"Apache-2.0"
] | 17 | 2019-08-04T09:29:19.000Z | 2020-05-16T02:25:20.000Z | test/system/anarchism/molecule/default/tests/test_testaid_system_anarchism_project_templates.py | RebelCodeBase/testaid | 998c827b826fe4374ecf0a234fef61a975e2fcd7 | [
"Apache-2.0"
] | 12 | 2019-07-19T22:20:42.000Z | 2020-01-20T06:45:38.000Z | test/system/anarchism/molecule/default/tests/test_testaid_system_anarchism_project_templates.py | RebelCodeBase/testaid | 998c827b826fe4374ecf0a234fef61a975e2fcd7 | [
"Apache-2.0"
] | 3 | 2019-08-08T18:18:13.000Z | 2019-10-07T13:46:03.000Z | import json
import testaid
testinfra_hosts = testaid.hosts()
def test_testaid_system_templates_resolve_template_string(
host,
testvars):
assert testvars['project_my_var_1'] == 'my_string_1'
def test_testaid_system_templates_resolve_template_string_reference(
host,
testvars):
assert testvars['project_template1'] == 'my_string_1'
def test_testaid_system_templates_resolve_template_string_twice(
host,
testvars):
assert testvars['project_template2'] == 'my_string_1'
def test_testaid_system_templates_resolve_template_string_transitive(
host,
testvars):
assert testvars['project_template3'] == 'my_string_1'
def test_testaid_system_templates_resolve_template_string_inline_front(
host,
testvars):
assert testvars['project_template4'] == 'inline+my_string_1'
def test_testaid_system_templates_resolve_template_string_inline_back(
host,
testvars):
assert testvars['project_template5'] == 'my_string_1+inline'
def test_testaid_system_templates_resolve_template_string_inline_both(
host,
testvars):
assert testvars['project_template6'] == 'inline+my_string_1+inline'
def test_testaid_system_templates_resolve_template_no_string(
host,
testvars):
assert testvars['project_my_var_2'] == 99
def test_testaid_system_templates_resolve_template_no_string_reference(
host,
testvars):
assert testvars['project_template7'] == 99
def test_testaid_system_templates_resolve_template_no_string_transitive(
host,
testvars):
# FIXME: Why is this suddenly a string?
assert testvars['project_template8'] == '99'
def test_testaid_system_templates_resolve_template_no_string_inline_front(
host,
testvars):
assert testvars['project_template9'] == 'inline+99'
def test_testaid_system_templates_resolve_template_no_string_inline_back(
host,
testvars):
assert testvars['project_template10'] == '99+inline'
def test_testaid_system_templates_resolve_template_no_string_inline_both(
host,
testvars):
assert testvars['project_template11'] == 'inline+99+inline'
def test_testaid_system_templates_resolve_template_special_chars_1(
host,
testvars):
assert testvars['project_special1'] == "äö(ü'!)§$;~é"
def test_testaid_system_templates_resolve_template_special_chars_2(
host,
testvars):
assert testvars['project_special2'] == 'ñô‰(„}»")¯]¿¬'
def test_testaid_system_template_resolve_lookup(
host,
testvars):
assert testvars['project_lookup_flattened'] == [1, 2, 3, 4, 5, 6]
def test_testaid_system_templates_resolve_template_list(
host,
testvars):
list1_json = '["first_list_item", "second_list_item"]'
assert json.dumps(testvars['project_list1']) == list1_json
def test_testaid_system_templates_resolve_template_nested_list(
host,
testvars):
list2_json = '["first_list_item", "second_list_item"]'
assert isinstance(testvars['project_list2'], list)
assert json.dumps(testvars['project_list2']) == list2_json
def test_testaid_system_templates_resolve_template_dict(
host,
testvars):
dict1_json = '{"first_key": "first_value", "second_key": "second_value"}'
assert json.dumps(testvars['project_dict1']) == dict1_json
def test_testaid_system_templates_resolve_template_filter_zip(
host,
testvars):
filter_zip_json = '[["first_list_item", "anarchism"], '
filter_zip_json += '["second_list_item", "fortune-anarchism"]]'
assert json.dumps(testvars['project_filter_zip']) == filter_zip_json
def test_testaid_system_templates_resolve_template_filter_dict2items(
host,
testvars):
filter_dict_json = '[{"key": "first_key", '
filter_dict_json += '"value": "first_value"}, '
filter_dict_json += '{"key": "second_key", '
filter_dict_json += '"value": "second_value"}]'
assert json.dumps(testvars['project_filter_dict2items']) == \
filter_dict_json
| 28.783217 | 77 | 0.719631 |
163064019a865a44ee4212a77f525a36189841fe | 563 | py | Python | adapters/innr/SP120.py | lily148/domoticz-zigbee2mqtt-plugin | 5d8b8121b0a86d341ca054df68cb26697e023af9 | [
"MIT"
] | null | null | null | adapters/innr/SP120.py | lily148/domoticz-zigbee2mqtt-plugin | 5d8b8121b0a86d341ca054df68cb26697e023af9 | [
"MIT"
] | null | null | null | adapters/innr/SP120.py | lily148/domoticz-zigbee2mqtt-plugin | 5d8b8121b0a86d341ca054df68cb26697e023af9 | [
"MIT"
] | null | null | null | from adapters.on_off_switch_adapter import OnOffSwitchAdapter
from devices.sensor.current import CurrentSensor
from devices.sensor.voltage import VoltageSensor
from devices.sensor.kwh import KwhSensor
class InnrSP120Plug(OnOffSwitchAdapter):
def __init__(self, devices):
super().__init__(devices)
self.devices.append(VoltageSensor(devices, 'volt', 'voltage', ' (Voltage)'))
self.devices.append(CurrentSensor(devices, 'ampere', 'current', ' (Current)'))
self.devices.append(KwhSensor(devices, 'power', ['power'], ' (Power)'))
| 43.307692 | 86 | 0.738899 |
a1ca0e5b229e00901396d5e239fe4ddba417f443 | 7,812 | py | Python | pymodbus/utilities.py | vmacari/pymodbus | ec97e2f2b50c6db0a932f44e550a5dee60bf0970 | [
"BSD-3-Clause"
] | null | null | null | pymodbus/utilities.py | vmacari/pymodbus | ec97e2f2b50c6db0a932f44e550a5dee60bf0970 | [
"BSD-3-Clause"
] | null | null | null | pymodbus/utilities.py | vmacari/pymodbus | ec97e2f2b50c6db0a932f44e550a5dee60bf0970 | [
"BSD-3-Clause"
] | null | null | null | """
Modbus Utilities
-----------------
A collection of utilities for packing data, unpacking
data computing checksums, and decode checksums.
"""
from pymodbus.compat import int2byte, byte2int, IS_PYTHON3
from six import string_types
import logging
_logger = logging.getLogger(__name__)
class ModbusTransactionState(object):
"""
Modbus Client States
"""
IDLE = 0
SENDING = 1
WAITING_FOR_REPLY = 2
WAITING_TURNAROUND_DELAY = 3
PROCESSING_REPLY = 4
PROCESSING_ERROR = 5
TRANSACTION_COMPLETE = 6
RETRYING = 7
NO_RESPONSE_STATE = 8
@classmethod
def to_string(cls, state):
states = {
ModbusTransactionState.IDLE: "IDLE",
ModbusTransactionState.SENDING: "SENDING",
ModbusTransactionState.WAITING_FOR_REPLY: "WAITING_FOR_REPLY",
ModbusTransactionState.WAITING_TURNAROUND_DELAY: "WAITING_TURNAROUND_DELAY",
ModbusTransactionState.PROCESSING_REPLY: "PROCESSING_REPLY",
ModbusTransactionState.PROCESSING_ERROR: "PROCESSING_ERROR",
ModbusTransactionState.TRANSACTION_COMPLETE: "TRANSACTION_COMPLETE",
ModbusTransactionState.RETRYING: "RETRYING TRANSACTION",
}
return states.get(state, None)
# --------------------------------------------------------------------------- #
# Helpers
# --------------------------------------------------------------------------- #
def default(value):
"""
Given a python object, return the default value
of that object.
:param value: The value to get the default of
:returns: The default value
"""
return type(value)()
def dict_property(store, index):
""" Helper to create class properties from a dictionary.
Basically this allows you to remove a lot of possible
boilerplate code.
    :param store: The store to pull from
:param index: The index into the store to close over
:returns: An initialized property set
"""
if hasattr(store, '__call__'):
getter = lambda self: store(self)[index]
setter = lambda self, value: store(self).__setitem__(index, value)
elif isinstance(store, str):
getter = lambda self: self.__getattribute__(store)[index]
setter = lambda self, value: self.__getattribute__(store).__setitem__(
index, value)
else:
getter = lambda self: store[index]
setter = lambda self, value: store.__setitem__(index, value)
return property(getter, setter)
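# Illustrative sketch (not part of the original module): a hypothetical holder
# class exercising the three supported `store` forms - an attribute name, a
# callable, and a dictionary captured directly. Never called by the library.
def _example_dict_property():
    shared = {'mode': 'rtu'}
    class _Settings(object):
        def __init__(self):
            self._values = {'port': 502, 'unit': 1}
        port = dict_property('_values', 'port')                   # attribute name
        unit = dict_property(lambda self: self._values, 'unit')   # callable
        mode = dict_property(shared, 'mode')                      # plain dict
    s = _Settings()
    s.port = 1502   # writes through to s._values['port']
    return s.port, s.unit, s.mode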
# --------------------------------------------------------------------------- #
# Bit packing functions
# --------------------------------------------------------------------------- #
def pack_bitstring(bits):
""" Creates a string out of an array of bits
:param bits: A bit array
example::
bits = [False, True, False, True]
result = pack_bitstring(bits)
"""
ret = b''
i = packed = 0
for bit in bits:
if bit:
packed += 128
i += 1
if i == 8:
ret += int2byte(packed)
i = packed = 0
else:
packed >>= 1
if 0 < i < 8:
packed >>= (7 - i)
ret += int2byte(packed)
return ret
def unpack_bitstring(string):
""" Creates bit array out of a string
:param string: The modbus data packet to decode
example::
bytes = 'bytes to decode'
result = unpack_bitstring(bytes)
"""
byte_count = len(string)
bits = []
for byte in range(byte_count):
if IS_PYTHON3:
value = byte2int(int(string[byte]))
else:
value = byte2int(string[byte])
for _ in range(8):
bits.append((value & 1) == 1)
value >>= 1
return bits
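# Illustrative round trip (not part of the original module): pack_bitstring()
# pads the final byte, so unpacking always yields a multiple of eight bits and
# callers are expected to slice back to the original length.
def _example_bitstring_roundtrip():
    bits = [False, True, False, True]
    packed = pack_bitstring(bits)          # b'\x0a': first list element -> least significant bit
    recovered = unpack_bitstring(packed)   # eight booleans, the last four are padding
    assert recovered[:len(bits)] == bits
    return recovered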
def make_byte_string(s):
"""
Returns byte string from a given string, python3 specific fix
:param s:
:return:
"""
if IS_PYTHON3 and isinstance(s, string_types):
s = s.encode()
return s
# --------------------------------------------------------------------------- #
# Error Detection Functions
# --------------------------------------------------------------------------- #
def __generate_crc16_table():
""" Generates a crc16 lookup table
.. note:: This will only be generated once
"""
result = []
for byte in range(256):
crc = 0x0000
for _ in range(8):
if (byte ^ crc) & 0x0001:
crc = (crc >> 1) ^ 0xa001
else: crc >>= 1
byte >>= 1
result.append(crc)
return result
__crc16_table = __generate_crc16_table()
def computeCRC(data):
""" Computes a crc16 on the passed in string. For modbus,
this is only used on the binary serial protocols (in this
case RTU).
The difference between modbus's crc16 and a normal crc16
is that modbus starts the crc value out at 0xffff.
:param data: The data to create a crc16 of
:returns: The calculated CRC
"""
crc = 0xffff
for a in data:
idx = __crc16_table[(crc ^ byte2int(a)) & 0xff]
crc = ((crc >> 8) & 0xff) ^ idx
return ((crc << 8) & 0xff00) | ((crc >> 8) & 0x00ff)
def checkCRC(data, check):
""" Checks if the data matches the passed in CRC
:param data: The data to create a crc16 of
:param check: The CRC to validate
:returns: True if matched, False otherwise
"""
calculated_crc = computeCRC(data)
_logger.debug(f"Calculated CRC {hex(calculated_crc)}, expected {hex(check)}")
return calculated_crc == check
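# Illustrative sketch (not part of the original module): computing and checking
# the CRC of a hypothetical RTU request body. The returned value is already
# byte-swapped, so a framer can append it big-endian (e.g. struct.pack(">H", crc)).
def _example_crc_usage():
    frame = b'\x01\x03\x00\x00\x00\x0a'   # unit id, function code, address, quantity
    crc = computeCRC(frame)
    assert checkCRC(frame, crc)
    return frame + struct.pack(">H", crc)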
def computeLRC(data):
""" Used to compute the longitudinal redundancy check
against a string. This is only used on the serial ASCII
modbus protocol. A full description of this implementation
    can be found in appendix B of the serial line modbus description.
:param data: The data to apply a lrc to
:returns: The calculated LRC
"""
lrc = sum(byte2int(a) for a in data) & 0xff
lrc = (lrc ^ 0xff) + 1
return lrc & 0xff
def checkLRC(data, check):
""" Checks if the passed in data matches the LRC
:param data: The data to calculate
:param check: The LRC to validate
:returns: True if matched, False otherwise
"""
return computeLRC(data) == check
def rtuFrameSize(data, byte_count_pos):
""" Calculates the size of the frame based on the byte count.
:param data: The buffer containing the frame.
:param byte_count_pos: The index of the byte count in the buffer.
:returns: The size of the frame.
The structure of frames with a byte count field is always the
same:
- first, there are some header fields
- then the byte count field
- then as many data bytes as indicated by the byte count,
- finally the CRC (two bytes).
To calculate the frame size, it is therefore sufficient to extract
the contents of the byte count field, add the position of this
field, and finally increment the sum by three (one byte for the
byte count field, two for the CRC).
"""
    # slave_id + function_code + 2 CRCs
return byte2int(len(data) - 2 - 2)
#return byte2int(data[byte_count_pos]) + byte_count_pos + 3
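# Illustrative sketch (not part of the original module): the byte-count
# arithmetic from the docstring applied to a hypothetical read response
# (unit id, function code, byte count of 4, four data bytes, two CRC bytes).
# Note that the body above was changed in this fork to derive the size from
# len(data) rather than from the byte count field used in the commented-out line.
def _example_rtu_frame_size():
    frame = b'\x01\x03\x04\x00\x01\x00\x02\xaa\xbb'   # CRC bytes are placeholders
    byte_count_pos = 2
    assert byte2int(frame[byte_count_pos]) + byte_count_pos + 3 == len(frame)
    return len(frame)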
def hexlify_packets(packet):
"""
Returns hex representation of bytestring received
:param packet:
:return:
"""
if not packet:
return ''
if IS_PYTHON3:
return " ".join([hex(byte2int(x)) for x in packet])
else:
return u" ".join([hex(byte2int(x)) for x in packet])
# --------------------------------------------------------------------------- #
# Exported symbols
# --------------------------------------------------------------------------- #
__all__ = [
'pack_bitstring', 'unpack_bitstring', 'default',
'computeCRC', 'checkCRC', 'computeLRC', 'checkLRC', 'rtuFrameSize'
]
| 29.258427 | 88 | 0.584357 |
76a11a27553fb8227bce6ed35f26e14a0df47d01 | 2,256 | py | Python | utils.py | asakko/covid-vaccination-monitor | cce99652958842eb57f5b6a42d8d9dc94f068dc6 | [
"MIT"
] | null | null | null | utils.py | asakko/covid-vaccination-monitor | cce99652958842eb57f5b6a42d8d9dc94f068dc6 | [
"MIT"
] | null | null | null | utils.py | asakko/covid-vaccination-monitor | cce99652958842eb57f5b6a42d8d9dc94f068dc6 | [
"MIT"
] | null | null | null | import json
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pandas as pd
import requests
import sys
import warnings
from collections import OrderedDict
from datetime import datetime, timezone
from io import StringIO
def load_data():
url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/vaccinations.csv'
r = requests.get(url, allow_redirects=True)
df = pd.read_csv(StringIO(r.content.decode("utf-8")), low_memory=False, parse_dates=['date'])[['location', 'date', 'total_vaccinations_per_hundred']]
return df
def plot_chart(ax, df, cmap, m:int = 30):
n = len(df.date.unique())
min_date, max_date = df.date.min(), df.date.max()
days = (max_date-min_date).days
delta = 1+days//m
dates = [min_date+pd.Timedelta(days=i*delta) for i in range(1+days//delta)]
for loc, color in cmap.items():
ax.plot('date', 'total_vaccinations_per_hundred', data=df[(df.location==loc) & (~df.total_vaccinations_per_hundred.isna())].sort_values('date'), marker='o', color=color, label=loc)
ax.set_ylabel('Vaccinated [%]')
ax.legend(loc=2)
ax.set_xticks(dates)
ax.set_xticklabels(dates)
ax.tick_params(axis='x', labelrotation=45)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
main_country_vs_color = OrderedDict({
'Israel': '#1A85FF',
'Bahrain': '#FEFE62',
'United Kingdom': '#40B0A6',
'United States': '#4B0092',
'Canada': '#D35FB7',
'China': '#DC3220',
'Russia': '#994F00',
'European Union': '#E66100',
'Japan': '#E1BE6A',
'Australia': '#000000',
'India': '#D35FB7'
})
european_country_vs_color = OrderedDict({
'Austria': '#000000',
'Bulgaria': '#004949',
'Croatia': '#009292',
'Denmark': '#ff6db6',
'Estonia': '#ffb6db',
'France': '#490092',
'Germany': '#006ddb',
'Greece': '#b66dff',
'Hungary': '#6db6ff',
'Italy': '#b6dbff',
'Latvia': '#920000',
'Lithuania': '#924900',
'Luxembourg': '#db6d00',
'Poland': '#24ff24',
'Portugal': '#ffff6d',
'Romania': '#000000'
}) | 33.176471 | 188 | 0.612145 |
32717b0f34ce8f25426fa5c1e9d84844a6f2effa | 1,216 | py | Python | electrum_dsv/gui/kivy/uix/dialogs/qr_scanner.py | mboyd1/electrum-dsv | 1f8e26e6f6a50827fd83dfe018c5916fadde10c1 | [
"MIT"
] | null | null | null | electrum_dsv/gui/kivy/uix/dialogs/qr_scanner.py | mboyd1/electrum-dsv | 1f8e26e6f6a50827fd83dfe018c5916fadde10c1 | [
"MIT"
] | null | null | null | electrum_dsv/gui/kivy/uix/dialogs/qr_scanner.py | mboyd1/electrum-dsv | 1f8e26e6f6a50827fd83dfe018c5916fadde10c1 | [
"MIT"
] | null | null | null | from kivy.app import App
from kivy.factory import Factory
from kivy.lang import Builder
Factory.register('QRScanner', module='electrum_dsv.gui.kivy.qr_scanner')
class QrScannerDialog(Factory.AnimatedPopup):
__events__ = ('on_complete', )
def on_symbols(self, instance, value):
instance.stop()
self.dismiss()
data = value[0].data
self.dispatch('on_complete', data)
def on_complete(self, x):
''' Default Handler for on_complete event.
'''
print(x)
Builder.load_string('''
#:import KIVY_GUI_PATH electrum_dsv.gui.kivy.KIVY_GUI_PATH
<QrScannerDialog>
title:
_(\
'[size=18dp]Hold your QRCode up to the camera[/size][size=7dp]\\n[/size]')
title_size: '24sp'
border: 7, 7, 7, 7
size_hint: None, None
size: '340dp', '290dp'
pos_hint: {'center_y': .53}
#separator_color: .89, .89, .89, 1
#separator_height: '1.2dp'
#title_color: .437, .437, .437, 1
#background: f'atlas://{KIVY_GUI_PATH}/theming/light/dialog'
on_activate:
qrscr.start()
qrscr.size = self.size
on_deactivate: qrscr.stop()
QRScanner:
id: qrscr
on_symbols: root.on_symbols(*args)
''')
| 25.87234 | 82 | 0.635691 |
f0a60b0e5f1343a97320ef8e18114c4bdd37831e | 32,708 | py | Python | os_ken/lib/packet/icmpv6.py | rolaya/os-ken | 10009e41539c737c7c423f13e4f5bc5f46d219ff | ["Apache-2.0"] | 1 | 2019-04-24T04:01:07.000Z | 2019-04-24T04:01:07.000Z | os_ken/lib/packet/icmpv6.py | anlaneg/os-ken | 379a7694c3129cc0156343af71f4fca8830d9de5 | ["Apache-2.0"] | null | null | null | os_ken/lib/packet/icmpv6.py | anlaneg/os-ken | 379a7694c3129cc0156343af71f4fca8830d9de5 | ["Apache-2.0"] | null | null | null |
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import struct
import six
import sys
import array
import binascii
from . import packet_base
from . import packet_utils
from os_ken.lib import addrconv
from os_ken.lib import stringify
ICMPV6_DST_UNREACH = 1 # dest unreachable, codes:
ICMPV6_PACKET_TOO_BIG = 2 # packet too big
ICMPV6_TIME_EXCEEDED = 3 # time exceeded, code:
ICMPV6_PARAM_PROB = 4 # ip6 header bad
ICMPV6_ECHO_REQUEST = 128 # echo service
ICMPV6_ECHO_REPLY = 129 # echo reply
MLD_LISTENER_QUERY = 130 # multicast listener query
MLD_LISTENER_REPOR = 131 # multicast listener report
MLD_LISTENER_DONE = 132 # multicast listener done
MLDV2_LISTENER_REPORT = 143 # multicast listener report (v2)
# RFC2292 decls
ICMPV6_MEMBERSHIP_QUERY = 130 # group membership query
ICMPV6_MEMBERSHIP_REPORT = 131 # group membership report
ICMPV6_MEMBERSHIP_REDUCTION = 132 # group membership termination
ND_ROUTER_SOLICIT = 133 # router solicitation
ND_ROUTER_ADVERT = 134 # router advertisement
ND_NEIGHBOR_SOLICIT = 135 # neighbor solicitation
ND_NEIGHBOR_ADVERT = 136 # neighbor advertisement
ND_REDIREC = 137 # redirect
ICMPV6_ROUTER_RENUMBERING = 138 # router renumbering
ICMPV6_WRUREQUEST = 139 # who are you request
ICMPV6_WRUREPLY = 140 # who are you reply
ICMPV6_FQDN_QUERY = 139 # FQDN query
ICMPV6_FQDN_REPLY = 140 # FQDN reply
ICMPV6_NI_QUERY = 139 # node information request
ICMPV6_NI_REPLY = 140 # node information reply
ICMPV6_MAXTYPE = 201
# ND_OPTIONS from RFC 4861
ND_OPTION_SLA = 1 # Source Link-Layer Address
ND_OPTION_TLA = 2 # Target Link-Layer Address
ND_OPTION_PI = 3 # Prefix Information
ND_OPTION_RH = 4 # Redirected Header
ND_OPTION_MTU = 5 # MTU
MODE_IS_INCLUDE = 1
MODE_IS_EXCLUDE = 2
CHANGE_TO_INCLUDE_MODE = 3
CHANGE_TO_EXCLUDE_MODE = 4
ALLOW_NEW_SOURCES = 5
BLOCK_OLD_SOURCES = 6
class icmpv6(packet_base.PacketBase):
r"""ICMPv6 (RFC 2463) header encoder/decoder class.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{35em}|
============== ====================
Attribute Description
============== ====================
type\_ Type
code Code
csum CheckSum
(0 means automatically-calculate when encoding)
data Payload.
os_ken.lib.packet.icmpv6.echo object, \
os_ken.lib.packet.icmpv6.nd_neighbor object, \
os_ken.lib.packet.icmpv6.nd_router_solicit object, \
os_ken.lib.packet.icmpv6.nd_router_advert object, \
os_ken.lib.packet.icmpv6.mld object, \
or a bytearray.
============== ====================
"""
_PACK_STR = '!BBH'
_MIN_LEN = struct.calcsize(_PACK_STR)
_ICMPV6_TYPES = {}
@staticmethod
def register_icmpv6_type(*args):
def _register_icmpv6_type(cls):
for type_ in args:
icmpv6._ICMPV6_TYPES[type_] = cls
return cls
return _register_icmpv6_type
def __init__(self, type_=0, code=0, csum=0, data=b''):
super(icmpv6, self).__init__()
self.type_ = type_
self.code = code
self.csum = csum
self.data = data
@classmethod
def parser(cls, buf):
(type_, code, csum) = struct.unpack_from(cls._PACK_STR, buf)
msg = cls(type_, code, csum)
offset = cls._MIN_LEN
if len(buf) > offset:
cls_ = cls._ICMPV6_TYPES.get(type_, None)
if cls_:
msg.data = cls_.parser(buf, offset)
else:
msg.data = buf[offset:]
return msg, None, None
def serialize(self, payload, prev):
hdr = bytearray(struct.pack(icmpv6._PACK_STR, self.type_,
self.code, self.csum))
if self.data:
if self.type_ in icmpv6._ICMPV6_TYPES:
assert isinstance(self.data, _ICMPv6Payload)
hdr += self.data.serialize()
else:
hdr += self.data
if self.csum == 0:
self.csum = packet_utils.checksum_ip(prev, len(hdr), hdr + payload)
struct.pack_into('!H', hdr, 2, self.csum)
return hdr
def __len__(self):
return self._MIN_LEN + len(self.data)
@six.add_metaclass(abc.ABCMeta)
class _ICMPv6Payload(stringify.StringifyMixin):
"""
Base class for the payload of ICMPv6 packet.
"""
@icmpv6.register_icmpv6_type(ND_NEIGHBOR_SOLICIT, ND_NEIGHBOR_ADVERT)
class nd_neighbor(_ICMPv6Payload):
"""ICMPv6 sub encoder/decoder class for Neighbor Solicitation and
Neighbor Advertisement messages. (RFC 4861)
This is used with os_ken.lib.packet.icmpv6.icmpv6.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{35em}|
============== ====================
Attribute Description
============== ====================
res R,S,O Flags for Neighbor Advertisement. \
The 3 MSBs of "Reserved" field for Neighbor Solicitation.
dst Target Address
option a derived object of os_ken.lib.packet.icmpv6.nd_option \
or a bytearray. None if no options.
============== ====================
"""
_PACK_STR = '!I16s'
_MIN_LEN = struct.calcsize(_PACK_STR)
_ND_OPTION_TYPES = {}
_TYPE = {
'ascii': [
'dst'
]
}
@staticmethod
def register_nd_option_type(*args):
def _register_nd_option_type(cls):
nd_neighbor._ND_OPTION_TYPES[cls.option_type()] = cls
return cls
return _register_nd_option_type(args[0])
def __init__(self, res=0, dst='::', option=None):
self.res = res
self.dst = dst
self.option = option
@classmethod
def parser(cls, buf, offset):
(res, dst) = struct.unpack_from(cls._PACK_STR, buf, offset)
offset += cls._MIN_LEN
option = None
if len(buf) > offset:
(type_, length) = struct.unpack_from('!BB', buf, offset)
if length == 0:
raise struct.error('Invalid length: {len}'.format(len=length))
cls_ = cls._ND_OPTION_TYPES.get(type_)
if cls_ is not None:
option = cls_.parser(buf, offset)
else:
option = buf[offset:]
msg = cls(res >> 29, addrconv.ipv6.bin_to_text(dst), option)
return msg
def serialize(self):
res = self.res << 29
hdr = bytearray(struct.pack(
nd_neighbor._PACK_STR, res,
addrconv.ipv6.text_to_bin(self.dst)))
if self.option is not None:
if isinstance(self.option, nd_option):
hdr.extend(self.option.serialize())
else:
hdr.extend(self.option)
return six.binary_type(hdr)
def __len__(self):
length = self._MIN_LEN
if self.option is not None:
length += len(self.option)
return length
@icmpv6.register_icmpv6_type(ND_ROUTER_SOLICIT)
class nd_router_solicit(_ICMPv6Payload):
"""ICMPv6 sub encoder/decoder class for Router Solicitation messages.
(RFC 4861)
This is used with os_ken.lib.packet.icmpv6.icmpv6.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{35em}|
============== ====================
Attribute Description
============== ====================
res This field is unused. It MUST be initialized to zero.
option a derived object of os_ken.lib.packet.icmpv6.nd_option \
or a bytearray. None if no options.
============== ====================
"""
_PACK_STR = '!I'
_MIN_LEN = struct.calcsize(_PACK_STR)
_ND_OPTION_TYPES = {}
@staticmethod
def register_nd_option_type(*args):
def _register_nd_option_type(cls):
nd_router_solicit._ND_OPTION_TYPES[cls.option_type()] = cls
return cls
return _register_nd_option_type(args[0])
def __init__(self, res=0, option=None):
self.res = res
self.option = option
@classmethod
def parser(cls, buf, offset):
(res, ) = struct.unpack_from(cls._PACK_STR, buf, offset)
offset += cls._MIN_LEN
option = None
if len(buf) > offset:
(type_, length) = struct.unpack_from('!BB', buf, offset)
if length == 0:
raise struct.error('Invalid length: {len}'.format(len=length))
cls_ = cls._ND_OPTION_TYPES.get(type_)
if cls_ is not None:
option = cls_.parser(buf, offset)
else:
option = buf[offset:]
msg = cls(res, option)
return msg
def serialize(self):
hdr = bytearray(struct.pack(
nd_router_solicit._PACK_STR, self.res))
if self.option is not None:
if isinstance(self.option, nd_option):
hdr.extend(self.option.serialize())
else:
hdr.extend(self.option)
return six.binary_type(hdr)
def __len__(self):
length = self._MIN_LEN
if self.option is not None:
length += len(self.option)
return length
@icmpv6.register_icmpv6_type(ND_ROUTER_ADVERT)
class nd_router_advert(_ICMPv6Payload):
"""ICMPv6 sub encoder/decoder class for Router Advertisement messages.
(RFC 4861)
This is used with os_ken.lib.packet.icmpv6.icmpv6.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{35em}|
============== ====================
Attribute Description
============== ====================
ch_l Cur Hop Limit.
res M,O Flags for Router Advertisement.
rou_l Router Lifetime.
rea_t Reachable Time.
ret_t Retrans Timer.
options List of a derived object of \
os_ken.lib.packet.icmpv6.nd_option or a bytearray. \
None if no options.
============== ====================
"""
_PACK_STR = '!BBHII'
_MIN_LEN = struct.calcsize(_PACK_STR)
_ND_OPTION_TYPES = {}
@staticmethod
def register_nd_option_type(*args):
def _register_nd_option_type(cls):
nd_router_advert._ND_OPTION_TYPES[cls.option_type()] = cls
return cls
return _register_nd_option_type(args[0])
def __init__(self, ch_l=0, res=0, rou_l=0, rea_t=0, ret_t=0, options=None):
self.ch_l = ch_l
self.res = res
self.rou_l = rou_l
self.rea_t = rea_t
self.ret_t = ret_t
options = options or []
assert isinstance(options, list)
self.options = options
@classmethod
def parser(cls, buf, offset):
(ch_l, res, rou_l, rea_t, ret_t
) = struct.unpack_from(cls._PACK_STR, buf, offset)
offset += cls._MIN_LEN
options = []
while len(buf) > offset:
(type_, length) = struct.unpack_from('!BB', buf, offset)
if length == 0:
raise struct.error('Invalid length: {len}'.format(len=length))
cls_ = cls._ND_OPTION_TYPES.get(type_)
if cls_ is not None:
option = cls_.parser(buf, offset)
else:
option = buf[offset:offset + (length * 8)]
options.append(option)
offset += len(option)
msg = cls(ch_l, res >> 6, rou_l, rea_t, ret_t, options)
return msg
def serialize(self):
res = self.res << 6
hdr = bytearray(struct.pack(
nd_router_advert._PACK_STR, self.ch_l, res, self.rou_l,
self.rea_t, self.ret_t))
for option in self.options:
if isinstance(option, nd_option):
hdr.extend(option.serialize())
else:
hdr.extend(option)
return six.binary_type(hdr)
def __len__(self):
length = self._MIN_LEN
for option in self.options:
length += len(option)
return length
@six.add_metaclass(abc.ABCMeta)
class nd_option(stringify.StringifyMixin):
@classmethod
@abc.abstractmethod
def option_type(cls):
pass
@abc.abstractmethod
def __init__(self, _type, length):
self._type = _type
self.length = length
@classmethod
@abc.abstractmethod
def parser(cls, buf):
pass
@abc.abstractmethod
def serialize(self):
pass
def __len__(self):
return self._MIN_LEN
class nd_option_la(nd_option):
_PACK_STR = '!BB6s'
_MIN_LEN = struct.calcsize(_PACK_STR)
_TYPE = {
'ascii': [
'hw_src'
]
}
@abc.abstractmethod
def __init__(self, length, hw_src, data):
super(nd_option_la, self).__init__(self.option_type(), length)
self.hw_src = hw_src
self.data = data
@classmethod
def parser(cls, buf, offset):
(_, length, hw_src) = struct.unpack_from(cls._PACK_STR, buf, offset)
msg = cls(length, addrconv.mac.bin_to_text(hw_src))
offset += cls._MIN_LEN
if len(buf) > offset:
msg.data = buf[offset:]
return msg
def serialize(self):
buf = bytearray(struct.pack(
self._PACK_STR, self.option_type(), self.length,
addrconv.mac.text_to_bin(self.hw_src)))
if self.data is not None:
buf.extend(self.data)
mod = len(buf) % 8
if mod:
buf.extend(bytearray(8 - mod))
if 0 == self.length:
self.length = len(buf) // 8
struct.pack_into('!B', buf, 1, self.length)
return six.binary_type(buf)
def __len__(self):
length = self._MIN_LEN
if self.data is not None:
length += len(self.data)
return length
@nd_neighbor.register_nd_option_type
@nd_router_solicit.register_nd_option_type
@nd_router_advert.register_nd_option_type
class nd_option_sla(nd_option_la):
"""ICMPv6 sub encoder/decoder class for Neighbor discovery
Source Link-Layer Address Option. (RFC 4861)
This is used with os_ken.lib.packet.icmpv6.nd_neighbor,
os_ken.lib.packet.icmpv6.nd_router_solicit or
os_ken.lib.packet.icmpv6.nd_router_advert.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{35em}|
============== ====================
Attribute Description
============== ====================
length length of the option. \
(0 means automatically-calculate when encoding)
hw_src Link-Layer Address. \
NOTE: If the address is longer than 6 octets this contains \
the first 6 octets in the address. \
This implementation assumes the address has at least \
6 octets.
data A bytearray which contains the rest of Link-Layer Address \
and padding. When encoding a packet, it's user's \
responsibility to provide necessary padding for 8-octets \
alignment required by the protocol.
============== ====================
"""
@classmethod
def option_type(cls):
return ND_OPTION_SLA
def __init__(self, length=0, hw_src='00:00:00:00:00:00', data=None):
super(nd_option_sla, self).__init__(length, hw_src, data)
@nd_neighbor.register_nd_option_type
class nd_option_tla(nd_option_la):
"""ICMPv6 sub encoder/decoder class for Neighbor discovery
Target Link-Layer Address Option. (RFC 4861)
This is used with os_ken.lib.packet.icmpv6.nd_neighbor.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{35em}|
============== ====================
Attribute Description
============== ====================
length length of the option. \
(0 means automatically-calculate when encoding)
hw_src Link-Layer Address. \
NOTE: If the address is longer than 6 octets this contains \
the first 6 octets in the address. \
This implementation assumes the address has at least \
6 octets.
data A bytearray which contains the rest of Link-Layer Address \
and padding. When encoding a packet, it's user's \
responsibility to provide necessary padding for 8-octets \
alignment required by the protocol.
============== ====================
"""
@classmethod
def option_type(cls):
return ND_OPTION_TLA
def __init__(self, length=0, hw_src='00:00:00:00:00:00', data=None):
super(nd_option_tla, self).__init__(length, hw_src, data)
@nd_router_advert.register_nd_option_type
class nd_option_pi(nd_option):
r"""ICMPv6 sub encoder/decoder class for Neighbor discovery
Prefix Information Option. (RFC 4861)
This is used with os_ken.lib.packet.icmpv6.nd_router_advert.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{35em}|
============== ====================
Attribute Description
============== ====================
length length of the option. \
(0 means automatically-calculate when encoding)
pl Prefix Length.
res1 L,A,R\* Flags for Prefix Information.
val_l Valid Lifetime.
pre_l Preferred Lifetime.
res2 This field is unused. It MUST be initialized to zero.
prefix An IP address or a prefix of an IP address.
============== ====================
\*R flag is defined in (RFC 3775)
"""
_PACK_STR = '!BBBBIII16s'
_MIN_LEN = struct.calcsize(_PACK_STR)
_TYPE = {
'ascii': [
'prefix'
]
}
@classmethod
def option_type(cls):
return ND_OPTION_PI
def __init__(self, length=0, pl=0, res1=0, val_l=0, pre_l=0, res2=0,
prefix='::'):
super(nd_option_pi, self).__init__(self.option_type(), length)
self.pl = pl
self.res1 = res1
self.val_l = val_l
self.pre_l = pre_l
self.res2 = res2
self.prefix = prefix
@classmethod
def parser(cls, buf, offset):
(_, length, pl, res1, val_l, pre_l, res2, prefix
) = struct.unpack_from(cls._PACK_STR, buf, offset)
msg = cls(length, pl, res1 >> 5, val_l, pre_l, res2,
addrconv.ipv6.bin_to_text(prefix))
return msg
def serialize(self):
res1 = self.res1 << 5
hdr = bytearray(struct.pack(
self._PACK_STR, self.option_type(), self.length, self.pl,
res1, self.val_l, self.pre_l, self.res2,
addrconv.ipv6.text_to_bin(self.prefix)))
if 0 == self.length:
self.length = len(hdr) // 8
struct.pack_into('!B', hdr, 1, self.length)
return six.binary_type(hdr)
@icmpv6.register_icmpv6_type(ICMPV6_ECHO_REPLY, ICMPV6_ECHO_REQUEST)
class echo(_ICMPv6Payload):
"""ICMPv6 sub encoder/decoder class for Echo Request and Echo Reply
messages.
This is used with os_ken.lib.packet.icmpv6.icmpv6 for
ICMPv6 Echo Request and Echo Reply messages.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
============== ====================
Attribute Description
============== ====================
id Identifier
seq Sequence Number
data Data
============== ====================
"""
_PACK_STR = '!HH'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, id_=0, seq=0, data=None):
self.id = id_
self.seq = seq
self.data = data
@classmethod
def parser(cls, buf, offset):
(id_, seq) = struct.unpack_from(cls._PACK_STR, buf, offset)
msg = cls(id_, seq)
offset += cls._MIN_LEN
if len(buf) > offset:
msg.data = buf[offset:]
return msg
def serialize(self):
hdr = bytearray(struct.pack(echo._PACK_STR, self.id,
self.seq))
if self.data is not None:
hdr += bytearray(self.data)
return hdr
def __len__(self):
length = self._MIN_LEN
if self.data is not None:
length += len(self.data)
return length
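# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): building an
# Echo Request from the classes above. A zero csum is filled in automatically
# when icmpv6.serialize() is later called with the preceding IPv6 header.
# ---------------------------------------------------------------------------
def _example_echo_request():
    return icmpv6(type_=ICMPV6_ECHO_REQUEST, code=0, csum=0,
                  data=echo(id_=0x1234, seq=1, data=b'ping'))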
@icmpv6.register_icmpv6_type(
MLD_LISTENER_QUERY, MLD_LISTENER_REPOR, MLD_LISTENER_DONE)
class mld(_ICMPv6Payload):
"""ICMPv6 sub encoder/decoder class for MLD Lister Query,
MLD Listener Report, and MLD Listener Done messages. (RFC 2710)
http://www.ietf.org/rfc/rfc2710.txt
This is used with os_ken.lib.packet.icmpv6.icmpv6.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
============== =========================================
Attribute Description
============== =========================================
maxresp max response time in millisecond. it is
meaningful only in Query Message.
address a group address value.
============== =========================================
"""
_PACK_STR = '!H2x16s'
_MIN_LEN = struct.calcsize(_PACK_STR)
_TYPE = {
'ascii': [
'address'
]
}
def __init__(self, maxresp=0, address='::'):
self.maxresp = maxresp
self.address = address
@classmethod
def parser(cls, buf, offset):
if cls._MIN_LEN < len(buf[offset:]):
msg = mldv2_query.parser(buf[offset:])
else:
(maxresp, address) = struct.unpack_from(
cls._PACK_STR, buf, offset)
msg = cls(maxresp, addrconv.ipv6.bin_to_text(address))
return msg
def serialize(self):
buf = struct.pack(mld._PACK_STR, self.maxresp,
addrconv.ipv6.text_to_bin(self.address))
return buf
def __len__(self):
return self._MIN_LEN
class mldv2_query(mld):
"""
ICMPv6 sub encoder/decoder class for MLD v2 Listener Query messages.
(RFC 3810)
http://www.ietf.org/rfc/rfc3810.txt
This is used with os_ken.lib.packet.icmpv6.icmpv6.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
============== =========================================
Attribute Description
============== =========================================
maxresp max response time in millisecond. it is
meaningful only in Query Message.
address a group address value.
s_flg when set to 1, routers suppress the timer
process.
qrv robustness variable for a querier.
qqic an interval time for a querier in unit of
seconds.
num a number of the multicast servers.
srcs a list of IPv6 addresses of the multicast
servers.
============== =========================================
"""
_PACK_STR = '!H2x16sBBH'
_MIN_LEN = struct.calcsize(_PACK_STR)
_TYPE = {
'ascii': [
'address'
],
'asciilist': [
'srcs'
]
}
def __init__(self, maxresp=0, address='::', s_flg=0, qrv=2,
qqic=0, num=0, srcs=None):
super(mldv2_query, self).__init__(maxresp, address)
self.s_flg = s_flg
self.qrv = qrv
self.qqic = qqic
self.num = num
srcs = srcs or []
assert isinstance(srcs, list)
for src in srcs:
assert isinstance(src, str)
self.srcs = srcs
@classmethod
def parser(cls, buf):
(maxresp, address, s_qrv, qqic, num
) = struct.unpack_from(cls._PACK_STR, buf)
s_flg = (s_qrv >> 3) & 0b1
qrv = s_qrv & 0b111
offset = cls._MIN_LEN
srcs = []
while 0 < len(buf[offset:]) and num > len(srcs):
assert 16 <= len(buf[offset:])
(src, ) = struct.unpack_from('16s', buf, offset)
srcs.append(addrconv.ipv6.bin_to_text(src))
offset += 16
assert num == len(srcs)
return cls(maxresp, addrconv.ipv6.bin_to_text(address), s_flg,
qrv, qqic, num, srcs)
def serialize(self):
s_qrv = self.s_flg << 3 | self.qrv
buf = bytearray(struct.pack(self._PACK_STR, self.maxresp,
addrconv.ipv6.text_to_bin(self.address), s_qrv,
self.qqic, self.num))
for src in self.srcs:
buf.extend(struct.pack('16s', addrconv.ipv6.text_to_bin(src)))
if 0 == self.num:
self.num = len(self.srcs)
struct.pack_into('!H', buf, 22, self.num)
return six.binary_type(buf)
def __len__(self):
return self._MIN_LEN + len(self.srcs) * 16
@icmpv6.register_icmpv6_type(MLDV2_LISTENER_REPORT)
class mldv2_report(mld):
"""
ICMPv6 sub encoder/decoder class for MLD v2 Listener Report messages.
(RFC 3810)
http://www.ietf.org/rfc/rfc3810.txt
This is used with os_ken.lib.packet.icmpv6.icmpv6.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
============== =========================================
Attribute Description
============== =========================================
record_num a number of the group records.
records a list of os_ken.lib.packet.icmpv6.mldv2_report_group.
None if no records.
============== =========================================
"""
_PACK_STR = '!2xH'
_MIN_LEN = struct.calcsize(_PACK_STR)
_class_prefixes = ['mldv2_report_group']
def __init__(self, record_num=0, records=None):
self.record_num = record_num
records = records or []
assert isinstance(records, list)
for record in records:
assert isinstance(record, mldv2_report_group)
self.records = records
@classmethod
def parser(cls, buf, offset):
(record_num, ) = struct.unpack_from(cls._PACK_STR, buf, offset)
offset += cls._MIN_LEN
records = []
while 0 < len(buf[offset:]) and record_num > len(records):
record = mldv2_report_group.parser(buf[offset:])
records.append(record)
offset += len(record)
assert record_num == len(records)
return cls(record_num, records)
def serialize(self):
buf = bytearray(struct.pack(self._PACK_STR, self.record_num))
for record in self.records:
buf.extend(record.serialize())
if 0 == self.record_num:
self.record_num = len(self.records)
struct.pack_into('!H', buf, 2, self.record_num)
return six.binary_type(buf)
def __len__(self):
records_len = 0
for record in self.records:
records_len += len(record)
return self._MIN_LEN + records_len
class mldv2_report_group(stringify.StringifyMixin):
r"""
ICMPv6 sub encoder/decoder class for MLD v2 Listener Report Group
Record messages. (RFC 3810)
This is used with os_ken.lib.packet.icmpv6.mldv2_report.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte
order.
__init__ takes the corresponding args in this order.
=============== ====================================================
Attribute Description
=============== ====================================================
type\_ a group record type for v3.
aux_len the length of the auxiliary data in 32-bit words.
num a number of the multicast servers.
address a group address value.
srcs a list of IPv6 addresses of the multicast servers.
aux the auxiliary data.
=============== ====================================================
"""
_PACK_STR = '!BBH16s'
_MIN_LEN = struct.calcsize(_PACK_STR)
_TYPE = {
'ascii': [
'address'
],
'asciilist': [
'srcs'
]
}
def __init__(self, type_=0, aux_len=0, num=0, address='::',
srcs=None, aux=None):
self.type_ = type_
self.aux_len = aux_len
self.num = num
self.address = address
srcs = srcs or []
assert isinstance(srcs, list)
for src in srcs:
assert isinstance(src, str)
self.srcs = srcs
self.aux = aux
@classmethod
def parser(cls, buf):
(type_, aux_len, num, address
) = struct.unpack_from(cls._PACK_STR, buf)
offset = cls._MIN_LEN
srcs = []
while 0 < len(buf[offset:]) and num > len(srcs):
assert 16 <= len(buf[offset:])
(src, ) = struct.unpack_from('16s', buf, offset)
srcs.append(addrconv.ipv6.bin_to_text(src))
offset += 16
assert num == len(srcs)
aux = None
if aux_len:
(aux, ) = struct.unpack_from('%ds' % (aux_len * 4), buf, offset)
msg = cls(type_, aux_len, num, addrconv.ipv6.bin_to_text(address),
srcs, aux)
return msg
def serialize(self):
buf = bytearray(struct.pack(self._PACK_STR, self.type_,
self.aux_len, self.num,
addrconv.ipv6.text_to_bin(self.address)))
for src in self.srcs:
buf.extend(struct.pack('16s', addrconv.ipv6.text_to_bin(src)))
if 0 == self.num:
self.num = len(self.srcs)
struct.pack_into('!H', buf, 2, self.num)
if self.aux is not None:
mod = len(self.aux) % 4
if mod:
self.aux += bytearray(4 - mod)
self.aux = six.binary_type(self.aux)
buf.extend(self.aux)
if 0 == self.aux_len:
self.aux_len = len(self.aux) // 4
struct.pack_into('!B', buf, 1, self.aux_len)
return six.binary_type(buf)
def __len__(self):
return self._MIN_LEN + len(self.srcs) * 16 + self.aux_len * 4
icmpv6.set_classes(icmpv6._ICMPV6_TYPES)
nd_neighbor.set_classes(nd_neighbor._ND_OPTION_TYPES)
nd_router_solicit.set_classes(nd_router_solicit._ND_OPTION_TYPES)
nd_router_advert.set_classes(nd_router_advert._ND_OPTION_TYPES)
| 33.37551 | 83 | 0.578054 |
3926809d4781ea2e15e890d37af86da93bd799ee | 1,667 | py | Python | setup.py | manuhortet/prom2teams | cde06758af1b5574182beff01a991697a151c264 | ["Apache-2.0"] | null | null | null | setup.py | manuhortet/prom2teams | cde06758af1b5574182beff01a991697a151c264 | ["Apache-2.0"] | null | null | null | setup.py | manuhortet/prom2teams | cde06758af1b5574182beff01a991697a151c264 | ["Apache-2.0"] | null | null | null |
from setuptools import setup, find_packages
with open('requirements.txt') as req:
requirements = req.read().splitlines()
with open('README.md') as f:
try:
import pypandoc
readme = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError) as error:
readme = open('README.md').read()
with open('LICENSE') as f:
license = f.read()
setup(name='prom2teams',
version='2.2.0',
description='Project that redirects Prometheus Alert Manager '
'notifications to Microsoft Teams',
long_description=readme,
install_requires=requirements,
setup_requires=[
'flake8',
'pypandoc'
],
scripts=[
'bin/prom2teams',
'bin/prom2teams_uwsgi'
],
package_data={
'': ['*.ini', '*.j2'],
},
include_package_data=True,
data_files=[
('/usr/local/etc/prom2teams', ['bin/wsgi.py'])
],
url='http://github.com/idealista/prom2teams',
author='Idealista, S.A.U',
author_email='labs@idealista.com',
license=license,
packages=find_packages(exclude=('tests', 'docs')),
keywords='microsoft teams prometheus alert',
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Utilities',
'Topic :: Communications :: Chat',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
zip_safe=False)
| 28.254237 | 68 | 0.592082 |
e74560db67e019622b29707819590fdab77909f8 | 724 | py | Python | Chapter10/hillar_django_restful_10_02/restful01/drones/v2/views.py | weiliy/Django-RESTful-Web-Services | 0b26a84ec8005c6a2cef61671b6d009f0780a9cc | ["MIT"] | 95 | 2018-01-22T21:35:21.000Z | 2022-03-30T10:13:35.000Z | Chapter10/hillar_django_restful_10_02/restful01/drones/v2/views.py | weiliy/Django-RESTful-Web-Services | 0b26a84ec8005c6a2cef61671b6d009f0780a9cc | ["MIT"] | 6 | 2020-03-24T16:37:46.000Z | 2021-06-10T21:04:36.000Z | Chapter10/hillar_django_restful_10_02/restful01/drones/v2/views.py | weiliy/Django-RESTful-Web-Services | 0b26a84ec8005c6a2cef61671b6d009f0780a9cc | ["MIT"] | 73 | 2018-01-24T02:38:17.000Z | 2022-01-23T21:02:41.000Z |
"""
Book: Django RESTful Web Services
Author: Gaston C. Hillar - Twitter.com/gastonhillar
Publisher: Packt Publishing Ltd. - http://www.packtpub.com
"""
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.reverse import reverse
from drones import views
class ApiRootVersion2(generics.GenericAPIView):
name = 'api-root'
def get(self, request, *args, **kwargs):
return Response({
'vehicle-categories': reverse(views.DroneCategoryList.name,
request=request),
'vehicles': reverse(views.DroneList.name, request=request),
'pilots': reverse(views.PilotList.name, request=request),
'competitions': reverse(views.CompetitionList.name,
request=request)
})
| 31.478261 | 62 | 0.762431 |
a49a852899938510ac9b67ee3a445c00d24d80b3 | 6,668 | py | Python | simple.py | acamara1498/algs | 683ea919607cc5e4a22d2d5f7095fb643000a6a5 | ["Apache-2.0"] | null | null | null | simple.py | acamara1498/algs | 683ea919607cc5e4a22d2d5f7095fb643000a6a5 | ["Apache-2.0"] | null | null | null | simple.py | acamara1498/algs | 683ea919607cc5e4a22d2d5f7095fb643000a6a5 | ["Apache-2.0"] | null | null | null |
import alpaca_trade_api as tradeapi
import logging
import pandas as pd
import time
import universe
api = tradeapi.REST()
Universe = universe.Universe
NY = 'America/New_York'
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
def main():
done = None
logging.info('start running')
while True:
# clock API returns the server time including
# the boolean flag for market open
clock = api.get_clock()
now = clock.timestamp
if clock.is_open and done != now.strftime('%Y-%m-%d'):
# ** do our stuff here! **
price_df = prices(Universe)
print ("price_df", price_df)
orders = get_orders(api, price_df)
print ("orders", orders)
trade(orders)
# flag it as done so it doesn't work again for the day
# TODO: this isn't tolerant to process restarts, so this
# flag should probably be saved on disk
done = now.strftime('%Y-%m-%d')
logger.info(f'Done for {done}')
time.sleep(1)
def prices(symbols):
now = pd.Timestamp.now(tz=NY)
end_dt = now
if now.time() >= pd.Timestamp('09:30', tz=NY).time():
end_dt = now - \
pd.Timedelta(now.strftime('%H:%M:%S')) - pd.Timedelta('1 minute')
return _get_prices(symbols, end_dt)
def _get_prices(symbols, end_dt, max_workers=5):
'''Get the map of DataFrame price data from Alpaca's data API.'''
start_dt = end_dt - pd.Timedelta('50 days')
start = start_dt.strftime('%Y-%-m-%-d')
end = end_dt.strftime('%Y-%-m-%-d')
def get_barset(symbols):
return api.get_barset(
symbols,
'day',
limit = 50,
start=start,
end=end
)
# The maximum number of symbols we can request at once is 200.
barset = None
idx = 0
while idx <= len(symbols) - 1:
if barset is None:
barset = get_barset(symbols[idx:idx+200])
else:
barset.update(get_barset(symbols[idx:idx+200]))
idx += 200
return barset.df
def calc_scores(price_df, dayindex=-1):
'''Calculate scores based on the indicator and
return the sorted result.
'''
diffs = {}
param = 10
for symbol in price_df.columns.levels[0]:
df = price_df[symbol]
if len(df.close.values) <= param:
continue
ema = df.close.ewm(span=param).mean()[dayindex]
last = df.close.values[dayindex]
diff = (last - ema) / last
diffs[symbol] = diff
return sorted(diffs.items(), key=lambda x: x[1])
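# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original file): calc_scores()
# expects a barset-style DataFrame with (symbol, field) MultiIndex columns and
# returns (symbol, score) pairs sorted ascending, so the symbol whose price is
# furthest below its 10-day EMA comes first. Symbols and prices below are made
# up purely for illustration.
# ---------------------------------------------------------------------------
def _calc_scores_example():
    idx = pd.date_range('2021-01-04', periods=15)
    toy = pd.DataFrame({('AAA', 'close'): range(1, 16),       # steadily rising
                        ('BBB', 'close'): range(15, 0, -1)},  # steadily falling
                       index=idx)
    toy.columns = pd.MultiIndex.from_tuples(toy.columns)
    return calc_scores(toy)  # BBB (negative score) ranks ahead of AAA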
def get_orders(api, price_df, position_size=100, max_positions=5):
'''Calculate the scores within the universe to build the optimal
portfolio as of today, and extract orders to transition from our
current portfolio to the desired state.
'''
# rank the stocks based on the indicators.
ranked = calc_scores(price_df)
to_buy = set()
to_sell = set()
account = api.get_account()
# take the top one twentieth out of ranking,
# excluding stocks too expensive to buy a share
for symbol, _ in ranked[:len(ranked) // 20]:
price = float(price_df[symbol].close.values[-1])
if price > float(account.cash):
continue
to_buy.add(symbol)
# now get the current positions and see what to buy,
# what to sell to transition to today's desired portfolio.
positions = api.list_positions()
logger.info(positions)
holdings = {p.symbol: p for p in positions}
holding_symbol = set(holdings.keys())
to_sell = holding_symbol - to_buy
to_buy = to_buy - holding_symbol
orders = []
# if a stock is in the portfolio, and not in the desired
# portfolio, sell it
for symbol in to_sell:
shares = holdings[symbol].qty
orders.append({
'symbol': symbol,
'qty': shares,
'side': 'sell',
})
logger.info(f'order(sell): {symbol} for {shares}')
# likewise, if the portfolio is missing stocks from the
# desired portfolio, buy them. We set a limit for the total
# position size so that we don't end up holding too many positions.
max_to_buy = max_positions - (len(positions) - len(to_sell))
for symbol in to_buy:
if max_to_buy <= 0:
break
shares = position_size // float(price_df[symbol].close.values[-1])
if shares == 0.0:
continue
orders.append({
'symbol': symbol,
'qty': shares,
'side': 'buy',
})
logger.info(f'order(buy): {symbol} for {shares}')
max_to_buy -= 1
return orders
def trade(orders, wait=30):
'''This is where we actually submit the orders and wait for them to fill.
Waiting is an important step since the orders aren't filled automatically,
which means if your buys happen to come before your sells have filled,
the buy orders will be bounced. In order to make the transition smooth,
we sell first and wait for all the sell orders to fill before submitting
our buy orders.
'''
# process the sell orders first
sells = [o for o in orders if o['side'] == 'sell']
for order in sells:
try:
logger.info(f'submit(sell): {order}')
api.submit_order(
symbol=order['symbol'],
qty=order['qty'],
side='sell',
type='market',
time_in_force='day',
)
except Exception as e:
logger.error(e)
count = wait
while count > 0:
pending = api.list_orders()
if len(pending) == 0:
logger.info(f'all sell orders done')
break
logger.info(f'{len(pending)} sell orders pending...')
time.sleep(1)
count -= 1
# process the buy orders next
buys = [o for o in orders if o['side'] == 'buy']
for order in buys:
try:
logger.info(f'submit(buy): {order}')
api.submit_order(
symbol=order['symbol'],
qty=order['qty'],
side='buy',
type='market',
time_in_force='day',
)
except Exception as e:
print("no")
logger.error(e)
count = wait
while count > 0:
pending = api.list_orders()
if len(pending) == 0:
logger.info(f'all buy orders done')
break
logger.info(f'{len(pending)} buy orders pending...')
time.sleep(1)
count -= 1
if __name__ == '__main__':
main()
| 31.158879 | 78 | 0.579784 |
aed661f8b6c823ad6cda792a43435af31b428d1b | 1,433 | py | Python | setup.py | idlesign/django-logexpose | 5f9839d3211bc5bf39ada11a928b95a2efd2525d | ["BSD-3-Clause"] | 1 | 2016-08-28T14:51:12.000Z | 2016-08-28T14:51:12.000Z | setup.py | idlesign/django-logexpose | 5f9839d3211bc5bf39ada11a928b95a2efd2525d | ["BSD-3-Clause"] | 1 | 2019-07-08T00:18:02.000Z | 2019-07-08T08:10:49.000Z | setup.py | idlesign/django-logexpose | 5f9839d3211bc5bf39ada11a928b95a2efd2525d | ["BSD-3-Clause"] | 2 | 2016-05-25T08:14:26.000Z | 2019-07-08T00:12:19.000Z |
import os
from setuptools import setup
from logexpose import VERSION
PATH_BASE = os.path.dirname(__file__)
PATH_BIN = os.path.join(PATH_BASE, 'bin')
SCRIPTS = None
if os.path.exists(PATH_BIN):
SCRIPTS = [os.path.join('bin', f) for f in os.listdir(PATH_BIN) if os.path.join(PATH_BIN, f)]
f = open(os.path.join(PATH_BASE, 'README.rst'))
README = f.read()
f.close()
setup(
name='django-logexpose',
version='.'.join(map(str, VERSION)),
url='https://github.com/idlesign/django-logexpose',
description='Reusable application for Django exposing logs for further analysis.',
long_description=README,
license='BSD 3-Clause License',
author='Igor `idle sign` Starikov',
author_email='idlesign@yandex.ru',
packages=['logexpose'],
include_package_data=True,
zip_safe=False,
install_requires=[],
scripts=SCRIPTS,
classifiers=[
# As in https://pypi.python.org/pypi?:action=list_classifiers
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: BSD License'
],
)
| 28.098039 | 97 | 0.648988 |
bd97927c3f3e0280f0e97c61411770abed362e7c | 3,194 | py | Python | deployment/pypi/setup.py | xumeng723/nni | f47ce0d0adc0f4cd5e3dd2e0f382f646cac03d0e | ["MIT"] | null | null | null | deployment/pypi/setup.py | xumeng723/nni | f47ce0d0adc0f4cd5e3dd2e0f382f646cac03d0e | ["MIT"] | null | null | null | deployment/pypi/setup.py | xumeng723/nni | f47ce0d0adc0f4cd5e3dd2e0f382f646cac03d0e | ["MIT"] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================
import setuptools
import platform
from os import walk, path
os_type = platform.system()
if os_type == 'Linux':
os_name = 'POSIX :: Linux'
elif os_type == 'Darwin':
os_name = 'MacOS'
else:
raise NotImplementedError('current platform {} not supported'.format(os_type))
data_files = [('bin', ['node-{}-x64/bin/node'.format(os_type.lower())])]
for (dirpath, dirnames, filenames) in walk('./nni'):
files = [path.normpath(path.join(dirpath, filename)) for filename in filenames]
data_files.append((path.normpath(dirpath), files))
with open('../../README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name = 'nni',
version = '999.0.0-developing',
author = 'Microsoft NNI team',
author_email = 'nni@microsoft.com',
description = 'Neural Network Intelligence package',
long_description = long_description,
long_description_content_type = 'text/markdown',
license = 'MIT',
url = 'https://github.com/Microsoft/nni',
packages = setuptools.find_packages('../../tools') + setuptools.find_packages('../../src/sdk/pynni', exclude=['tests']),
package_dir = {
'nni_annotation': '../../tools/nni_annotation',
'nni_cmd': '../../tools/nni_cmd',
'nni_trial_tool': '../../tools/nni_trial_tool',
'nni_gpu_tool': '../../tools/nni_gpu_tool',
'nni': '../../src/sdk/pynni/nni'
},
python_requires = '>=3.5',
install_requires = [
'schema',
'pyyaml',
'psutil',
'requests',
'astor',
'pyhdfs',
'hyperopt',
'json_tricks',
'numpy',
'scipy',
'coverage'
],
classifiers = [
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: ' + os_name
],
data_files = data_files,
entry_points = {
'console_scripts' : [
'nnictl = nni_cmd.nnictl:parse_args'
]
}
)
| 38.02381 | 124 | 0.641828 |
8c8360a1e775fa05470933c6e700234ab8ccc198 | 12,707 | py | Python | tests/elm_car_simulator.py | EdwardApollo/panda | fcec81cbaf58494bf66eef2067efcf1a6d4e4b7f | ["MIT"] | 1,279 | 2017-04-07T02:11:39.000Z | 2022-03-28T05:01:30.000Z | tests/elm_car_simulator.py | EdwardApollo/panda | fcec81cbaf58494bf66eef2067efcf1a6d4e4b7f | ["MIT"] | 473 | 2017-05-03T06:54:54.000Z | 2022-03-31T07:09:12.000Z | tests/elm_car_simulator.py | EdwardApollo/panda | fcec81cbaf58494bf66eef2067efcf1a6d4e4b7f | ["MIT"] | 610 | 2017-04-07T05:17:33.000Z | 2022-03-26T14:58:32.000Z |
#!/usr/bin/env python3
# flake8: noqa
"""Used to Reverse/Test ELM protocol auto detect and OBD message response without a car."""
import sys
import os
import struct
import binascii
import time
import threading
from collections import deque
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
from panda import Panda # noqa: E402
def lin_checksum(dat):
return sum(dat) % 0x100
class ELMCarSimulator():
def __init__(self, sn, silent=False, can_kbaud=500,
can=True, can11b=True, can29b=True,
lin=True):
self.__p = Panda(sn if sn else Panda.list()[0])
self.__on = True
self.__stop = False
self.__silent = silent
self.__lin_timer = None
self.__lin_active = False
self.__lin_enable = lin
self.__lin_monitor_thread = threading.Thread(target=self.__lin_monitor)
self.__can_multipart_data = None
self.__can_kbaud = can_kbaud
self.__can_extra_noise_msgs = deque()
self.__can_enable = can
self.__can11b = can11b
self.__can29b = can29b
self.__can_monitor_thread = threading.Thread(target=self.__can_monitor)
@property
def panda(self):
return self.__p
def stop(self):
if self.__lin_timer:
self.__lin_timer.cancel()
self.__lin_timeout_handler()
self.__stop = True
def join(self):
if self.__lin_monitor_thread.is_alive():
self.__lin_monitor_thread.join()
if self.__can_monitor_thread.is_alive():
self.__can_monitor_thread.join()
if self.__p:
print("closing handle")
self.__p.close()
def set_enable(self, on):
self.__on = on
def start(self):
self.panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
if self.__lin_enable:
self.__lin_monitor_thread.start()
if self.__can_enable:
self.__can_monitor_thread.start()
#########################
# LIN related functions #
#########################
def __lin_monitor(self):
print("STARTING LIN THREAD")
self.panda.set_uart_baud(2, 10400)
self.panda.kline_drain() # Toss whatever was already there
lin_buff = bytearray()
while not self.__stop:
lin_msg = self.panda.serial_read(2)
if not lin_msg:
continue
lin_buff += lin_msg
#print(" ** Buff", lin_buff)
if lin_buff.endswith(b'\x00\xc1\x33\xf1\x81\x66'): # Leading 0 is wakeup
lin_buff = bytearray()
self.__lin_active = True
print("GOT LIN (KWP FAST) WAKEUP SIGNAL")
self._lin_send(0x10, b'\xC1\x8F\xE9')
self.__reset_lin_timeout()
continue
if self.__lin_active:
msglen = lin_buff[0] & 0x7
if lin_buff[0] & 0xF8 not in (0x80, 0xC0):
print("Invalid bytes at start of message")
print(" BUFF", lin_buff)
continue
if len(lin_buff) < msglen + 4: continue
if lin_checksum(lin_buff[:-1]) != lin_buff[-1]: continue
self.__lin_process_msg(lin_buff[0] & 0xF8, # Priority
lin_buff[1], lin_buff[2], lin_buff[3:-1])
lin_buff = bytearray()
def _lin_send(self, to_addr, msg):
if not self.__silent:
print(" LIN Reply (%x)" % to_addr, binascii.hexlify(msg))
PHYS_ADDR = 0x80
#FUNC_ADDR = 0xC0
RECV = 0xF1
#SEND = 0x33 # Car OBD Functional Address
headers = struct.pack("BBB", PHYS_ADDR | len(msg), RECV, to_addr)
if not self.__silent:
print(" Sending LIN", binascii.hexlify(headers + msg),
hex(sum(bytearray(headers + msg)) % 0x100))
self.panda.kline_send(headers + msg)
def __reset_lin_timeout(self):
if self.__lin_timer:
self.__lin_timer.cancel()
self.__lin_timer = threading.Timer(5, self.__lin_timeout_handler)
self.__lin_timer.start()
def __lin_timeout_handler(self):
print("LIN TIMEOUT")
self.__lin_timer = None
self.__lin_active = False
@property
def lin_active(self):
return self.__lin_active
def __lin_process_msg(self, priority, toaddr, fromaddr, data):
self.__reset_lin_timeout()
if not self.__silent and data != b'\x3E':
print("LIN MSG", "Addr:", hex(toaddr), "obdLen:", len(data),
binascii.hexlify(data))
outmsg = None
#if data == b'\x3E':
# print("KEEP ALIVE")
#el
if len(data) > 1:
outmsg = self._process_obd(data[0], data[1])
if outmsg:
obd_header = struct.pack("BB", 0x40 | data[0], data[1])
if len(outmsg) <= 5:
self._lin_send(0x10, obd_header + outmsg)
else:
first_msg_len = min(4, len(outmsg) % 4) or 4
self._lin_send(0x10, obd_header + b'\x01' +
b'\x00' * (4 - first_msg_len) +
outmsg[:first_msg_len])
for num, i in enumerate(range(first_msg_len, len(outmsg), 4)):
self._lin_send(0x10, obd_header +
struct.pack('B', (num + 2) % 0x100) + outmsg[i:i + 4])
#########################
# CAN related functions #
#########################
def __can_monitor(self):
print("STARTING CAN THREAD")
self.panda.set_can_speed_kbps(0, self.__can_kbaud)
self.panda.can_recv() # Toss whatever was already there
while not self.__stop:
for address, ts, data, src in self.panda.can_recv():
if self.__on and src == 0 and len(data) == 8 and data[0] >= 2:
if not self.__silent:
print("Processing CAN message", src, hex(address), binascii.hexlify(data))
self.__can_process_msg(data[1], data[2], address, ts, data, src)
elif not self.__silent:
print("Rejecting CAN message", src, hex(address), binascii.hexlify(data))
def can_mode_11b(self):
self.__can11b = True
self.__can29b = False
def can_mode_29b(self):
self.__can11b = False
self.__can29b = True
def can_mode_11b_29b(self):
self.__can11b = True
self.__can29b = True
def change_can_baud(self, kbaud):
self.__can_kbaud = kbaud
self.panda.set_can_speed_kbps(0, self.__can_kbaud)
def can_add_extra_noise(self, noise_msg, addr=None):
self.__can_extra_noise_msgs.append((addr, noise_msg))
def _can_send(self, addr, msg):
if not self.__silent:
print(" CAN Reply (%x)" % addr, binascii.hexlify(msg))
self.panda.can_send(addr, msg + b'\x00' * (8 - len(msg)), 0)
if self.__can_extra_noise_msgs:
noise = self.__can_extra_noise_msgs.popleft()
self.panda.can_send(noise[0] if noise[0] is not None else addr,
noise[1] + b'\x00' * (8 - len(noise[1])), 0)
def _can_addr_matches(self, addr):
if self.__can11b and (addr == 0x7DF or (addr & 0x7F8) == 0x7E0):
return True
if self.__can29b and (addr == 0x18db33f1 or (addr & 0x1FFF00FF) == 0x18da00f1):
return True
return False
def __can_process_msg(self, mode, pid, address, ts, data, src):
if not self.__silent:
print("CAN MSG", binascii.hexlify(data[1:1 + data[0]]),
"Addr:", hex(address), "Mode:", hex(mode)[2:].zfill(2),
"PID:", hex(pid)[2:].zfill(2), "canLen:", len(data),
binascii.hexlify(data))
if self._can_addr_matches(address) and len(data) == 8:
outmsg = None
if data[:3] == b'\x30\x00\x00' and len(self.__can_multipart_data):
if not self.__silent:
print("Request for more data")
outaddr = 0x7E8 if address == 0x7DF or address == 0x7E0 else 0x18DAF110
msgnum = 1
while(self.__can_multipart_data):
datalen = min(7, len(self.__can_multipart_data))
msgpiece = struct.pack("B", 0x20 | msgnum) + self.__can_multipart_data[:datalen]
self._can_send(outaddr, msgpiece)
self.__can_multipart_data = self.__can_multipart_data[7:]
msgnum = (msgnum + 1) % 0x10
time.sleep(0.01)
else:
outmsg = self._process_obd(mode, pid)
if outmsg:
outaddr = 0x7E8 if address == 0x7DF or address == 0x7E0 else 0x18DAF110
if len(outmsg) <= 5:
self._can_send(outaddr,
struct.pack("BBB", len(outmsg) + 2, 0x40 | data[1], pid) + outmsg)
else:
first_msg_len = min(3, len(outmsg) % 7)
payload_len = len(outmsg) + 3
msgpiece = struct.pack("BBBBB", 0x10 | ((payload_len >> 8) & 0xF),
payload_len & 0xFF,
0x40 | data[1], pid, 1) + outmsg[:first_msg_len]
self._can_send(outaddr, msgpiece)
self.__can_multipart_data = outmsg[first_msg_len:]
#########################
# General OBD functions #
#########################
def _process_obd(self, mode, pid):
if mode == 0x01: # Mode: Show current data
if pid == 0x00: # List supported things
return b"\xff\xff\xff\xfe" # b"\xBE\x1F\xB8\x10" #Bitfield, random features
elif pid == 0x01: # Monitor Status since DTC cleared
return b"\x00\x00\x00\x00" # Bitfield, random features
elif pid == 0x04: # Calculated engine load
return b"\x2f"
elif pid == 0x05: # Engine coolant temperature
return b"\x3c"
elif pid == 0x0B: # Intake manifold absolute pressure
return b"\x90"
elif pid == 0x0C: # Engine RPM
return b"\x1A\xF8"
elif pid == 0x0D: # Vehicle Speed
return b"\x53"
elif pid == 0x10: # MAF air flow rate
return b"\x01\xA0"
elif pid == 0x11: # Throttle Position
return b"\x90"
elif pid == 0x33: # Absolute Barometric Pressure
return b"\x90"
elif mode == 0x09: # Mode: Request vehicle information
if pid == 0x02: # Show VIN
return b"1D4GP00R55B123456"
if pid == 0xFC: # test long multi message. Ligned up for LIN responses
return b''.join((struct.pack(">BBH", 0xAA, 0xAA, num + 1) for num in range(80)))
if pid == 0xFD: # test long multi message
parts = (b'\xAA\xAA\xAA' + struct.pack(">I", num) for num in range(80))
return b'\xAA\xAA\xAA' + b''.join(parts)
if pid == 0xFE: # test very long multi message
parts = (b'\xAA\xAA\xAA' + struct.pack(">I", num) for num in range(584))
return b'\xAA\xAA\xAA' + b''.join(parts) + b'\xAA'
if pid == 0xFF:
return b'\xAA\x00\x00' + \
b"".join(((b'\xAA' * 5) + struct.pack(">H", num + 1) for num in range(584)))
#return b"\xAA"*100#(0xFFF-3)
if __name__ == "__main__":
serial = os.getenv("SERIAL") if os.getenv("SERIAL") else None
kbaud = int(os.getenv("CANKBAUD")) if os.getenv("CANKBAUD") else 500 # type: ignore
bitwidth = int(os.getenv("CANBITWIDTH")) if os.getenv("CANBITWIDTH") else 0 # type: ignore
canenable = bool(int(os.getenv("CANENABLE"))) if os.getenv("CANENABLE") else True # type: ignore
linenable = bool(int(os.getenv("LINENABLE"))) if os.getenv("LINENABLE") else True # type: ignore
sim = ELMCarSimulator(serial, can_kbaud=kbaud, can=canenable, lin=linenable)
if(bitwidth == 0):
sim.can_mode_11b_29b()
if(bitwidth == 11):
sim.can_mode_11b()
if(bitwidth == 29):
sim.can_mode_29b()
import signal
def signal_handler(signal, frame):
print('\nShutting down simulator')
sim.stop()
sim.join()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
sim.start()
signal.pause()
| 38.6231 | 101 | 0.544424 |
e3e2d8ea3f47d4df3fe07ca4d01e83d8a36eeef2 | 8,303 | py | Python | test/vanilla/version-tolerant/Expected/AcceptanceTests/MediaTypesVersionTolerant/mediatypesversiontolerant/aio/operations/_operations.py | Azure/autorest.python | c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7 | ["MIT"] | 35 | 2018-04-03T12:15:53.000Z | 2022-03-11T14:03:34.000Z | test/vanilla/version-tolerant/Expected/AcceptanceTests/MediaTypesVersionTolerant/mediatypesversiontolerant/aio/operations/_operations.py | Azure/autorest.python | c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7 | ["MIT"] | 652 | 2017-08-28T22:44:41.000Z | 2022-03-31T21:20:31.000Z | test/vanilla/version-tolerant/Expected/AcceptanceTests/MediaTypesVersionTolerant/mediatypesversiontolerant/aio/operations/_operations.py | Azure/autorest.python | c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7 | ["MIT"] | 29 | 2017-08-28T20:57:01.000Z | 2022-03-11T14:03:38.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ...operations._operations import (
build_analyze_body_no_accept_header_request,
build_analyze_body_request,
build_content_type_with_encoding_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MediaTypesClientOperationsMixin:
@distributed_trace_async
async def analyze_body(self, input: Optional[Union[IO, Any]] = None, **kwargs: Any) -> str:
"""Analyze body, that could be different media types.
:param input: Input parameter.
:type input: IO or Any
:keyword str content_type: Media type of the body sent to the API. Default value is
"application/json". Allowed values are: "application/pdf", "image/jpeg", "image/png",
"image/tiff", "application/json."
:return: str
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
input = {
"source": "str" # Optional. File source path.
}
"""
cls = kwargs.pop("cls", None) # type: ClsType[str]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
json = None
content = None
if content_type.split(";")[0] in ["application/pdf", "image/jpeg", "image/png", "image/tiff"]:
content = input
elif content_type.split(";")[0] in ["application/json"]:
if input is not None:
json = input
else:
raise ValueError(
"The content_type '{}' is not one of the allowed values: "
"['application/pdf', 'image/jpeg', 'image/png', 'image/tiff', 'application/json']".format(content_type)
)
request = build_analyze_body_request(
content_type=content_type,
json=json,
content=content,
template_url=self.analyze_body.metadata["url"],
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
analyze_body.metadata = {"url": "/mediatypes/analyze"} # type: ignore
@distributed_trace_async
async def analyze_body_no_accept_header(self, input: Optional[Union[IO, Any]] = None, **kwargs: Any) -> None:
"""Analyze body, that could be different media types. Adds to AnalyzeBody by not having an accept
type.
:param input: Input parameter.
:type input: IO or Any
:keyword str content_type: Media type of the body sent to the API. Default value is
"application/json". Allowed values are: "application/pdf", "image/jpeg", "image/png",
"image/tiff", "application/json."
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
input = {
"source": "str" # Optional. File source path.
}
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
json = None
content = None
if content_type.split(";")[0] in ["application/pdf", "image/jpeg", "image/png", "image/tiff"]:
content = input
elif content_type.split(";")[0] in ["application/json"]:
if input is not None:
json = input
else:
raise ValueError(
"The content_type '{}' is not one of the allowed values: "
"['application/pdf', 'image/jpeg', 'image/png', 'image/tiff', 'application/json']".format(content_type)
)
request = build_analyze_body_no_accept_header_request(
content_type=content_type,
json=json,
content=content,
template_url=self.analyze_body_no_accept_header.metadata["url"],
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
analyze_body_no_accept_header.metadata = {"url": "/mediatypes/analyzeNoAccept"} # type: ignore
@distributed_trace_async
async def content_type_with_encoding(self, input: Optional[str] = None, **kwargs: Any) -> str:
"""Pass in contentType 'text/plain; encoding=UTF-8' to pass test. Value for input does not matter.
:param input: Input parameter.
:type input: str
:return: str
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[str]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "text/plain") # type: Optional[str]
if input is not None:
content = input
else:
content = None
request = build_content_type_with_encoding_request(
content_type=content_type,
content=content,
template_url=self.content_type_with_encoding.metadata["url"],
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
content_type_with_encoding.metadata = {"url": "/mediatypes/contentTypeWithEncoding"} # type: ignore
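    # Editor's sketch (illustrative): this operation is exercised by sending an explicit
    # charset parameter in the content type, e.g.
    #
    #     result = await client.content_type_with_encoding(
    #         input="hello", content_type="text/plain; encoding=UTF-8")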
| 39.727273 | 119 | 0.628809 |
07eb3236e96168107973e4e4db2af6ef79943b55 | 709 | py | Python | geemap/__init__.py | Yisheng-Li/geemap | 0594917a4acedfebb85879cfe2bcb6a406a55f39 | [
"MIT"
] | null | null | null | geemap/__init__.py | Yisheng-Li/geemap | 0594917a4acedfebb85879cfe2bcb6a406a55f39 | [
"MIT"
] | null | null | null | geemap/__init__.py | Yisheng-Li/geemap | 0594917a4acedfebb85879cfe2bcb6a406a55f39 | [
"MIT"
] | null | null | null | """Top-level package for geemap."""
__author__ = """Qiusheng Wu"""
__email__ = "giswqs@gmail.com"
__version__ = "0.11.4"
import os
def in_colab_shell():
"""Tests if the code is being executed within Google Colab."""
import sys
if "google.colab" in sys.modules:
return True
else:
return False
def use_folium():
"""Whether to use the folium or ipyleaflet plotting backend."""
if os.environ.get("USE_FOLIUM") is not None:
return True
else:
return False
if use_folium():
from .foliumap import *
else:
from .geemap import *
if in_colab_shell():
from google.colab import output
output.enable_custom_widget_manager()
| 19.162162 | 67 | 0.64598 |
10be8316d7a849e45d5c434df043d6296f870245 | 877 | py | Python | project_euler/python/094_almost_equilateral_triangles.py | Sabihxh/secret | fb940df9af9c6d440150ffe43b80fcb49ff6c2b4 | [
"MIT"
] | null | null | null | project_euler/python/094_almost_equilateral_triangles.py | Sabihxh/secret | fb940df9af9c6d440150ffe43b80fcb49ff6c2b4 | [
"MIT"
] | null | null | null | project_euler/python/094_almost_equilateral_triangles.py | Sabihxh/secret | fb940df9af9c6d440150ffe43b80fcb49ff6c2b4 | [
"MIT"
] | null | null | null | import numpy as np
from math import sqrt
n = 10**2
def prime_factors(n):
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
def solution(limit):
squares = {x**2: x for x in np.arange(2, int(sqrt(limit) + 1))}
result = []
for p_2 in squares:
factors = prime_factors(p_2)
if factors.count(2) > 1:
continue
if 2 in factors:
factors.remove(2)
for factor in factors:
if factor % 4 != 1:
break
else:
q_2 = 0.25 * ((squares[p_2] + 1) ** 2)
if p_2 - q_2 in squares:
result.append(p_2)
result = [squares[x] + 1 for x in result]
return result
print(solution(10**9))
| 19.065217 | 67 | 0.476625 |
cca1b9f233aa0ed3fcaa4b0ffe2fa52e36d50d77 | 18,311 | py | Python | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Protocol/SDP.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
] | 1 | 2017-03-28T06:41:51.000Z | 2017-03-28T06:41:51.000Z | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Protocol/SDP.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
] | null | null | null | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Protocol/SDP.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
] | 1 | 2016-12-13T21:08:58.000Z | 2016-12-13T21:08:58.000Z | #!/usr/bin/env python
#
# Copyright (C) 2007 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
==========================================
Session Description Protocol (SDP) Support
==========================================
The SDPParser component parses Session Description Protocol (see `RFC 4566`_) data
sent to it as individual lines of text (not multiline strings) and outputs a
dictionary containing the parsed session description.
.. _`RFC 4566`: http://tools.ietf.org/html/rfc4566
Example Usage
-------------
Fetch SDP data from a URL, parse it, and display the output::
Pipeline( OneShot("http://www.mysite.com/sessiondescription.sdp"),
SimpleHTTPClient(),
chunks_to_lines(),
SDPParser(),
ConsoleEchoer(),
).run()
If the session description at the URL provided is this::
v=0
o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5
s=SDP Seminar
i=A Seminar on the session description protocol
u=http://www.example.com/seminars/sdp.pdf
e=j.doe@example.com (Jane Doe)
c=IN IP4 224.2.17.12/127
t=2873397496 2873404696
a=recvonly
m=audio 49170 RTP/AVP 0
m=video 51372 RTP/AVP 99
a=rtpmap:99 h263-1998/90000
Then parsing will return this dictionary::
{ 'protocol_version': 0,
'origin' : ('jdoe', 2890844526, 2890842807, 'IN', 'IP4', '10.47.16.5'),
'sessionname': 'SDP Seminar',
'information': 'A Seminar on the session description protocol',
'connection' : ('IN', 'IP4', '224.2.17.12', '127', 1),
'time' : [(2873397496L, 2873404696L, [])],
'URI' : 'http://www.example.com/seminars/sdp.pdf',
'email' : 'j.doe@example.com (Jane Doe)',
'attribute' : ['recvonly'],
'media':
[ { 'media' : ('audio', 49170, 1, 'RTP/AVP', '0'),
'connection': ('IN', 'IP4', '224.2.17.12', '127', 1)
},
{ 'media' : ('video', 51372, 1, 'RTP/AVP', '99'),
'connection': ('IN', 'IP4', '224.2.17.12', '127', 1),
'attribute' : ['rtpmap:99 h263-1998/90000']
}
],
}
Behaviour
---------
Send individual lines as strings to SDPParser's "inbox" inbox. SDPParser cannot
handle multiple lines in the same string.
When SDPParser receives a producerFinished() message on its "control" inbox, or
if it encounters another "v=" line, then it knows it has reached the end of the
SDP data and will output the parsed data as a dictionary to its "outbox" outbox.
The SDP format does *not* contain any kind of marker to signify the end of a
session description - so SDPParser only deduces this by being told that the
producer/data source has finished, or if it encounters a "v=" line indicating
the start of another session description.
SDPParser can parse more than one session description, one after the other.
If the SDP data is malformed AssertionError, or other exceptions, may be raised.
SDPParser does not rigorously test for exact compliance - it just complains if
there are glaring problems, such as fields appearing in the wrong sections!
If a producerFinished or shutdownMicroprocess message is received on the
"control" inbox then, once any pending data at the "inbox" inbox has been
processed, this component will terminate. It will send the message on out of
its "signal" outbox.
Only if the message is a producerFinished message will it output the session
description it has been parsing. A shutdownMicroprocess message will not result
in it being output.
Format of parsed output
-----------------------
The result of parsing SDP data is a dictionary mapping descriptive names of
types to values:
====== ====================== ======================================================================
Session Description
------------------------------------------------------------------------------------------------------
Type Dictionary key Format of the value
====== ====================== ======================================================================
v "protocol_version" version_number
o "origin" ("user", session_id, session_version, "net_type", "addr_type", "addr")
s "sessionname" "session name"
t & r "time" (starttime, stoptime, [repeat,repeat, ...])
where repeat = (interval,duration,[offset,offset, ...])
a "attribute" "value of attribute"
b "bandwidth" (mode, bitspersecond)
i "information" "value"
e "email" "email-address"
u "URI" "uri"
p "phone" "phone-number"
c "connection" ("net_type", "addr_type", "addr", ttl, groupsize)
z "timezone adjustments" [(adj-time,offset), (adj-time,offset), ...]
k "encryption" ("method","value")
m "media" [media-description, media-description, ... ]
see next table for media description structure
====== ====================== ======================================================================
Note that 't' and 'r' lines are combined in the dictionary into a single
"time" key containing both the start and end times specified in the 't' line
and a list of any repeats specified in any 'r' lines present.
The "media" key contains a list of media descriptions. Like for the overall
session description, each is parsed into a dictionary, that will contain some
or all of the following:
====== ====================== ======================================================================
Media Descriptions
------------------------------------------------------------------------------------------------------
Type Dictionary key Format of the value
====== ====================== ======================================================================
m "media" ("media-type", port-number, number-of-ports, "protocol", "format")
c "connection" ("net_type", "addr_type", "addr", ttl, groupsize)
b "bandwidth" (mode, bitspersecond)
i "information" "value"
k "encryption" ("method","value")
a "attribute" "value of attribute"
====== ====================== ======================================================================
Some lines are optional in SDP. If they are not included, then the parsed output
will not contain the corresponding key.
The formats of values are left unchanged by the parsing. For example, integers
representing times are simply converted to integers, but the units used remain
unchanged (ie. they will not be converted to unix time units).
"""
# Basic Parser for SDP data, as defined in RFC 4566
#
# assuming the data is already split into lines
#
# ignores attribute lines to simplify parsing
from Axon.Component import component
from Axon.Ipc import producerFinished,shutdownMicroprocess
import re
class SDPParser(component):
"""\
SDPParser() -> new SDPParser component.
Parses Session Description Protocol data (see RFC 4566) sent to its "inbox"
inbox as individual strings for each line of the SDP data. Outputs a dict
containing the parsed data from its "outbox" outbox.
"""
Inboxes = { "inbox" : "SDP data in strings, each containing a single line",
"control" : "Shutdown signalling",
}
Outboxes = { "outbox" : "Parsed SDP data in a dictionary",
"signal" : "Shutdown signalling",
}
def handleControl(self):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg,producerFinished):
self.shutdownMsg = msg
raise "DONE"
elif isinstance(msg,shutdownMicroprocess):
self.shutdownMsg = msg
raise "STOP"
else:
self.send(msg,"signal")
def readline(self):
while 1:
if self.dataReady("inbox"):
line = self.recv("inbox")
if line != "":
yield line
return
self.handleControl()
self.pause()
yield None
def main(self):
self.shutdownMsg = None
session = {}
mandatory = "XXX"
try:
for line in self.readline(): yield 1
# self.readline() generator complete ... line now contains a line with something on it
type,key,value = _parseline(line)
while 1:
# begin by parsing the session section
session = {}
mandatory = "vost"
multiple_allowed = "abtr"
single_allowed = "vosiuepcbzk"
most_recent_t = None
while type != "m":
# check to see if we've been getting SDP data, then another 'v' has come along
# signifying the start of a new one
if type=="v" and "v" not in mandatory:
break
mandatory=mandatory.replace(type,"")
assert((type in single_allowed) or (type in multiple_allowed))
single_allowed=single_allowed.replace(type,"")
if type in multiple_allowed:
if type=="r":
assert(most_recent_t is not None)
most_recent_t[2].append(value) # tag repeats into list on end of time field
else:
session[key] = session.get(key,[])
session[key].append(value)
else:
session[key] = value
for line in self.readline(): yield 1
# self.readline() generator complete ... line now contains a line with something on it
type,key,value = _parseline(line)
# we've hit an 'm' so its the end of the session section
assert(mandatory=="")
# now move onto media sections
mandatory_additional=""
if "c" in single_allowed:
mandatory_additional+="c"
session['media'] = []
# do a media section
while type=="m":
mandatory = "" + mandatory_additional
multiple_allowed = "a"
single_allowed = "icbk"
media={key:value}
session['media'].append(media)
for line in self.readline(): yield 1
# self.readline() generator complete ... line now contains a line with something on it
type,key,value = _parseline(line)
while type != "m" and type != "v":
mandatory=mandatory.replace(type,"")
assert((type in single_allowed) or (type in multiple_allowed))
single_allowed=single_allowed.replace(type,"")
if type in multiple_allowed:
media[key] = media.get(key,[])
media[key].append(value)
else:
media[key] = value
for line in self.readline(): yield 1
# self.readline() generator complete ... line now contains a line with something on it
type,key,value = _parseline(line)
# end of media section
assert(mandatory=="")
# end of complete SDP file (we've hit another 'v' signifying the start of a new one)
self.sendOutParsedSDP(session)
except "DONE":
if mandatory=="":
self.sendOutParsedSDP(session)
yield 1
except "STOP":
pass
if self.shutdownMsg is None:
self.shutdownMsg = producerFinished()
self.send(self.shutdownMsg,"signal")
def sendOutParsedSDP(self,session):
# normalise it a bit first
if "connection" in session:
for media in session['media']:
media['connection'] = session['connection']
self.send(session,"outbox")
def _parseline(line):
match = re.match("^(.)=(.*)",line)
type,value = match.group(1), match.group(2)
if type=="v":
assert(value=="0")
return type, 'protocol_version', int(value)
elif type=="o":
user,sid,ver,ntype,atype,addr = re.match("^ *(\S+) +(\d+) +(\d+) +(IN) +(IP[46]) +(.+)",value).groups()
return type, 'origin', (user,int(sid),int(ver),ntype,atype,addr)
elif type=="s":
return type, 'sessionname', value
elif type=="i":
return type, 'information', value
elif type=="u":
return type, 'URI', value
elif type=="e":
return type, 'email', value
elif type=="p":
return type, 'phone', value
elif type=="c":
if re.match("^ *IN +IP4 +.*$",value):
match = re.match("^ *IN +IP4 +([^/]+)(?:/(\d+)(?:/(\d+))?)? *$",value)
ntype,atype = "IN","IP4"
addr,ttl,groupsize = match.groups()
if ttl is None:
ttl=127
if groupsize is None:
groupsize=1
elif re.match("^ *IN +IP6 +.*$",value):
match = re.match("^ *IN +IP6 +([abcdefABCDEF0123456789:.]+)(?:/(\d+))? *$")
ntype,atype = "IN","IP6"
addr,groupsize = match.groups()
else:
assert(False)
return type, 'connection', (ntype,atype,addr,ttl,groupsize)
elif type=="b":
mode,rate = \
re.match("^ *((?:AS)|(?:CT)|(?:X-[^:]+)):(\d+) *$",value).groups()
bitspersecond=long(rate)*1000
return type, 'bandwidth', (mode,bitspersecond)
elif type=="t":
start,stop = [ long(x) for x in re.match("^ *(\d+) +(\d+) *$",value).groups() ]
repeats = []
return type, 'time', (start,stop,repeats)
elif type=="r":
terms=re.split("\s+",value)
parsedterms = []
for term in terms:
            value, unit = re.match("^(\d+)([dhms])?$", term).groups()
value = long(value) * {None:1, "s":1, "m":60, "h":3600, "d":86400}[unit]
parsedterms.append(value)
interval,duration=parsedterms[0], parsedterms[1]
offsets=parsedterms[2:]
return type, 'repeats', (interval,duration,offsets)
elif type=="z":
adjustments=[]
while value.strip() != "":
adjtime,offset,offsetunit,value = re.match("^ *(\d+) +([+-]?\d+)([dhms])? *?(.*)$",value).groups()
adjtime=long(adjtime)
offset=long(offset) * {None:1, "s":1, "m":60, "h":3600, "d":86400}[offsetunit]
adjustments.append((adjtime,offset))
return type, 'timezone adjustments', adjustments
elif type=="k":
method,value = re.match("^(clear|base64|uri|prompt)(?:[:](.*))?$",value).groups()
return type, "encryption", (method,value)
elif type=="a":
return type, 'attribute', value
elif type=="m":
media, port, numports, protocol, fmt = re.match("^(audio|video|text|application|message) +(\d+)(?:[/](\d+))? +([^ ]+) +(.+)$",value).groups()
port=int(port)
if numports is None:
numports=1
else:
numports=int(numports)
return type, 'media', (media,port,numports,protocol,fmt)
else:
return type, 'unknown', value
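# Editor's note: _parseline maps a single SDP line to a (type, key, value) triple, e.g.
#     _parseline("m=audio 49170 RTP/AVP 0") -> ("m", "media", ("audio", 49170, 1, "RTP/AVP", "0"))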
__kamaelia_components__ = ( SDPParser, )
if __name__ == "__main__":
from Kamaelia.Util.DataSource import DataSource
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Util.Console import ConsoleEchoer
sdp = """\
v=0
o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5
s=SDP Seminar
i=A Seminar on the session description protocol
u=http://www.example.com/seminars/sdp.pdf
e=j.doe@example.com (Jane Doe)
c=IN IP4 224.2.17.12/127
t=2873397496 2873404696
a=recvonly
m=audio 49170 RTP/AVP 0
m=video 51372 RTP/AVP 99
a=rtpmap:99 h263-1998/90000
v=0
o=bfcrd 1140190501 1140190501 IN IP4 132.185.224.80
s=BFC ONE [H.264/AVC]
i=Multicast trial service from the BBC! Get BFC FLURBLE here!
a=x-qt-text-nam:BFC FLURBLE [H.264/AVC]
a=x-qt-text-aut:BFC Research & Development
a=x-qt-text-cpy:Copyright (c) 2006 British Flurbling Corporation
u=http://www.bbc.co.uk/multicast/
e=Multicast Support <multicast-tech@bfc.co.uk>
t=0 0
c=IN IP4 233.122.227.151/32
m=video 5150 RTP/AVP 33
b=AS:1200000
a=type:broadcast
a=mux:m2t
v=0
""".splitlines()
Pipeline( DataSource(sdp),
SDPParser(),
ConsoleEchoer(),
).run()
| 37.369388 | 149 | 0.531102 |
e5f7b538daf2a631271a40cb18d32a3a66fa3bd8 | 3,259 | py | Python | tests/automaton_test.py | University-Projects-UH/ia-sim-cmp | c8bcb11950584c0989ef92789d06a2afd0aa05f4 | [
"MIT"
] | null | null | null | tests/automaton_test.py | University-Projects-UH/ia-sim-cmp | c8bcb11950584c0989ef92789d06a2afd0aa05f4 | [
"MIT"
] | null | null | null | tests/automaton_test.py | University-Projects-UH/ia-sim-cmp | c8bcb11950584c0989ef92789d06a2afd0aa05f4 | [
"MIT"
] | null | null | null | from core import Automaton, nfa_to_dfa, DFA, union_automatas, concat_automatas, \
closure_automaton
def test_transform_nfa_to_dfa():
automaton = Automaton(3, 0, [2], transitions=[
(0, 'a', [0]),
(0, 'b', [1]),
(1, 'a', [2]),
(1, 'b', [1]),
(2, 'a', [0]),
(2, 'b', [1]),
])
automaton = nfa_to_dfa(automaton)
assert automaton.recognize('ba')
assert automaton.recognize('aababbaba')
assert not automaton.recognize('')
assert not automaton.recognize('aabaa')
assert not automaton.recognize('aababb')
dfa = nfa_to_dfa(Automaton(6, 0, [3, 5], transitions=[
(0, '', [1, 2]),
(1, '', [3]),
(1, 'b', [4]),
(2, 'a', [4]),
(3, 'c', [3]),
(4, '', [5]),
(5, 'd', [5]),
]))
assert dfa.states == 4
assert len(dfa.finals_states) == 4
assert not dfa.recognize('dddddd')
assert not dfa.recognize('cdddd')
assert not dfa.recognize('aa')
assert not dfa.recognize('ab')
assert not dfa.recognize('ddddc')
assert dfa.recognize('')
assert dfa.recognize('a')
assert dfa.recognize('b')
assert dfa.recognize('cccccc')
assert dfa.recognize('adddd')
assert dfa.recognize('bdddd')
def test_automatas_union():
aut1 = Automaton(2, 0, [1], [(0, '1', [1])])
aut2 = Automaton(2, 0, [1], [(0, '2', [1])])
aut3 = Automaton(2, 0, [1], [(0, '3', [1])])
un = union_automatas(aut1, aut2)
un = union_automatas(un, aut3)
dfa = nfa_to_dfa(un)
assert dfa.recognize("1")
assert dfa.recognize("2")
assert dfa.recognize("3")
automaton = DFA(2, 0, [1], transitions=[
(0, 'a', [0]),
(0, 'b', [1]),
(1, 'a', [0]),
(1, 'b', [1]),
])
union = union_automatas(automaton, automaton)
recognize = nfa_to_dfa(union).recognize
assert union.states == 2 * automaton.states + 2
assert recognize('b')
assert recognize('abbb')
assert recognize('abaaababab')
assert not recognize('')
assert not recognize('a')
assert not recognize('abbbbaa')
def test_automatas_concat():
automaton = DFA(2, 0, [1], transitions=[
(0, 'a', [0]),
(0, 'b', [1]),
(1, 'a', [0]),
(1, 'b', [1]),
])
concat = concat_automatas(automaton, automaton)
recognize = nfa_to_dfa(concat).recognize
assert concat.states == 2 * automaton.states + 1
assert recognize('bb')
assert recognize('abbb')
assert recognize('abaaababab')
assert not recognize('')
assert not recognize('a')
assert not recognize('b')
assert not recognize('ab')
assert not recognize('aaaab')
assert not recognize('abbbbaa')
def test_automaton_closure():
automaton = DFA(2, 0, [1], transitions=[
(0, 'a', [0]),
(0, 'b', [1]),
(1, 'a', [0]),
(1, 'b', [1]),
])
closure = closure_automaton(automaton)
recognize = nfa_to_dfa(closure).recognize
assert closure.states == automaton.states + 2
assert recognize('')
assert recognize('b')
assert recognize('ab')
assert recognize('bb')
assert recognize('abbb')
assert recognize('abaaababab')
assert not recognize('a')
assert not recognize('abbbbaa')
| 26.933884 | 81 | 0.561522 |
764da7952bce284a6e360b42e546625cf9330b26 | 5,417 | py | Python | ck_maml_train.py | zhangming880102/facial_expression_recognition_maml_pytorch | 072655d25028e7d1e384f488d9b344b584d0a254 | [
"MIT"
] | 1 | 2021-08-10T06:01:06.000Z | 2021-08-10T06:01:06.000Z | ck_maml_train.py | zhangming880102/facial_expression_recognition_maml_pytorch | 072655d25028e7d1e384f488d9b344b584d0a254 | [
"MIT"
] | null | null | null | ck_maml_train.py | zhangming880102/facial_expression_recognition_maml_pytorch | 072655d25028e7d1e384f488d9b344b584d0a254 | [
"MIT"
] | null | null | null | import torch, os
import numpy as np
from fer2013NShot import FerNShot
import argparse
from meta import Meta
def main(args):
torch.manual_seed(222)
torch.cuda.manual_seed_all(222)
np.random.seed(222)
print(args)
config =[
('conv2d',[64,3,3,3,1,1]),
('bn',[64]),
('relu',[True]),
('conv2d',[64,64,3,3,1,1]),
('bn',[64]),
('relu',[True]),
('max_pool2d',[2,2,0]),
('conv2d',[128,64,3,3,1,1]),
('bn',[128]),
('relu',[True]),
('conv2d',[128,128,3,3,1,1]),
('bn',[128]),
('relu',[True]),
('max_pool2d',[2,2,0]),
('conv2d',[256,128,3,3,1,1]),
('bn',[256]),
('relu',[True]),
('conv2d',[256,256,3,3,1,1]),
('bn',[256]),
('relu',[True]),
('conv2d',[256,256,3,3,1,1]),
('bn',[256]),
('relu',[True]),
('conv2d',[256,256,3,3,1,1]),
('bn',[256]),
('relu',[True]),
('max_pool2d',[2,2,0]),
('conv2d',[512,256,3,3,1,1]),
('bn',[512]),
('relu',[True]),
('conv2d',[512,512,3,3,1,1]),
('bn',[512]),
('relu',[True]),
('conv2d',[512,512,3,3,1,1]),
('bn',[512]),
('relu',[True]),
('conv2d',[512,512,3,3,1,1]),
('bn',[512]),
('relu',[True]),
('max_pool2d',[2,2,0]),
('conv2d',[512,512,3,3,1,1]),
('bn',[512]),
('relu',[True]),
('conv2d',[512,512,3,3,1,1]),
('bn',[512]),
('relu',[True]),
('conv2d',[512,512,3,3,1,1]),
('bn',[512]),
('relu',[True]),
('conv2d',[512,512,3,3,1,1]),
('bn',[512]),
('relu',[True]),
('max_pool2d',[2,2,0]),
('avg_pool2d',[1,1,0]),
('flatten', []),
('linear', [args.n_way,512])
]
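    # Editor's note (assumed convention for this config format): each tuple describes one layer
    # of the learner network; conv2d parameters are [out_ch, in_ch, kernel_h, kernel_w, stride,
    # padding], linear is [out_features, in_features], and max/avg_pool2d are [kernel, stride, padding].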
device = args.device
maml = Meta(args,config).to(device)
if not args.reload_model is None:
maml.load_state_dict(torch.load(args.reload_model,map_location=device))
maml.train()
tmp = filter(lambda x: x.requires_grad, maml.parameters())
num = sum(map(lambda x: np.prod(x.shape), tmp))
print(maml)
print('Total trainable tensors:', num)
db_train = FerNShot('fer',
batchsz=args.task_num,
n_way=args.n_way,
k_shot=args.k_spt,
k_query=args.k_qry,
imgsz=args.imgsz)
for step in range(args.epoch):
print('step:%d'%(step))
x_spt, y_spt, x_qry, y_qry = db_train.next()
x_spt, y_spt, x_qry, y_qry = torch.from_numpy(x_spt).to(device), torch.from_numpy(y_spt).to(device), \
torch.from_numpy(x_qry).to(device), torch.from_numpy(y_qry).to(device)
# set traning=True to update running_mean, running_variance, bn_weights, bn_bias
accs = maml(x_spt, y_spt, x_qry, y_qry)
if step % 50 == 0:
print('step:', step, '\ttraining acc:', accs)
if step % 500 == 0:
accs = []
for testid in range(100//args.task_num):
# test
x_spt, y_spt, x_qry, y_qry = db_train.next('test')
x_spt, y_spt, x_qry, y_qry = torch.from_numpy(x_spt).to(device), torch.from_numpy(y_spt).to(device), \
torch.from_numpy(x_qry).to(device), torch.from_numpy(y_qry).to(device)
# split to single task each time
for x_spt_one, y_spt_one, x_qry_one, y_qry_one in zip(x_spt, y_spt, x_qry, y_qry):
test_acc = maml.finetunning(x_spt_one, y_spt_one, x_qry_one, y_qry_one)
accs.append( test_acc )
# [b, update_step+1]
accs = np.array(accs).mean(axis=0).astype(np.float16)
print('Test acc:', accs)
state_dict=maml.state_dict()
torch.save(state_dict,'models/fer_maml_%d.pt'%(step))
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--epoch', type=int, help='epoch number', default=40000)
argparser.add_argument('--n_way', type=int, help='n way', default=5)
argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=1)
argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=7)
argparser.add_argument('--imgsz', type=int, help='imgsz', default=32)
argparser.add_argument('--imgc', type=int, help='imgc', default=1)
argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=8)
argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.4)
argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
argparser.add_argument('--update_step_test', type=int, help='update steps for finetunning', default=10)
argparser.add_argument('--model', type=str, help='vgg19 or resnet', default='vgg19')
argparser.add_argument('--device', type=str, help='cpu or cuda', default='cuda:1')
argparser.add_argument('--reload_model', type=str,help='reload maml model',default=None)
args = argparser.parse_args()
main(args)
| 37.358621 | 118 | 0.541259 |
d0441c503f5b1418b533840bfaba64021a64858d | 13,265 | py | Python | frequency_model.py | Aurelien1609/Computational-model | ee11c06d3d84f3caab2deef9b7ec2ec96e30d6bd | [
"BSD-3-Clause"
] | 1 | 2019-03-01T02:17:12.000Z | 2019-03-01T02:17:12.000Z | frequency_model.py | Aurelien1609/Computational-model | ee11c06d3d84f3caab2deef9b7ec2ec96e30d6bd | [
"BSD-3-Clause"
] | null | null | null | frequency_model.py | Aurelien1609/Computational-model | ee11c06d3d84f3caab2deef9b7ec2ec96e30d6bd | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import numpy as np
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from math import sqrt
import matplotlib.animation as animation
from dana import *
''' Firing-rate (frequency) model in computational neuroscience, built with the DANA library. '''
# ----------------------------------------------- Parameters --------------------------------------------------------------------- #
#np.random.seed(1) # Initialization of the random generator (same randoms values)
STR_density = 1000
STR_size = 1.00,1.00
STR_count = STR_size[0] * STR_size[1] * STR_density
GP_density = 1000
GP_size = 0.85,0.85
GP_count = GP_size[0] * GP_size[1] * GP_density
# Striatum
STR = np.zeros(STR_count, dtype = [("V", float), # Membrane potential
("P", float, 2)]) # Spatial position
STR["P"][:,0] = (1.0-STR_size[0])/2 + np.random.uniform(0.0, STR_size[0], len(STR))
STR["P"][:,1] = (1.0-STR_size[1])/2 + np.random.uniform(0.0, STR_size[1], len(STR))
# Globus Pallidus
GP = np.zeros(GP_count, dtype = [("V", float), # Membrane potential
("P", float, 2)]) # Spatial position
GP["P"][:,0] = (1.0-GP_size[0])/2 + np.random.uniform(0.0, GP_size[0], len(GP))
GP["P"][:,1] = (1.0-GP_size[1])/2 + np.random.uniform(0.0, GP_size[1], len(GP))
# Striatum -> Striatum
D = cdist(STR["P"], STR["P"])
W = np.abs(np.random.normal(0.0, 0.1,(len(STR),len(STR))))
S = np.sign(np.random.uniform(-1, 3, (len(STR), len(STR)))) # sign of U(-1,3): ~75% of connections positive / ~25% negative
W_STR_STR = ((W > D)) * S
#W_STR_STR = (W > D)
np.fill_diagonal(W_STR_STR, 0) # neuron can not connect himself
# Globus Pallidus -> Globus Pallidus
D = cdist(GP["P"], GP["P"])
W = np.abs(np.random.normal(0.0,0.1,(len(GP),len(GP))))
W_GP_GP = D * (W > D)
# Striatum -> Globus Pallidus
D = cdist(STR["P"], GP["P"])
W = np.abs(np.random.normal(0.0,0.1,(len(STR),len(GP))))
W_STR_GP = D * (W > D)
# Globus Pallidus -> Striatum
D = cdist(GP["P"], STR["P"])
W = np.abs(np.random.normal(0.0,0.1,(len(GP),len(STR))))
W_GP_STR = D * (W > D)
def save_connections() : np.savez("connections.npz", W_GP_STR, W_STR_GP, W_GP_GP, W_STR_STR)
def load_connections() : W_GP_STR, W_STR_GP, W_GP_GP, W_STR_STR = np.load("connections.npz")
# ----------------------------------------------- Model --------------------------------------------------------------------- #
duration = 200 * millisecond # Default trial duration
dt = 1.0 / 1024 * second # Default time resolution: a power of 2 to avoid rounding error
#dt = 1 * millisecond
STR_H = -65.0 # Resting potentiel
Threshold = -30.0 # Maximal Voltage for Spikes activity
STR_tau = 0.1 # Time constants
STR_N = 1 * 10 ** -3 # Noise level
V_init = -65.0 # Init potential V
# Sigmoid parameter
fmin = 0.0
fmax = 1.0 # frequency PA
slope = 1.0 # steepness of the slope
mean_freq = -40.0
# Functions
def noise(V, level) :
""" Initial level of the noisy neurons """
V *= (1 + np.random.uniform(-level, level, V.shape))
return V
def sigmoid(V, Fmin = 0, Fmax = 1, mean_freq = 0, slope = 1) :
""" Boltzmann sigmoid
Returns values between [fmin, fmax] """
V = Fmin + ((Fmax - Fmin) / (1.0 + np.exp((mean_freq - V) / slope)))
return V
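# Editor's note: with the parameter values above (fmin=0, fmax=1, mean_freq=-40, slope=1) the
# sigmoid maps membrane potential to a normalised firing rate,
#     f(V) = fmin + (fmax - fmin) / (1 + exp((mean_freq - V) / slope)),
# so f(-40) = 0.5 and f(-30) ~ 0.99995.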
# Populations #
G_STR = zeros(len(STR), 'dV/dt = (-V + I_int + I_ext + Input + STR_H)/ STR_tau; U = sigmoid(noise(V, STR_N), fmin, fmax, mean_freq, slope); I_int; I_ext; Input;')
G_STR.V = V_init + np.random.uniform(-STR_N, STR_N, G_STR.V.shape)
G_STR.U = sigmoid(G_STR.V, fmin, fmax, mean_freq, slope)
# Connectivity #
SparseConnection(G_STR('U'), G_STR('I_int'), W_STR_STR * 10) # faster computation with sparse matrix
# Electrode Stimulation #
def input_current(voltage = 100.0, position = [0.5, 0.5], tau_Stim = 0.15) :
''' Add current in dv/dt equation '''
Stim = np.array([(voltage, position)], dtype=[('V', '<f8'), ('P', '<f8', 2)])
Distance_Stim_Neurons = cdist(Stim['P'], STR['P']) # Compute distance between electrode and neurons in STR
Stim_Voltage = np.exp(-Distance_Stim_Neurons / tau_Stim) * voltage # Value of stim voltage
#Stim_Voltage = voltage * (Distance_Stim_Neurons < 0.3)
return Distance_Stim_Neurons, Stim_Voltage
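# Editor's note: the injected current decays exponentially with distance from the electrode,
# Stim_Voltage(d) = voltage * exp(-d / tau_Stim); with the values used below (voltage=500,
# tau=0.15, electrode at (0.5, 0.5)) a neuron 0.15 away receives about 500/e ~ 184.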
Input = 500.0 # here, we add current
Position = [0.5, 0.5] # electrode position
tau = 0.15
dist_Stim_STR, input_STR = input_current(Input, Position, tau)
@clock.at(1 * millisecond) # avoid 0 ms
def data(times) :
G_STR.Input = input_STR
@clock.at(20 * millisecond)
def data(times) :
G_STR.Input = 0.0
# Trial setup #
time = int(duration / dt) + 1
STR_record_V = np.zeros((time, len(STR)))
STR_record_U = np.zeros((time, len(STR)))
STR_record_Input = np.zeros((time, len(STR)))
STR_record_I_int = np.zeros((time, len(STR)))
record_index = 0
@before(clock.tick)
def save_data(times):
global record_index
STR_record_V[record_index] = G_STR.V
STR_record_U[record_index] = G_STR.U
STR_record_Input[record_index] = G_STR.Input
STR_record_I_int[record_index] = G_STR.I_int
record_index += 1
# Simulation #
run(time = duration, dt = dt)
# ----------------------------------------------- Displays --------------------------------------------------------------------- #
def infos() :
'''Some information about the neuronal populations for each structures'''
print "Striatal populations: %d" % len(STR)
print "Pallidal populations: %d" % len(GP)
print
C = (W_STR_STR > 0).sum(axis=1) + (W_STR_STR < 0).sum(axis=1)
L = W_STR_STR[np.where(W_STR_STR != 0)]
print "Collateral striatal connections"
print "Mean number: %g (+/- %g)" % (C.mean(), C.std())
print "Mean length: %g (+/- %g)" % (L.mean(), L.std())
print
C = (W_GP_GP > 0).sum(axis=1) + (W_GP_GP < 0).sum(axis=1)
L = W_GP_GP[np.where(W_GP_GP != 0)]
print "Collateral pallidal connections"
print "Mean number: %g (+/- %g)" % (C.mean(), C.std())
print "Mean length: %g (+/- %g)" % (L.mean(), L.std())
print
C = (W_STR_GP > 0).sum(axis=1) + (W_STR_GP < 0).sum(axis=1)
L = W_STR_GP[np.where(W_STR_GP != 0)]
print "Striato-pallidal connections"
print "Mean number: %g (+/- %g)" % (C.mean(), C.std())
print "Mean length: %g (+/- %g)" % (L.mean(), L.std())
print
print "Mean # collateral striato-pallidal connections: %g (+/- %g)" % (C.mean(), C.std())
C = (W_GP_STR > 0).sum(axis=1) + (W_GP_STR < 0).sum(axis=1)
L = W_GP_STR[np.where(W_GP_STR != 0)]
print "Pallido-striatal connections"
print "Mean number: %g (+/- %g)" % (C.mean(), C.std())
print "Mean length: %g (+/- %g)" % (L.mean(), L.std())
print
infos()
def connections() :
''' The graph of connections of afferent and efferent neurons '''
def on_pick(event):
'''Show neuronals afferants with a click and efferents with control and click '''
button = event.mouseevent.button
index = event.ind[0]
# Clear previous selection/connections
STR_selection_plot.set_data([],[])
STR_connections_plot.set_data([],[])
GP_selection_plot.set_data([],[])
GP_connections_plot.set_data([],[])
# --- Output connections ---
if button == 1:
STR_connections_plot.set_color('red')
GP_connections_plot.set_color('red')
if event.artist == STR_plot:
x,y = STR[index]['P']
STR_selection_plot.set_data([x],[y])
I = W_STR_STR[:,index].nonzero()
STR_connections_plot.set_data(STR['P'][I,0], STR['P'][I,1])
I = W_GP_STR[:,index].nonzero()
GP_connections_plot.set_data(GP['P'][I,0], GP['P'][I,1])
elif event.artist == GP_plot:
x,y = GP[index]['P']
GP_selection_plot.set_data([x],[y])
I = W_GP_GP[:,index].nonzero()
GP_connections_plot.set_data(GP['P'][I,0], GP['P'][I,1])
I = W_STR_GP[:,index].nonzero()
STR_connections_plot.set_data(STR['P'][I,0], STR['P'][I,1])
# --- Input connections ---
elif button == 3:
STR_connections_plot.set_color('blue')
GP_connections_plot.set_color('blue')
if event.artist == STR_plot:
x,y = STR[index]['P']
STR_selection_plot.set_data([x],[y])
I = W_STR_STR[index,:].nonzero()
STR_connections_plot.set_data(STR['P'][I,0], STR['P'][I,1])
I = W_STR_GP[index].nonzero()
GP_connections_plot.set_data(GP['P'][I,0], GP['P'][I,1])
elif event.artist == GP_plot:
x,y = GP[index]['P']
GP_selection_plot.set_data([x],[y])
I = W_GP_GP[index,:].nonzero()
GP_connections_plot.set_data(GP['P'][I,0], GP['P'][I,1])
I = W_GP_STR[index,:].nonzero()
STR_connections_plot.set_data(STR['P'][I,0], STR['P'][I,1])
plt.draw()
# Figure
fig = plt.figure(figsize=(16,7), facecolor='white')
fig.canvas.mpl_connect('pick_event', on_pick)
# Striatum plot
STR_ax = plt.subplot(121, aspect=1)
STR_ax.set_title("Striatum")
STR_plot, = STR_ax.plot(STR['P'][:,0], STR['P'][:,1], 'o', color='k', alpha=0.1, picker=5)
STR_ax.set_xlim(0,1)
STR_ax.set_xticks([])
STR_ax.set_ylim(0,1)
STR_ax.set_yticks([])
STR_selection_plot, = STR_ax.plot([],[], 'o', color='black', alpha=1.0, zorder=10)
STR_connections_plot, = STR_ax.plot([],[], 'o', color='red', alpha=0.5, zorder=10)
# GP plot
GP_ax = plt.subplot(122, aspect=1)
GP_ax.set_title("Globus Pallidus")
GP_plot, = GP_ax.plot(GP['P'][:,0], GP['P'][:,1], 'o', color='k', alpha=0.1, picker=5)
GP_ax.set_xlim(0,1)
GP_ax.set_xticks([])
GP_ax.set_ylim(0,1)
GP_ax.set_yticks([])
GP_selection_plot, = GP_ax.plot([],[], 'o', color='black', alpha=1.0, zorder=10)
GP_connections_plot, = GP_ax.plot([],[], 'o', color='red', alpha=0.5, zorder=10)
plt.show()
connections()
frequence = 1
if frequence :
''' Histogram 2D of record activity '''
x = STR["P"][:,0]
y = STR["P"][:,1]
pause = False
step = 1
#step = min(int(duration / dt + 1), step) # step in ms
times = 0
bins = 18
hist_cumulate, xa, ya = np.histogram2d(x,y, bins = bins, weights = STR_record_U[0])
hist_counts_neurons, xb, yb = np.histogram2d(x,y, bins = bins)
hist_counts_neurons = np.maximum(hist_counts_neurons, 1)
mean_activity = hist_cumulate / hist_counts_neurons
def onClick(event):
''' Capture a click to turn the histogram paused '''
global pause
pause ^= True
def updatefig(i) :
''' Updated of potential activity'''
global pause, times
if not pause and times < len(STR_record_U) :
hist_cumulate, xa, ya = np.histogram2d(x,y, bins = bins, weights = STR_record_U[times])
mean_activity = hist_cumulate / hist_counts_neurons
plt.title("Mean of frequency networks = " + str("{0:.2f}".format(np.mean(STR_record_U[times])))
+ "\n Dispersion of frequency networks = " + str("{0:.2f}".format(np.std(STR_record_U[times])))
+ "\n Time = " + str(times) + " ms")
im.set_array(mean_activity)
times += step # acceleration of the visualization
return im
fig = plt.figure(figsize=(12, 8))
im = plt.imshow(mean_activity, interpolation='nearest', origin='low', extent=[0, 1, 0, 1], vmin = fmin, vmax = fmax)
# vmin = Vmin, vmax = Vmax : fix values potential V, cmap = 'hot'
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Frequency in HZ')
fig.canvas.mpl_connect('button_press_event', onClick)
ani = animation.FuncAnimation(fig, updatefig)
plt.show()
fig = plt.figure(figsize=(14, 9))
plt.subplot(222)
H = plt.hist(STR_record_U[-1], color='.5', edgecolor='w')
plt.title('Distribution of frequency at the end')
#plt.xlabel('Frequency HZ')
plt.ylabel('Number of Neurons')
plt.subplot(221)
H = plt.hist(STR_record_U[0], color='.5', edgecolor='w')
plt.title('Distribution of frequency at start')
plt.ylabel('Number of Neurons')
plt.subplot(223)
number_neuron = 0
#M = np.mean(STR_record_U[:, 0])
title = "STR Neuron " + str(number_neuron) + ", I_int " #+ str("{0:.2f}".format(M))
time_step = np.arange(0, len(STR_record_I_int))
#mean_step = np.zeros(len(STR_record_I_int)) + M
plt.plot(time_step, STR_record_I_int[:, number_neuron], c='b', label= title)
#plt.plot(time_step, mean_step, c='r', label= 'Mean')
plt.title(title)
plt.xlabel("Time (mseconds)")
plt.ylabel("Intensity (mV)")
plt.xlim(0, len(STR_record_U) - 1)
#plt.xlim(0, len(STR_record_U) - 1)
plt.subplot(224)
number_neuron = 0
M = np.mean(STR_record_U[:, 0])
title = "STR Neuron " + str(number_neuron) + ", Mean Frequency = " + str("{0:.2f}".format(M))
time_step = np.arange(0, len(STR_record_U))
mean_step = np.zeros(len(STR_record_U)) + M
plt.plot(time_step, STR_record_U[:, number_neuron], c='b', label= title)
plt.plot(time_step, mean_step, c='r', label= 'Mean')
plt.title(title)
plt.xlabel("Time (mseconds)")
plt.ylabel("Frequency (HZ)")
plt.xlim(0, len(STR_record_U) - 1)
plt.show()
| 32.997512 | 162 | 0.599095 |
8411ee7763470f14a25cb6ca7803dfbe67d2ba82 | 14,920 | py | Python | ViewWidget.py | Linzecong/ExcelDiffer | 97ee053cf29f70e401e9ddc65fc2f79d5da2d923 | [
"Apache-2.0"
] | 20 | 2019-03-04T11:11:30.000Z | 2022-03-14T06:52:46.000Z | ViewWidget.py | hubuyaolian/ExcelDiffer-1 | 97ee053cf29f70e401e9ddc65fc2f79d5da2d923 | [
"Apache-2.0"
] | 3 | 2019-03-04T11:12:44.000Z | 2022-01-12T18:06:15.000Z | ViewWidget.py | hubuyaolian/ExcelDiffer-1 | 97ee053cf29f70e401e9ddc65fc2f79d5da2d923 | [
"Apache-2.0"
] | 8 | 2019-03-28T11:07:39.000Z | 2022-01-03T19:45:52.000Z | #-*- codingg:utf8 -*-
from PyQt5.QtWidgets import QScrollBar, QWidget,QAction, QSplitter, QMainWindow, QApplication, QTableWidgetItem,QTableWidget,QHBoxLayout,QVBoxLayout
from PyQt5.QtGui import QBrush,QColor,QIcon
from PyQt5.QtCore import Qt,QSettings
import sys
from ExcelWidget import ExcelWidget
class ViewWidget(QMainWindow):
def __init__(self):
super(ViewWidget,self).__init__()
self.diff = -1
self.OldTableWidget = ExcelWidget()
self.NewTableWidget = ExcelWidget()
self.MainLayout = QHBoxLayout()
self.Splitter = QSplitter(Qt.Horizontal)
self.Splitter.addWidget(self.OldTableWidget)
self.Splitter.addWidget(self.NewTableWidget)
self.Splitter.setContentsMargins(5,5,5,5)
self.setCentralWidget(self.Splitter)
self.Lock = True
self.OldTableWidget.currentChanged.connect(lambda x:self.setSame(x,0))
self.NewTableWidget.currentChanged.connect(lambda x:self.setSame(x,1))
self.OldTableWidget.cellClicked.connect(lambda x,y:self.setSameCell(x,y,0))
self.NewTableWidget.cellClicked.connect(lambda x,y:self.setSameCell(x,y,1))
self.OldTableWidget.hbarchange.connect(lambda x:self.NewTableWidget.TableWidgets[self.NewTableWidget.currentIndex()].horizontalScrollBar().setValue(x))
self.NewTableWidget.vbarchange.connect(lambda x:self.OldTableWidget.TableWidgets[self.OldTableWidget.currentIndex()].verticalScrollBar().setValue(x))
self.NewTableWidget.hbarchange.connect(lambda x:self.OldTableWidget.TableWidgets[self.OldTableWidget.currentIndex()].horizontalScrollBar().setValue(x))
self.OldTableWidget.vbarchange.connect(lambda x:self.NewTableWidget.TableWidgets[self.NewTableWidget.currentIndex()].verticalScrollBar().setValue(x))
self.initAction()
self.initToolbar()
# self.MainLayout.addWidget(self.Splitter)
# self.setLayout(self.MainLayout)
def setSameCell(self,x,y,type1):
if self.Lock == False:
return
if type1 == 0:
self.NewTableWidget.currentWidget().setCurrentCell(x,y)
else:
self.OldTableWidget.currentWidget().setCurrentCell(x,y)
def setSame(self,id,type1):
if self.Lock == False:
return
if type1 == 0:
text = self.OldTableWidget.tabText(id)
for i in range(self.NewTableWidget.count()):
if text == self.NewTableWidget.tabText(i):
self.NewTableWidget.setCurrentIndex(i)
else:
text = self.NewTableWidget.tabText(id)
for i in range(self.OldTableWidget.count()):
if text == self.OldTableWidget.tabText(i):
self.OldTableWidget.setCurrentIndex(i)
def initToolbar(self):
self.toolbar = self.addToolBar("tabletool")
def initAction(self):
self.LockAction = QAction(QIcon("icon/lock.png"),"锁定",self)
self.LockAction.setStatusTip("锁定表格,使得切换标签页时,新旧两个表格同步,且比较时将比较整个文件!")
self.LockAction.triggered.connect(self.lockTab)
self.UnlockAction = QAction(QIcon("icon/unlock.png"),"解锁",self)
self.UnlockAction.setStatusTip("解锁表格,使得切换标签页时,新旧两个表格不会同步,且只比较选定的标签!")
self.UnlockAction.triggered.connect(self.unlockTab)
def lockTab(self):
self.Lock = True
self.toolbar.removeAction(self.LockAction)
self.toolbar.addAction(self.UnlockAction)
def unlockTab(self):
self.Lock = False
self.toolbar.removeAction(self.UnlockAction)
self.toolbar.addAction(self.LockAction)
def setOldTable(self,data):
self.OldTableWidget.setData(data)
def setNewTable(self,data):
self.NewTableWidget.setData(data)
def ABCToInt(self, s):
dict0 = {}
for i in range(26):
dict0[chr(ord('A')+i)]=i+1
output = 0
for i in range(len(s)):
output = output*26+dict0[s[i]]
return output
def setHighLight(self,widget,difftype,id):
"""
0 old
1 new
2 both
"""
self.ColorSettings = QSettings("ExcelDiffer", "Color");
hightlight = self.ColorSettings.value("hightlight")
self.setColor(self.diff,self.oi,self.ni)
if widget == 0:
if difftype == "del_col":
col = self.ABCToInt(self.diff[difftype][id])
self.OldTableWidget.TableWidgets[self.oi].setCurrentCell(0,col-1)
for i in range(self.OldTableWidget.TableWidgets[self.oi].rowCount()):
self.OldTableWidget.TableWidgets[self.oi].item(i,col-1).setBackground(QBrush(QColor(hightlight)))
if difftype == "del_row":
row = self.diff[difftype][id]
self.OldTableWidget.TableWidgets[self.oi].setCurrentCell(row-1,0)
for j in range(self.OldTableWidget.TableWidgets[self.oi].columnCount()):
self.OldTableWidget.TableWidgets[self.oi].item(row-1,j).setBackground(QBrush(QColor(hightlight)))
if difftype == "change_cell":
rec = self.diff[difftype][id]
j = self.ABCToInt(rec[0][1])
self.OldTableWidget.TableWidgets[self.oi].setCurrentCell(rec[0][0]-1,j-1)
self.OldTableWidget.TableWidgets[self.oi].item(rec[0][0]-1,j-1).setBackground(QBrush(QColor(hightlight)))
j = self.ABCToInt(rec[1][1])
self.NewTableWidget.TableWidgets[self.ni].setCurrentCell(rec[1][0]-1,j-1)
self.NewTableWidget.TableWidgets[self.ni].item(rec[1][0]-1,j-1).setBackground(QBrush(QColor(hightlight)))
if difftype == "del_merge":
rec = self.diff["del_merge"][id]
self.OldTableWidget.TableWidgets[self.oi].setCurrentCell(rec[0],rec[2])
for i in range(rec[0],rec[1]):
for j in range(rec[2],rec[3]):
self.OldTableWidget.TableWidgets[self.oi].item(i,j).setBackground(QBrush(QColor(hightlight)))
if difftype == "row_exchange":
i = self.diff["row_exchange"][id]
self.OldTableWidget.TableWidgets[self.oi].setCurrentCell(i[0]-1,0)
self.NewTableWidget.TableWidgets[self.ni].setCurrentCell(i[1]-1,0)
for j in range(self.OldTableWidget.TableWidgets[self.oi].columnCount()):
self.OldTableWidget.TableWidgets[self.oi].item(i[0]-1,j).setBackground(QBrush(QColor(hightlight)))
for j in range(self.NewTableWidget.TableWidgets[self.ni].columnCount()):
self.NewTableWidget.TableWidgets[self.ni].item(i[1]-1,j).setBackground(QBrush(QColor(hightlight)))
if difftype == "col_exchange":
s = self.diff["col_exchange"][id]
j1 = self.ABCToInt(s[0])
j2 = self.ABCToInt(s[1])
self.OldTableWidget.TableWidgets[self.oi].setCurrentCell(0,j1-1)
self.NewTableWidget.TableWidgets[self.ni].setCurrentCell(0,j2-1)
for i in range(self.OldTableWidget.TableWidgets[self.oi].rowCount()):
self.OldTableWidget.TableWidgets[self.oi].item(i,j1-1).setBackground(QBrush(QColor(hightlight)))
for i in range(self.NewTableWidget.TableWidgets[self.ni].rowCount()):
self.NewTableWidget.TableWidgets[self.ni].item(i,j2-1).setBackground(QBrush(QColor(hightlight)))
elif widget == 1:
if difftype == "add_col":
col = self.ABCToInt(self.diff[difftype][id])
self.NewTableWidget.TableWidgets[self.ni].setCurrentCell(0,col-1)
for i in range(self.NewTableWidget.TableWidgets[self.ni].rowCount()):
self.NewTableWidget.TableWidgets[self.ni].item(i,col-1).setBackground(QBrush(QColor(hightlight)))
if difftype == "add_row":
row = self.diff[difftype][id]
self.NewTableWidget.TableWidgets[self.ni].setCurrentCell(row-1,0)
for j in range(self.NewTableWidget.TableWidgets[self.ni].columnCount()):
self.NewTableWidget.TableWidgets[self.ni].item(row-1,j).setBackground(QBrush(QColor(hightlight)))
if difftype == "change_cell":
rec = self.diff[difftype][id]
j = self.ABCToInt(rec[0][1])
self.OldTableWidget.TableWidgets[self.oi].setCurrentCell(rec[0][0]-1,j-1)
self.OldTableWidget.TableWidgets[self.oi].item(rec[0][0]-1,j-1).setBackground(QBrush(QColor(hightlight)))
j = self.ABCToInt(rec[1][1])
self.NewTableWidget.TableWidgets[self.ni].setCurrentCell(rec[1][0]-1,j-1)
self.NewTableWidget.TableWidgets[self.ni].item(rec[1][0]-1,j-1).setBackground(QBrush(QColor(hightlight)))
if difftype == "new_merge":
rec = self.diff["new_merge"][id]
self.NewTableWidget.TableWidgets[self.ni].setCurrentCell(rec[0],rec[2])
for i in range(rec[0],rec[1]):
for j in range(rec[2],rec[3]):
self.NewTableWidget.TableWidgets[self.ni].item(i,j).setBackground(QBrush(QColor(hightlight)))
if difftype == "row_exchange":
i = self.diff["row_exchange"][id]
self.OldTableWidget.TableWidgets[self.oi].setCurrentCell(i[0]-1,0)
for j in range(self.OldTableWidget.TableWidgets[self.oi].columnCount()):
self.OldTableWidget.TableWidgets[self.oi].item(i[0]-1,j).setBackground(QBrush(QColor(hightlight)))
self.NewTableWidget.TableWidgets[self.ni].setCurrentCell(i[1]-1,0)
for j in range(self.NewTableWidget.TableWidgets[self.ni].columnCount()):
self.NewTableWidget.TableWidgets[self.ni].item(i[1]-1,j).setBackground(QBrush(QColor(hightlight)))
if difftype == "col_exchange":
s = self.diff["col_exchange"][id]
j1 = self.ABCToInt(s[0])
j2 = self.ABCToInt(s[1])
self.OldTableWidget.TableWidgets[self.oi].setCurrentCell(0,j1-1)
for i in range(self.OldTableWidget.TableWidgets[self.oi].rowCount()):
self.OldTableWidget.TableWidgets[self.oi].item(i,j1-1).setBackground(QBrush(QColor(hightlight)))
self.NewTableWidget.TableWidgets[self.ni].setCurrentCell(0,j2-1)
for i in range(self.NewTableWidget.TableWidgets[self.ni].rowCount()):
self.NewTableWidget.TableWidgets[self.ni].item(i,j2-1).setBackground(QBrush(QColor(hightlight)))
else:
pass
def setColor(self,diff,oi=-1,ni=-1):
self.ColorSettings = QSettings("ExcelDiffer", "Color");
hightlight = self.ColorSettings.value("hightlight")
background = self.ColorSettings.value("background");
exchange = self.ColorSettings.value("exchange");
add = self.ColorSettings.value("add");
delcolor = self.ColorSettings.value("delcolor");
change = self.ColorSettings.value("change");
self.diff = diff
if oi==-1:
oi = self.OldTableWidget.currentIndex()
ni = self.NewTableWidget.currentIndex()
self.oi=oi
self.ni=ni
for i in range(self.NewTableWidget.TableWidgets[ni].rowCount()):
for j in range(self.NewTableWidget.TableWidgets[ni].columnCount()):
self.NewTableWidget.TableWidgets[ni].item(i,j).setBackground(QBrush(QColor(background)))
self.NewTableWidget.TableWidgets[ni].item(i,j).setForeground(QBrush(QColor("#000000")))
for i in range(self.OldTableWidget.TableWidgets[oi].rowCount()):
for j in range(self.OldTableWidget.TableWidgets[oi].columnCount()):
self.OldTableWidget.TableWidgets[oi].item(i,j).setBackground(QBrush(QColor(background)))
self.OldTableWidget.TableWidgets[oi].item(i,j).setForeground(QBrush(QColor("#000000")))
for i in diff["row_exchange"]:
for j in range(self.OldTableWidget.TableWidgets[oi].columnCount()):
self.OldTableWidget.TableWidgets[oi].item(i[0]-1,j).setBackground(QBrush(QColor(exchange)))
for j in range(self.NewTableWidget.TableWidgets[ni].columnCount()):
self.NewTableWidget.TableWidgets[ni].item(i[1]-1,j).setBackground(QBrush(QColor(exchange)))
for s in diff["col_exchange"]:
j1 = self.ABCToInt(s[0])
j2 = self.ABCToInt(s[1])
for i in range(self.OldTableWidget.TableWidgets[oi].rowCount()):
self.OldTableWidget.TableWidgets[oi].item(i,j1-1).setBackground(QBrush(QColor(exchange)))
for i in range(self.NewTableWidget.TableWidgets[ni].rowCount()):
self.NewTableWidget.TableWidgets[ni].item(i,j2-1).setBackground(QBrush(QColor(exchange)))
for rec in diff["new_merge"]:
for i in range(rec[0],rec[1]):
for j in range(rec[2],rec[3]):
self.NewTableWidget.TableWidgets[ni].item(i,j).setBackground(QBrush(QColor(add)))
for rec in diff["del_merge"]:
for i in range(rec[0],rec[1]):
for j in range(rec[2],rec[3]):
self.OldTableWidget.TableWidgets[oi].item(i,j).setBackground(QBrush(QColor(delcolor)))
for s in diff["add_col"]:
j = self.ABCToInt(s)
for i in range(self.NewTableWidget.TableWidgets[ni].rowCount()):
self.NewTableWidget.TableWidgets[ni].item(i,j-1).setBackground(QBrush(QColor(add)))
for s in diff["del_col"]:
j = self.ABCToInt(s)
for i in range(self.OldTableWidget.TableWidgets[oi].rowCount()):
self.OldTableWidget.TableWidgets[oi].item(i,j-1).setBackground(QBrush(QColor(delcolor)))
for i in diff["add_row"]:
for j in range(self.NewTableWidget.TableWidgets[ni].columnCount()):
self.NewTableWidget.TableWidgets[ni].item(i-1,j).setBackground(QBrush(QColor(add)))
for i in diff["del_row"]:
for j in range(self.OldTableWidget.TableWidgets[oi].columnCount()):
self.OldTableWidget.TableWidgets[oi].item(i-1,j).setBackground(QBrush(QColor(delcolor)))
for rec in diff["change_cell"]:
j = self.ABCToInt(rec[0][1])
self.OldTableWidget.TableWidgets[oi].item(rec[0][0]-1,j-1).setBackground(QBrush(QColor(change)))
j = self.ABCToInt(rec[1][1])
self.NewTableWidget.TableWidgets[ni].item(rec[1][0]-1,j-1).setBackground(QBrush(QColor(change)))
if __name__=="__main__":
app = QApplication(sys.argv)
main = ViewWidget()
main.show()
sys.exit(app.exec_()) | 52.350877 | 159 | 0.623995 |
d92cf40ab5c5e3dc812b111ba84bce6a36bf7565 | 1,229 | py | Python | server/static/old/FacialDetect.py | loudest/Videostream-Face-Biometrics | 44c297c59bf14cd0f9a59c68f5718718e14b0c6e | [
"MIT"
] | 2 | 2017-07-13T13:13:33.000Z | 2020-03-27T02:06:56.000Z | server/static/old/FacialDetect.py | loudest/Videostream-Face-Biometrics | 44c297c59bf14cd0f9a59c68f5718718e14b0c6e | [
"MIT"
] | null | null | null | server/static/old/FacialDetect.py | loudest/Videostream-Face-Biometrics | 44c297c59bf14cd0f9a59c68f5718718e14b0c6e | [
"MIT"
] | null | null | null | import numpy as np
import cv2
def facialDetect(cascadePath=None):
if cascadePath == None:
cascadePath = "haarcascades/haarcascade_frontalface_default.xml"
camera = cv2.VideoCapture(0)
while True:
res, frame = camera.read()
cascade = cv2.CascadeClassifier(cascadePath)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5, minSize=(30, 30))
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow('face detection', frame)
k = cv2.waitKey(1) & 0xFF
if k == 27:
cv2.destroyAllWindows()
break
del(camera)
def frameFacialDetect(frame, cascadePath=None, color=None):
if cascadePath == None:
cascadePath = "haarcascade_frontalface_default.xml"
if color == None:
color = (0, 255, 0)
cascade = cv2.CascadeClassifier(cascadePath)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5, minSize=(30, 30))
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
return frame | 38.40625 | 97 | 0.631408 |
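# Editor's sketch (illustrative): frameFacialDetect can annotate frames from any OpenCV source, e.g.
#
#     cap = cv2.VideoCapture(0)
#     ok, frame = cap.read()
#     if ok:
#         cv2.imshow("faces", frameFacialDetect(frame))
#         cv2.waitKey(0)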
8266904a36c348eb901c23f43201155bf23cf202 | 4,603 | py | Python | main.py | kokonut27/BHTML-Compiler | 39e7e79d789be3ded946965d2b152c4cf6aa8459 | [
"MIT"
] | 6 | 2021-04-21T22:46:08.000Z | 2021-12-19T14:12:59.000Z | main.py | kokonut27/BHTML-Compiler | 39e7e79d789be3ded946965d2b152c4cf6aa8459 | [
"MIT"
] | 2 | 2021-04-21T22:54:23.000Z | 2021-04-26T12:35:53.000Z | main.py | kokonut27/BHTML-Compiler | 39e7e79d789be3ded946965d2b152c4cf6aa8459 | [
"MIT"
] | 3 | 2021-04-21T22:48:37.000Z | 2021-04-24T21:44:14.000Z | import os, sys, time, re
import string
fp = input('FilePath: ')
def parse(file):
print("i started parsing!!!")
if '.bhtml' in file:
pass
else:
raise Exception("The file is not a 'bhtml' file")
try:
f = open(file,'r')
except:
raise Exception("There is no such file")
content = f.read()
colist = content.split("\n")
'''
load = 0
for i in colist:
if i:
load+=1
'''
'''
num=0
while num < load:
print("Compiling... |")
time.sleep(0.08)
os.system('clear')
print("Compiling... \ ")
time.sleep(0.08)
os.system('clear')
print("Compiling... -")
time.sleep(0.08)
os.system('clear')
print("Compiling... |")
time.sleep(0.08)
os.system('clear')
print('Compiling... /')
time.sleep(0.08)
os.system('clear')
num+=1
'''
def check():
df = re.findall("(?<=[AZaz])?(?!\d*=)[0-9.+-]+", lines)
df = str(df)
def wait_until(somepredicate, timeout, period=0.25, *args, **kwargs):
mustend = time.time() + timeout
while time.time() < mustend:
if somepredicate(*args, **kwargs): return True
time.sleep(period)
return False
allvars = {}
line = 0
read_line=0
getChar1 = "none"
getChar2 = "none"
getChar3 = "none"
var1 = "Undefined variable"
input1 = "Undefined input"
input2 = "Undefined input"
input3 = "Undefined input"
def docTAG():
try:
if '<!DOCTYPE bhtml>' in lines:#this way is more recommended
bhtmldoc = True
elif '<!doctype bhtml>' in lines:
bhtmldoc = True
elif '<!doctype BHTML>' in lines:
bhtmldoc = True
elif '<!DOCTYPE BHTML>' in lines:
bhtmldoc = True
else:
pass
except:
pass
def aTAG():
try:
#lines = lines.replace(' ','') PUT DELETE SPACES FUNCTION HERE
if ('<a href = "' in lines):
wrd = '<a href = "'
res = lines.partition(wrd)[2]
split_string = res.split("\">", 1)
res = split_string[0]
print(res) # been doing too much js
#print(lines)
os.system(f"touch {res}.bhtml") # creates file
ee = lines.partition(f'<a href = "{res}">')
f = ee[2]
f = str(f)
f = f.replace('</a>','')
#print(f)
# try:
#except:
# raise Exception("ERROR")
print("BEFORE")
print(res)
#parse(f"{res}.bhtml")
print("AFTER")
# code here, it means it passed with href tag
elif ('<a id = "'):
pass
else:
pass
except:
raise Exception("ERROR")
def pTAG():
try:
if '</p>' in lines:#maybe replace </p> with </>?
wrd = '<p>'
res = lines.partition(wrd)[2]
res = res.replace('</p>', '')
#res = res.replace(' ', '')
res = res.replace('{getChar1}', getChar1)
res = res.replace('{getChar2}', getChar2)
res = res.replace('{getChar3}', getChar3)
res = res.replace("{{input1}}", input1)
res = res.replace("{{input2}}", input2)
res = res.replace("{{input3}}", input3)
res = res.replace("{{var1}}", var1)
if "{{" in res:
if "}}" in res:
start = "{{"
end = "}}"
check = res[res.find(start) + len(start):res.rfind(end)]
if check in allvars:
res = res.replace('{{','')
res = res.replace('}}','')
e = allvars[check]
res = res.replace(check, str(e))
else:
exit()#add error
wait_until("</p>", 0)
split_string = res.split("</p>", -1)
res = split_string[0]
print(res)
else:
pass
except:
raise Exception("ERROR")
def h1TAG():
pass
newvar = 0
file = open(fp)
readline2 = 0
for lines in file.readlines():
if "<!--" in lines:
wait_until("-->", 0)
readline2=1
if readline2 == 1:
continue
line+=1
lines = lines.replace('\n','')
lines = lines.replace('\t','')
if lines == '':
pass
elif "<!--" in lines:
wait_until("-->", 0)
pass
lines = lines.rstrip()
if "</p>" in lines:
pTAG()
if "<a href" in lines:
aTAG()
elif lines in string.whitespace:#i might remove this
pass
elif type(lines) == str:#if the code inside index.bhtml is string, it prints, like regular html
print(str(lines))
parse(fp) | 23.247475 | 99 | 0.494026 |
cb721357046f290b4a4f3e82a54f8c1537b2b002 | 8,040 | py | Python | src/graph_transpiler/webdnn/optimizer/sub_rules/merge_sgemm_and_elementwise_mul.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | 1 | 2018-07-26T13:52:21.000Z | 2018-07-26T13:52:21.000Z | src/graph_transpiler/webdnn/optimizer/sub_rules/merge_sgemm_and_elementwise_mul.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | null | null | null | src/graph_transpiler/webdnn/optimizer/sub_rules/merge_sgemm_and_elementwise_mul.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | null | null | null | from typing import Tuple
from webdnn.graph import traverse
from webdnn.graph.axis import Axis
from webdnn.graph.graph import Graph
from webdnn.graph.operators.elementwise_mul import ElementwiseMul
from webdnn.graph.operators.sgemm import Sgemm
from webdnn.graph.operators.transpose import Transpose
from webdnn.graph.optimize_rule import OptimizeRule
from webdnn.graph.order import Order
from webdnn.graph.variable import Variable
from webdnn.graph.variables.constant_variable import ConstantVariable
from webdnn.util import flags
from webdnn.util.misc import mul
class MergeSgemmAndElementwiseMul(OptimizeRule):
"""
This optimize rule merges SGEMM weight and ElementwiseMul coefficient.
... code-block:: text

     x -+
        +-{sgemm}- h -+
    w1 -+             +-{mul}- y
                  w2 -+

In the above sub-structure, if some conditions are satisfied, it can be simplified as follows,

... code-block::

          x -+
             +-{sgemm}- y
    w1 * w2 -+
Conditions are as follows.
- :code:`w1` and :code:`w2` are :class:`~webdnn.graph.variables.constant_variable.ConstantVariable`.
- All axes in :code:`w2` are derived from :code:`w1`

Consider the following example,
... code-block::
<x shape=[5, 15], order=OrderNC>
<w1 shape=[2, 3, 4, 5], order=OrderNHWC>
<Sgemm A=w1, B=x,
       M=24, K=5, N=15,
       out_shape=[4, 6, 5, 3], out_order=OrderNCHW
       transposeA=True, transposeB=True>
<h shape=[4, 6, 5, 3] order=OrderNCHW>
<w2 shape=[6] order=OrderC>
In this case, :code:`w1` is regarded as `OrderMK` in SGEMM, and axis :code:`M` is split into :code:`N` and :code:`C` at the
end of the SGEMM.
... code-block::
                                    w1          |  x
          ======================================|=============
SGEMM's inputs' shape is:  [N:2, H:3, W:4, C:5] | [N:5, C:15]
          --------------------------------------+-------------
SGEMM reshaped them as:    [M:24, K:5]          | [K:5, N:15]
          --------------------------------------+-------------
SGEMM's output shape is:   [M:24,               | N:15]
          ----------------------------+---------+-------------
SGEMM splits axes as:      [N:4, C:6, | H:5, W:3]
                                      |
w2's shape is:             [C:6]      |
In this case, it can be said that "all axes in :code:`w2` (:code:`C`) are derived from :code:`w1`".
:code:`w1` is reinterpreted as `OrderNCK` with shape :code:`(4, 6, 5)`. Also, :code:`w2` is reinterpreted as `OrderNCK` with
shape :code:`(1, 6, 1)`. Then, :code:`w1` and :code:`w2` are multiplied elementwise.
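
A minimal usage sketch (in practice the optimizer pipeline applies registered rules for you; `graph` here is
assumed to be an already-constructed :class:`~webdnn.graph.graph.Graph`, and the rule is assumed to be
instantiable with no arguments):

... code-block:: python

    rule = MergeSgemmAndElementwiseMul()
    graph, changed = rule.optimize(graph)  # `changed` is True if a coefficient was folded into the SGEMM weight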
"""
def flags(self):
return [
flags.optimize.OPTIMIZE,
flags.optimize.MERGE_SGEMM_AND_ELEMENTWISE_MUL,
]
def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
flag_changed = False
matches = traverse.search_sub_structure(graph, [Sgemm, Variable, ElementwiseMul])
while len(matches) > 0:
match = matches.pop()
sgemm = match[0] # type: Sgemm
elementwise_mul = match[2] # type: ElementwiseMul
out_order = sgemm.parameters["out_order"]
out_shape = sgemm.parameters["out_shape"]
axis_k = Axis('AxisK')
if not isinstance(sgemm.inputs["A"], ConstantVariable) and not isinstance(sgemm.inputs["B"], ConstantVariable):
# neither x nor w1 is constant
continue
elif isinstance(sgemm.inputs["A"], ConstantVariable):
w1 = sgemm.inputs["A"] # type: ConstantVariable
if sgemm.transpose_A:
# w1.shape = (M, K)
shape = []
axes = []
for axis, size in zip(out_order.axes, out_shape):
shape.append(size)
axes.append(axis)
if mul(shape) >= sgemm.M:
break
if mul(shape) != sgemm.M:
# output axes are derived from both w1 and x
continue
w1_virtual_order = Order(axes + [axis_k])
w1_virtual_shape = shape + [sgemm.K]
else:
# w1.shape = (K, M)
shape = [sgemm.K]
axes = [axis_k]
for axis, size in zip(out_order.axes, out_shape):
shape.append(size)
axes.append(axis)
if mul(shape) >= w1.size:
break
if mul(shape) != w1.size:
# output axes are derived from both w1 and x
continue
w1_virtual_order = Order(axes)
w1_virtual_shape = shape
else:
w1 = sgemm.inputs["B"] # type: ConstantVariable
if sgemm.transpose_B:
# w1.shape = (K, N)
shape = []
axes = []
for axis, size in reversed(list(zip(out_order.axes, out_shape))):
shape.insert(0, size)
axes.insert(0, axis)
if mul(shape) >= sgemm.N:
break
if mul(shape) != sgemm.N:
# output axes are derived from both w1 and x
continue
w1_virtual_order = Order([axis_k] + axes)
w1_virtual_shape = [sgemm.K] + shape
else:
# w1.shape = (N, K)
shape = [sgemm.K]
axes = [axis_k]
for axis, size in reversed(list(zip(out_order.axes, out_shape))):
shape.insert(0, size)
axes.insert(0, axis)
if mul(shape) >= w1.size:
break
if mul(shape) != w1.size:
# output axes are derived from both w1 and x
continue
w1_virtual_order = Order(axes)
w1_virtual_shape = shape
h = sgemm.outputs["C"] # type: Variable
x0 = elementwise_mul.inputs["x0"]
x1 = elementwise_mul.inputs["x1"]
if h == x1:
if not isinstance(x0, ConstantVariable):
# w2 is not constant
continue
w2 = x0 # type: ConstantVariable
else:
if not isinstance(x1, ConstantVariable):
# w2 is not constant
continue
w2 = x1 # type: ConstantVariable
y = elementwise_mul.outputs["y"] # type: Variable
if not all(axis in w1_virtual_order.axes for axis in w2.order.axes):
# w2's axes are derived from both w1 and x
continue
elementwise_mul.remove_all()
y_dummy, = Transpose(None)(h)
y_dummy.change_order(y.order)
y_dummy.replace(y)
w2.change_order(w1_virtual_order)
w_new = ConstantVariable(w1.data.reshape(w1_virtual_shape), w1_virtual_order) * w2 # type: ConstantVariable
w1.replace(w_new, with_assert=False)
flag_changed = True
matches = traverse.search_sub_structure(graph, [Sgemm, Variable, ElementwiseMul])
return graph, flag_changed
| 36.545455 | 135 | 0.468035 |
b0cfa4b3ecf030d303632f51da390fc09933e2e0 | 590 | py | Python | util.py | IshantRam/Pong | 4b67dae587e034c5eb04e729ac3bd3920975bfd5 | [
"MIT"
] | null | null | null | util.py | IshantRam/Pong | 4b67dae587e034c5eb04e729ac3bd3920975bfd5 | [
"MIT"
] | null | null | null | util.py | IshantRam/Pong | 4b67dae587e034c5eb04e729ac3bd3920975bfd5 | [
"MIT"
] | null | null | null | import pygame
from pygame.locals import *
from sys import exit
import random
from termcolor import colored
# Height and Width
HEIGHT = 720
WIDTH = 1024
# RGB Colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
# The scores
LEFT_SCORE = 0
RIGHT_SCORE = 0
# The sounds
pygame.mixer.init()
paddleSound = pygame.mixer.Sound("assets/audio/paddel.wav")
scoreSound = pygame.mixer.Sound("assets/audio/score.wav")
wallSound = pygame.mixer.Sound("assets/audio/wall.wav")
# Clamp the given value to the closed range [min_val, max_val]
def constrain(val, min_val, max_val):
return min(max_val, max(min_val, val))
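# A few example values (quick sanity check of the clamp behaviour):
#   constrain(5, 0, 3)  -> 3   (clamped to the max)
#   constrain(-1, 0, 3) -> 0   (clamped to the min)
#   constrain(2, 0, 3)  -> 2   (already in range)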
| 21.071429 | 59 | 0.742373 |
47e49e7f026ff9e2c53a716c48067094352b29c1 | 6,257 | py | Python | apteco_api/models/paged_results_export_system_summary.py | Apteco/apteco-api | 7440c98ab10ea6d8a5997187f6fc739ce1c75d2b | [
"Apache-2.0"
] | 2 | 2020-05-21T14:24:16.000Z | 2020-12-03T19:56:34.000Z | apteco_api/models/paged_results_export_system_summary.py | Apteco/apteco-api | 7440c98ab10ea6d8a5997187f6fc739ce1c75d2b | [
"Apache-2.0"
] | null | null | null | apteco_api/models/paged_results_export_system_summary.py | Apteco/apteco-api | 7440c98ab10ea6d8a5997187f6fc739ce1c75d2b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: support@apteco.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class PagedResultsExportSystemSummary(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'offset': 'int',
'count': 'int',
'total_count': 'int',
'list': 'list[ExportSystemSummary]'
}
attribute_map = {
'offset': 'offset',
'count': 'count',
'total_count': 'totalCount',
'list': 'list'
}
def __init__(self, offset=None, count=None, total_count=None, list=None): # noqa: E501
"""PagedResultsExportSystemSummary - a model defined in OpenAPI""" # noqa: E501
self._offset = None
self._count = None
self._total_count = None
self._list = None
self.discriminator = None
self.offset = offset
self.count = count
self.total_count = total_count
self.list = list
@property
def offset(self):
"""Gets the offset of this PagedResultsExportSystemSummary. # noqa: E501
The number of items that were skipped over from the (potentially filtered) result set # noqa: E501
:return: The offset of this PagedResultsExportSystemSummary. # noqa: E501
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this PagedResultsExportSystemSummary.
The number of items that were skipped over from the (potentially filtered) result set # noqa: E501
:param offset: The offset of this PagedResultsExportSystemSummary. # noqa: E501
:type: int
"""
if offset is None:
raise ValueError("Invalid value for `offset`, must not be `None`") # noqa: E501
self._offset = offset
@property
def count(self):
"""Gets the count of this PagedResultsExportSystemSummary. # noqa: E501
The number of items returned in this page of the result set # noqa: E501
:return: The count of this PagedResultsExportSystemSummary. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this PagedResultsExportSystemSummary.
The number of items returned in this page of the result set # noqa: E501
:param count: The count of this PagedResultsExportSystemSummary. # noqa: E501
:type: int
"""
if count is None:
raise ValueError("Invalid value for `count`, must not be `None`") # noqa: E501
self._count = count
@property
def total_count(self):
"""Gets the total_count of this PagedResultsExportSystemSummary. # noqa: E501
The total number of items available in the (potentially filtered) result set # noqa: E501
:return: The total_count of this PagedResultsExportSystemSummary. # noqa: E501
:rtype: int
"""
return self._total_count
@total_count.setter
def total_count(self, total_count):
"""Sets the total_count of this PagedResultsExportSystemSummary.
The total number of items available in the (potentially filtered) result set # noqa: E501
:param total_count: The total_count of this PagedResultsExportSystemSummary. # noqa: E501
:type: int
"""
if total_count is None:
raise ValueError("Invalid value for `total_count`, must not be `None`") # noqa: E501
self._total_count = total_count
@property
def list(self):
"""Gets the list of this PagedResultsExportSystemSummary. # noqa: E501
The list of results # noqa: E501
:return: The list of this PagedResultsExportSystemSummary. # noqa: E501
:rtype: list[ExportSystemSummary]
"""
return self._list
@list.setter
def list(self, list):
"""Sets the list of this PagedResultsExportSystemSummary.
The list of results # noqa: E501
:param list: The list of this PagedResultsExportSystemSummary. # noqa: E501
:type: list[ExportSystemSummary]
"""
if list is None:
raise ValueError("Invalid value for `list`, must not be `None`") # noqa: E501
self._list = list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PagedResultsExportSystemSummary):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.671569 | 107 | 0.601726 |
6621f4a2e4b2a07b420a80ebfb626e3dbc6728bf | 767 | py | Python | example.py | KelvinGitu/MLDeploy | 0f914519173ee31bb265199b7c0e82b3470da596 | [
"MIT"
] | 3 | 2019-05-18T17:57:02.000Z | 2020-10-19T23:13:17.000Z | example.py | KelvinGitu/MLDeploy | 0f914519173ee31bb265199b7c0e82b3470da596 | [
"MIT"
] | 89 | 2019-05-16T02:21:14.000Z | 2022-03-31T02:01:15.000Z | example.py | KelvinGitu/MLDeploy | 0f914519173ee31bb265199b7c0e82b3470da596 | [
"MIT"
] | 4 | 2019-05-18T17:57:19.000Z | 2020-10-17T14:16:43.000Z | import Sentiment as s
print(s.sentiment("This movie was awesome the acting was great, and there were pythons"))
print(s.sentiment("This movie was utter junk. There were absolutely 0 pythons. I don't see what the point was at all. Horrible movie. 0/10"))
print(s.sentiment("Maize scandal"))
print(s.sentiment("Trump signs order limiting migrant asylum at U.S.-Mexico border"))
print(s.sentiment("There is awesome healthcare"))
print(s.sentiment("Kibera houses are poor"))
print(s.sentiment("This is Kenyatta University Referral Hospital, Built at a cost of KSH 11 Billion, project commenced in 2014 and was completed in 2018. The hospital will ease pressure on @KNH_hospital and enhance capacity building of health workers in the region #Big4Agenda #KenyaMbele"))
| 69.727273 | 293 | 0.780965 |
2fe2bba83405e7ac95ba65f7b9d9df48e9c5c5f2 | 7,815 | py | Python | maskrcnn_benchmark/modeling/rpn/fcos/inference.py | Ricardozzf/FCOS_PLUS | 418b0678fe5c3936ae853a23af0aeff2f085544d | [
"BSD-2-Clause"
] | 313 | 2019-07-20T08:30:17.000Z | 2022-03-29T03:10:27.000Z | maskrcnn_benchmark/modeling/rpn/fcos/inference.py | Ricardozzf/FCOS_PLUS | 418b0678fe5c3936ae853a23af0aeff2f085544d | [
"BSD-2-Clause"
] | 16 | 2019-07-25T05:56:46.000Z | 2021-11-04T06:40:08.000Z | maskrcnn_benchmark/modeling/rpn/fcos/inference.py | Ricardozzf/FCOS_PLUS | 418b0678fe5c3936ae853a23af0aeff2f085544d | [
"BSD-2-Clause"
] | 48 | 2019-07-21T14:28:57.000Z | 2022-03-20T16:30:33.000Z | import torch
from ..inference import RPNPostProcessor
from ..utils import permute_and_flatten
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.utils import cat
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms
from maskrcnn_benchmark.structures.boxlist_ops import remove_small_boxes
class FCOSPostProcessor(torch.nn.Module):
"""
Performs post-processing on the outputs of the FCOS box heads.
This is only used during testing.
"""
def __init__(self, pre_nms_thresh, pre_nms_top_n, nms_thresh,
fpn_post_nms_top_n, min_size, num_classes, dense_points):
"""
Arguments:
pre_nms_thresh (float)
pre_nms_top_n (int)
nms_thresh (float)
fpn_post_nms_top_n (int)
min_size (int)
num_classes (int)
box_coder (BoxCoder)
"""
super(FCOSPostProcessor, self).__init__()
self.pre_nms_thresh = pre_nms_thresh
self.pre_nms_top_n = pre_nms_top_n
self.nms_thresh = nms_thresh
self.fpn_post_nms_top_n = fpn_post_nms_top_n
self.min_size = min_size
self.num_classes = num_classes
self.dense_points = dense_points
def forward_for_single_feature_map(
self, locations, box_cls,
box_regression, centerness,
image_sizes):
"""
Arguments:
    locations: tensor of per-location (x, y) centers for this feature level
    box_cls: tensor of size N, C, H, W
    box_regression: tensor of size N, dense_points * 4, H, W
    centerness: tensor of size N, dense_points, H, W
    image_sizes: list of (h, w), one entry per image
"""
N, C, H, W = box_cls.shape
# put in the same format as locations
box_cls = box_cls.view(N, C, H, W).permute(0, 2, 3, 1)
box_cls = box_cls.reshape(N, -1, self.num_classes - 1).sigmoid()
box_regression = box_regression.view(N, self.dense_points * 4, H, W).permute(0, 2, 3, 1)
box_regression = box_regression.reshape(N, -1, 4)
centerness = centerness.view(N, self.dense_points, H, W).permute(0, 2, 3, 1)
centerness = centerness.reshape(N, -1).sigmoid()
candidate_inds = box_cls > self.pre_nms_thresh
pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)
# multiply the classification scores with centerness scores
box_cls = box_cls * centerness[:, :, None]
results = []
for i in range(N):
per_box_cls = box_cls[i]
per_candidate_inds = candidate_inds[i]
per_box_cls = per_box_cls[per_candidate_inds]
per_candidate_nonzeros = per_candidate_inds.nonzero()
per_box_loc = per_candidate_nonzeros[:, 0]
per_class = per_candidate_nonzeros[:, 1] + 1
per_box_regression = box_regression[i]
per_box_regression = per_box_regression[per_box_loc]
per_locations = locations[per_box_loc]
per_pre_nms_top_n = pre_nms_top_n[i]
if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():
per_box_cls, top_k_indices = \
per_box_cls.topk(per_pre_nms_top_n, sorted=False)
per_class = per_class[top_k_indices]
per_box_regression = per_box_regression[top_k_indices]
per_locations = per_locations[top_k_indices]
detections = torch.stack([
per_locations[:, 0] - per_box_regression[:, 0],
per_locations[:, 1] - per_box_regression[:, 1],
per_locations[:, 0] + per_box_regression[:, 2],
per_locations[:, 1] + per_box_regression[:, 3],
], dim=1)
h, w = image_sizes[i]
boxlist = BoxList(detections, (int(w), int(h)), mode="xyxy")
boxlist.add_field("labels", per_class)
boxlist.add_field("scores", per_box_cls)
boxlist = boxlist.clip_to_image(remove_empty=False)
boxlist = remove_small_boxes(boxlist, self.min_size)
results.append(boxlist)
return results
def forward(self, locations, box_cls, box_regression, centerness, image_sizes):
"""
Arguments:
    locations: list[tensor], per-level (x, y) locations
    box_cls: list[tensor]
    box_regression: list[tensor]
    centerness: list[tensor]
    image_sizes: list[(h, w)]
Returns:
    boxlists (list[BoxList]): the post-processed detections, after
        applying box decoding and NMS
"""
sampled_boxes = []
for _, (l, o, b, c) in enumerate(zip(locations, box_cls, box_regression, centerness)):
sampled_boxes.append(
self.forward_for_single_feature_map(
l, o, b, c, image_sizes
)
)
boxlists = list(zip(*sampled_boxes))
boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]
boxlists = self.select_over_all_levels(boxlists)
return boxlists
# TODO very similar to filter_results from PostProcessor
# but filter_results is per image
# TODO Yang: solve this issue in the future. No good solution
# right now.
def select_over_all_levels(self, boxlists):
num_images = len(boxlists)
results = []
for i in range(num_images):
scores = boxlists[i].get_field("scores")
labels = boxlists[i].get_field("labels")
boxes = boxlists[i].bbox
boxlist = boxlists[i]
result = []
# skip the background
for j in range(1, self.num_classes):
inds = (labels == j).nonzero().view(-1)
scores_j = scores[inds]
boxes_j = boxes[inds, :].view(-1, 4)
boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
boxlist_for_class.add_field("scores", scores_j)
boxlist_for_class = boxlist_nms(
boxlist_for_class, self.nms_thresh,
score_field="scores"
)
num_labels = len(boxlist_for_class)
boxlist_for_class.add_field(
"labels", torch.full((num_labels,), j,
dtype=torch.int64,
device=scores.device)
)
result.append(boxlist_for_class)
result = cat_boxlist(result)
number_of_detections = len(result)
# Limit to max_per_image detections **over all classes**
if number_of_detections > self.fpn_post_nms_top_n > 0:
cls_scores = result.get_field("scores")
image_thresh, _ = torch.kthvalue(
cls_scores.cpu(),
number_of_detections - self.fpn_post_nms_top_n + 1
)
keep = cls_scores >= image_thresh.item()
keep = torch.nonzero(keep).squeeze(1)
result = result[keep]
results.append(result)
return results
def make_fcos_postprocessor(config):
pre_nms_thresh = config.MODEL.FCOS.INFERENCE_TH
pre_nms_top_n = config.MODEL.FCOS.PRE_NMS_TOP_N
nms_thresh = config.MODEL.FCOS.NMS_TH
fpn_post_nms_top_n = config.TEST.DETECTIONS_PER_IMG
dense_points = config.MODEL.FCOS.DENSE_POINTS
box_selector = FCOSPostProcessor(
pre_nms_thresh=pre_nms_thresh,
pre_nms_top_n=pre_nms_top_n,
nms_thresh=nms_thresh,
fpn_post_nms_top_n=fpn_post_nms_top_n,
min_size=0,
num_classes=config.MODEL.FCOS.NUM_CLASSES,
dense_points=dense_points)
return box_selector
| 39.271357 | 96 | 0.606142 |
12e98e8d061c8b8076f693224d9f2fcdffbd859c | 875 | py | Python | slides/slide18.py | samdmarshall/pyconfig | 10c5d2ce5465510404c3a119f0be4a0ee9b5ae33 | [
"BSD-3-Clause"
] | 51 | 2016-05-17T19:31:30.000Z | 2020-08-16T13:55:51.000Z | slides/slide18.py | samdmarshall/pyconfig | 10c5d2ce5465510404c3a119f0be4a0ee9b5ae33 | [
"BSD-3-Clause"
] | 54 | 2016-06-03T11:13:50.000Z | 2019-03-10T22:02:57.000Z | slides/slide18.py | samdmarshall/pyconfig | 10c5d2ce5465510404c3a119f0be4a0ee9b5ae33 | [
"BSD-3-Clause"
] | 4 | 2016-05-31T16:10:01.000Z | 2017-04-07T03:23:18.000Z | #!/usr/bin/python
import pypresenter.slide
class slide18(pypresenter.slide):
def __init__(self):
super(self.__class__, self).__init__('left')
def content(self, window=None):
return "\nIntegration with Xcode is easy!"\
"\n\n"\
"1. Install pyconfig\n\n"\
"\t$ brew update\n"\
"\t$ brew tap samdmarshall/formulae\n"\
"\t$ brew install samdmarshall/formulae/pyconfig\n"\
"\n"\
"2. Add a 'Pre-Build' Script Phase to your scheme\n\n"\
"3. Invoke 'pyconfig' with the path to your config files\n\n"
def draw(self, window):
self.displayText(window, self.content())
def formatting(self):
return {
# title
"0": ['underline'],
"31": ['normal']
} | 35 | 77 | 0.513143 |
99fbb70d88a82934ebe65d9ba89df09d492d5bb6 | 1,080 | py | Python | Services/JSON-RPC/Basic_Template/test_call_basic_services.py | astroseger/dnn-model-services | 1755ac9a45d6113544d12010fb3ba95ab3a0690c | [
"MIT"
] | null | null | null | Services/JSON-RPC/Basic_Template/test_call_basic_services.py | astroseger/dnn-model-services | 1755ac9a45d6113544d12010fb3ba95ab3a0690c | [
"MIT"
] | null | null | null | Services/JSON-RPC/Basic_Template/test_call_basic_services.py | astroseger/dnn-model-services | 1755ac9a45d6113544d12010fb3ba95ab3a0690c | [
"MIT"
] | null | null | null | import jsonrpcclient
from services import registry
if __name__ == '__main__':
try:
opt = input('Which service (1|2)? ')
if opt == '1':
# Service ONE - Arithmetics
jsonrpc_method = input('Which method (add|sub|mul|div)? ')
a = input('Number 1: ')
b = input('Number 2: ')
jsonrpc_port = registry['basic_service_one']['jsonrpc']
jsonrpcclient.request(f"http://127.0.0.1:{jsonrpc_port}",
jsonrpc_method,
a=a,
b=b)
elif opt == '2':
# Service TWO - Basic Echo
jsonrpc_method = input('Which method (version|echo)? ')
jsonrpc_port = registry['basic_service_two']['jsonrpc']
jsonrpcclient.request(f"http://127.0.0.1:{jsonrpc_port}",
jsonrpc_method,
test="testing...")
else:
print('Service unavailable!')
except Exception as e:
print(e)
| 36 | 70 | 0.480556 |
f46954c4960373fd49532e76ec463644479b320b | 1,878 | py | Python | tools/pylcc/guide/eventOut.py | liaozhaoyan/surftrace | 879e9d6a4410373b211cc7a9d22dd3fa102bfbf4 | [
"MIT"
] | null | null | null | tools/pylcc/guide/eventOut.py | liaozhaoyan/surftrace | 879e9d6a4410373b211cc7a9d22dd3fa102bfbf4 | [
"MIT"
] | null | null | null | tools/pylcc/guide/eventOut.py | liaozhaoyan/surftrace | 879e9d6a4410373b211cc7a9d22dd3fa102bfbf4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# cython:language_level=2
"""
-------------------------------------------------
File Name: eventOut
Description :
Author : liaozhaoyan
date: 2021/11/3
-------------------------------------------------
Change Activity:
2021/11/3:
-------------------------------------------------
"""
__author__ = 'liaozhaoyan'
import ctypes as ct
from pylcc.lbcBase import ClbcBase
bpfPog = r"""
#include "lbc.h"
#define TASK_COMM_LEN 16
struct data_t {
u32 c_pid;
u32 p_pid;
char c_comm[TASK_COMM_LEN];
char p_comm[TASK_COMM_LEN];
};
LBC_PERF_OUTPUT(e_out, struct data_t, 128);
SEC("kprobe/wake_up_new_task")
int j_wake_up_new_task(struct pt_regs *ctx)
{
struct task_struct* parent = (struct task_struct *)PT_REGS_PARM1(ctx);
struct data_t data = {};
data.c_pid = bpf_get_current_pid_tgid() >> 32;
bpf_get_current_comm(&data.c_comm, TASK_COMM_LEN);
data.p_pid = BPF_CORE_READ(parent, pid);
bpf_core_read(&data.p_comm[0], TASK_COMM_LEN, &parent->comm[0]);
bpf_perf_event_output(ctx, &e_out, BPF_F_CURRENT_CPU, &data, sizeof(data));
return 0;
}
char _license[] SEC("license") = "GPL";
"""
class CeventOut(ClbcBase):
def __init__(self):
super(CeventOut, self).__init__("eventOut", bpf_str=bpfPog)
def _cb(self, cpu, data, size):
stream = ct.string_at(data, size)
e = self.maps['e_out'].event(stream)
print("current pid:%d, comm:%s. wake_up_new_task pid: %d, comm: %s" % (
e.c_pid, e.c_comm, e.p_pid, e.p_comm
))
def loop(self):
self.maps['e_out'].open_perf_buffer(self._cb)
try:
self.maps['e_out'].perf_buffer_poll()
except KeyboardInterrupt:
print("key interrupt.")
exit()
if __name__ == "__main__":
e = CeventOut()
e.loop()
| 26.828571 | 79 | 0.57934 |
4bbd16566a711c0a7725d46f901d9cd6d75c3233 | 204 | py | Python | testdataToAudio.py | Shameli91/prml | 3f09f87cad830d8566d523058b3ccec4de25a3c2 | [
"MIT"
] | null | null | null | testdataToAudio.py | Shameli91/prml | 3f09f87cad830d8566d523058b3ccec4de25a3c2 | [
"MIT"
] | null | null | null | testdataToAudio.py | Shameli91/prml | 3f09f87cad830d8566d523058b3ccec4de25a3c2 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.io.wavfile import write
for i in range(0, 4512):
data = np.load('./bird-audio-detection/' + str(i) + '.npy')
write('./main/testData/' + str(i) + '.wav', 48000, data) | 34 | 63 | 0.627451 |
374a2de1b2e9059689e1a639908faa7db46bbe95 | 223 | py | Python | shop/urls.py | DenisDolmatov2020/lote | cb6b1021ed541799cabb5b3e850792690debcec8 | [
"MIT"
] | null | null | null | shop/urls.py | DenisDolmatov2020/lote | cb6b1021ed541799cabb5b3e850792690debcec8 | [
"MIT"
] | null | null | null | shop/urls.py | DenisDolmatov2020/lote | cb6b1021ed541799cabb5b3e850792690debcec8 | [
"MIT"
] | null | null | null | from django.urls import path
from shop.views import ShopListView, shop_detail_view
urlpatterns = [
path('<slug:slug>/', shop_detail_view, name='detail-shop'),
path('', ShopListView.as_view(), name='list-shops')
]
| 24.777778 | 63 | 0.717489 |
d6c2f969c5b93ee71273e78f4a129232cbf2585c | 416 | py | Python | tests/commands/test_convert.py | Playfloor/bonobo | feb7ec850566ca3c2ccc139610201dbd237d6083 | [
"Apache-2.0"
] | 1,573 | 2016-12-09T09:28:50.000Z | 2022-03-31T06:16:45.000Z | tests/commands/test_convert.py | Playfloor/bonobo | feb7ec850566ca3c2ccc139610201dbd237d6083 | [
"Apache-2.0"
] | 257 | 2016-12-25T06:54:33.000Z | 2022-03-18T22:12:17.000Z | tests/commands/test_convert.py | Playfloor/bonobo | feb7ec850566ca3c2ccc139610201dbd237d6083 | [
"Apache-2.0"
] | 153 | 2016-12-09T07:23:58.000Z | 2022-03-18T22:01:23.000Z | import sys
import pytest
from bonobo.util.environ import change_working_directory
from bonobo.util.testing import all_runners
@all_runners
def test_convert(runner, tmpdir):
csv_content = "id;name\n1;Romain"
tmpdir.join("in.csv").write(csv_content)
with change_working_directory(tmpdir):
runner("convert", "in.csv", "out.csv")
assert tmpdir.join("out.csv").read().strip() == csv_content
| 23.111111 | 63 | 0.730769 |
87fc073da18fdd50aeb56e8565202e691e97d1a4 | 4,069 | py | Python | tests/operators/gpu/test_fused_relu_grad_bn_double_reduce_grad.py | Kiike5/akg | f16019261cca6b2d33b3b6f27c45ee8e6f7a834b | [
"Apache-2.0"
] | null | null | null | tests/operators/gpu/test_fused_relu_grad_bn_double_reduce_grad.py | Kiike5/akg | f16019261cca6b2d33b3b6f27c45ee8e6f7a834b | [
"Apache-2.0"
] | null | null | null | tests/operators/gpu/test_fused_relu_grad_bn_double_reduce_grad.py | Kiike5/akg | f16019261cca6b2d33b3b6f27c45ee8e6f7a834b | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from __future__ import absolute_import
import numpy as np
from akg.utils import kernel_exec as utils
from tests.common.gen_random import random_gaussian
from akg.utils.result_analysis import gpu_profiling
from akg.utils.format_transform import to_tvm_nd_array
from tests.operators.gpu.test_fused_pattern_grad import relu_grad_np
from tests.common.test_op.resnet.fused_relu_grad_bn_double_reduce_grad import fused_relu_grad_bn_double_reduce_grad
def compute_expect(inshp_data, outshp_data):
out_shape = outshp_data.shape
scale = out_shape[0] * out_shape[1] * out_shape[2]
mul = np.multiply(inshp_data, inshp_data)
mean1 = np.divide(mul, scale)
add = np.add(outshp_data, outshp_data)
addgrad = relu_grad_np(add, outshp_data).astype(inshp_data.dtype)
mul1 = np.multiply(addgrad, scale)
sub = np.subtract(mul1, inshp_data)
outdata_cast = outshp_data.astype(inshp_data.dtype)
mean2 = np.divide(inshp_data, scale)
sub1 = np.subtract(outdata_cast, mean2)
mul2 = np.multiply(sub1, inshp_data)
div = np.divide(mul2, inshp_data)
sub2 = np.subtract(sub, div)
mul3 = np.multiply(mean1, sub2).astype(outshp_data.dtype)
mul4 = np.multiply(inshp_data, inshp_data)
mean3 = np.divide(mul4, scale)
mean4 = np.divide(inshp_data, scale)
sub3 = np.subtract(outshp_data.astype(inshp_data.dtype), mean4)
mul5 = np.multiply(inshp_data, sub3)
div1 = np.divide(mul5, inshp_data)
sub4 = np.subtract(sub, div1)
mul6 = np.multiply(mean3, sub4).astype(outshp_data.dtype)
return [mul3, mul6]
def gen_data(shape, out_shape, dtype, out_dtype):
support_list = {"float16": np.float16, "float32": np.float32}
inshp_data = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
outshp_data = random_gaussian(out_shape, miu=1, sigma=0.1).astype(support_list[out_dtype])
output = np.full(out_shape, np.nan, out_dtype)
expect = compute_expect(inshp_data, outshp_data)
return inshp_data, outshp_data, output, expect
def test_fused_relu_grad_bn_double_reduce_grad(shape, out_shape, dtype="float32", layout="NHWC", out_dtype="float16", poly_sch=False):
shape_list = [shape] * 5 + [out_shape] + [shape] * 3 + [out_shape] + [shape] * 3 + [out_shape] * 3
dtype_list = [dtype] * 5 +[out_dtype] +[dtype] * 3 + [out_dtype] + [dtype] * 3 +[out_dtype] * 3
op_attrs = [layout, out_dtype]
if poly_sch:
mod = utils.op_build_test(
fused_relu_grad_bn_double_reduce_grad,
shape_list,
dtype_list,
op_attrs=op_attrs,
kernel_name="fused_relu_grad_bn_double_reduce_grad",
attrs={
"target": "cuda"})
inshp_data, outshp_data, output, expect = gen_data(shape, out_shape, dtype, out_dtype)
inputs = [inshp_data] * 5 + [outshp_data] + [inshp_data] * 3 + [outshp_data] + [inshp_data] * 3 + [outshp_data] * 3
outputs = [output, output]
arg_list = inputs + outputs
outputs = utils.mod_launch(mod, arg_list, outputs=tuple(range(-len(outputs), 0)), expect=expect)
res = np.allclose(outputs, expect, rtol=5e-03, atol=1.e-8)
print("Test {}".format("Pass" if res else "Fail"))
if not res:
print("Error cuda:========================")
print(mod.imported_modules[0].get_source())
raise AssertionError("Test fail")
inputs = to_tvm_nd_array(inputs)
expect = to_tvm_nd_array(expect)
gpu_profiling(mod, *inputs, *expect, 400)
| 43.287234 | 134 | 0.706808 |
bb32713cad3ef761bc5847b3b339d1aab75152e6 | 398 | py | Python | jobportal/migrations/0008_remove_person_join_date.py | klenks/jobsportal | 330f3b40220a9a721897a047ebaaabe98a11edde | [
"MIT"
] | null | null | null | jobportal/migrations/0008_remove_person_join_date.py | klenks/jobsportal | 330f3b40220a9a721897a047ebaaabe98a11edde | [
"MIT"
] | null | null | null | jobportal/migrations/0008_remove_person_join_date.py | klenks/jobsportal | 330f3b40220a9a721897a047ebaaabe98a11edde | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-14 23:57
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('jobportal', '0007_auto_20170314_2337'),
]
operations = [
migrations.RemoveField(
model_name='person',
name='join_date',
),
]
| 19.9 | 49 | 0.61809 |
1222d8b11f2d788ac26dd5e7b250c152ffd2c4fa | 800 | py | Python | setup.py | jsta/gssurgo | d1ff22880040fcb58347cf948cf7f3ce8b7830ee | [
"MIT"
] | 3 | 2018-08-16T02:13:46.000Z | 2020-06-03T06:31:42.000Z | setup.py | jsta/gssurgo | d1ff22880040fcb58347cf948cf7f3ce8b7830ee | [
"MIT"
] | 16 | 2018-08-16T02:14:07.000Z | 2021-04-09T10:31:49.000Z | setup.py | jsta/gSSURGO | d1ff22880040fcb58347cf948cf7f3ce8b7830ee | [
"MIT"
] | 1 | 2018-11-14T18:39:14.000Z | 2018-11-14T18:39:14.000Z | """A package that enables open source workflows with the gSSURGO dataset."""
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="gssurgo",
version="1.0.1",
author="Jemma Stachelek",
author_email="stachel2@msu.edu",
description="Python toolbox enabling an open source gSSURGO workflow",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jsta/gssurgo",
scripts=["bin/extract_gssurgo_tif"],
include_package_data=True,
packages=setuptools.find_packages(exclude=['tests']),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
| 30.769231 | 76 | 0.68625 |
7ecd210caca6ebe7d1f38bc144c284dfaf0d9adc | 1,091 | py | Python | examples/pyplot/customIndividualAxes.py | danielhrisca/vedo | 487568b7956a67b87752e3d518ba3f7e87b327a6 | [
"CC0-1.0"
] | 1 | 2021-04-25T06:28:01.000Z | 2021-04-25T06:28:01.000Z | examples/pyplot/customIndividualAxes.py | danielhrisca/vedo | 487568b7956a67b87752e3d518ba3f7e87b327a6 | [
"CC0-1.0"
] | null | null | null | examples/pyplot/customIndividualAxes.py | danielhrisca/vedo | 487568b7956a67b87752e3d518ba3f7e87b327a6 | [
"CC0-1.0"
] | null | null | null | """Create individual axes to each separate object in a scene.
Access any element to change its size and color"""
from vedo import *
# Create a bunch of objects
s1 = Sphere(pos=(10, 0, 0), r=1, c='r')
s2 = Sphere(pos=( 0,10, 0), r=2, c='g')
s3 = Sphere(pos=( 0, 0,10), r=3, c='b')
pt = Point([-4,-4,-4], c='k')
# Build individual axes for each object.
# A new Assembly object is returned:
axes1 = s1.buildAxes(c='r')
axes2 = s2.buildAxes(c='g')
axes3 = s3.buildAxes(c='b', numberOfDivisions=10)
# axes3 is an Assembly (group of Meshes).
# Unpack it and scale the 7th label, getting it by its name,
# to make it 5 times bigger and fuchsia:
axes3.unpack('xNumericLabel7').scale(5).c('fuchsia')
# Print all element names in axes3:
#for m in axes3.getMeshes(): print(m.name)
# By specifying axes in show(), new axes are
# created which span the whole bounding box.
# Options are passed through a dictionary
show(pt, s1,axes1, s2,axes2, s3,axes3, __doc__,
viewup='z',
axes=dict(c='black',
numberOfDivisions=10,
yzGrid=False,
),
)
| 32.088235 | 62 | 0.652612 |
155ba7e6691e8501725daf3f1a4e5a3a742a3a82 | 57,193 | py | Python | tests/integration/test_subscription.py | sharma7n/braintree_python | 34c36bddca7aa55512ee5129175eedcfc6d1fb30 | [
"MIT"
] | null | null | null | tests/integration/test_subscription.py | sharma7n/braintree_python | 34c36bddca7aa55512ee5129175eedcfc6d1fb30 | [
"MIT"
] | null | null | null | tests/integration/test_subscription.py | sharma7n/braintree_python | 34c36bddca7aa55512ee5129175eedcfc6d1fb30 | [
"MIT"
] | null | null | null | from tests.test_helper import *
from braintree.test.nonces import Nonces
from datetime import date, timedelta
class TestSubscription(unittest.TestCase):
def setUp(self):
self.credit_card = Customer.create({
"first_name": "Mike",
"last_name": "Jones",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2010",
"cvv": "100"
}
}).customer.credit_cards[0]
self.updateable_subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"price": Decimal("54.32"),
"plan_id": TestHelper.trialless_plan["id"]
}).subscription
def test_create_returns_successful_result_if_valid(self):
result = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"]
})
self.assertTrue(result.is_success)
subscription = result.subscription
self.assertNotEqual(None, re.search(r"\A\w{6}\Z", subscription.id))
self.assertEqual(Decimal("12.34"), subscription.price)
self.assertEqual(Decimal("12.34"), subscription.next_bill_amount)
self.assertEqual(Decimal("12.34"), subscription.next_billing_period_amount)
self.assertEqual(Subscription.Status.Active, subscription.status)
self.assertEqual("integration_trialless_plan", subscription.plan_id)
self.assertEqual(TestHelper.default_merchant_account_id, subscription.merchant_account_id)
self.assertEqual(Decimal("0.00"), subscription.balance)
self.assertEqual(date, type(subscription.first_billing_date))
self.assertEqual(date, type(subscription.next_billing_date))
self.assertEqual(date, type(subscription.billing_period_start_date))
self.assertEqual(date, type(subscription.billing_period_end_date))
self.assertEqual(date, type(subscription.paid_through_date))
self.assertEqual(datetime, type(subscription.created_at))
self.assertEqual(datetime, type(subscription.updated_at))
self.assertEqual(1, subscription.current_billing_cycle)
self.assertEqual(0, subscription.failure_count)
self.assertEqual(self.credit_card.token, subscription.payment_method_token)
self.assertEqual(Subscription.Status.Active, subscription.status_history[0].status)
self.assertEqual(Decimal("12.34"), subscription.status_history[0].price)
self.assertEqual(Decimal("0.00"), subscription.status_history[0].balance)
self.assertEqual(Subscription.Source.Api, subscription.status_history[0].subscription_source)
self.assertEqual("USD", subscription.status_history[0].currency_iso_code)
self.assertEqual(TestHelper.trialless_plan["id"], subscription.status_history[0].plan_id)
def test_create_returns_successful_result_with_payment_method_nonce(self):
config = Configuration.instantiate()
customer_id = Customer.create().customer.id
parsed_client_token = TestHelper.generate_decoded_client_token({"customer_id": customer_id})
authorization_fingerprint = json.loads(parsed_client_token)["authorizationFingerprint"]
http = ClientApiHttp(config, {
"authorization_fingerprint": authorization_fingerprint,
"shared_customer_identifier": "fake_identifier",
"shared_customer_identifier_type": "testing"
})
_, response = http.add_card({
"credit_card": {
"number": "4111111111111111",
"expiration_month": "11",
"expiration_year": "2099",
},
"share": True
})
nonce = json.loads(response)["creditCards"][0]["nonce"]
result = Subscription.create({
"payment_method_nonce": nonce,
"plan_id": TestHelper.trialless_plan["id"]
})
self.assertTrue(result.is_success)
transaction = result.subscription.transactions[0]
self.assertEqual("411111", transaction.credit_card_details.bin)
def test_create_can_set_the_id(self):
new_id = str(random.randint(1, 1000000))
result = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
"id": new_id
})
self.assertTrue(result.is_success)
self.assertEqual(new_id, result.subscription.id)
def test_create_can_set_the_merchant_account_id(self):
result = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
"merchant_account_id": TestHelper.non_default_merchant_account_id
})
self.assertTrue(result.is_success)
self.assertEqual(TestHelper.non_default_merchant_account_id, result.subscription.merchant_account_id)
def test_create_defaults_to_plan_without_trial(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
}).subscription
self.assertEqual(TestHelper.trialless_plan["trial_period"], subscription.trial_period)
self.assertEqual(None, subscription.trial_duration)
self.assertEqual(None, subscription.trial_duration_unit)
def test_create_defaults_to_plan_with_trial(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
}).subscription
self.assertEqual(TestHelper.trial_plan["trial_period"], subscription.trial_period)
self.assertEqual(TestHelper.trial_plan["trial_duration"], subscription.trial_duration)
self.assertEqual(TestHelper.trial_plan["trial_duration_unit"], subscription.trial_duration_unit)
def test_create_and_override_plan_with_trial(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"trial_duration": 5,
"trial_duration_unit": Subscription.TrialDurationUnit.Month
}).subscription
self.assertEqual(True, subscription.trial_period)
self.assertEqual(5, subscription.trial_duration)
self.assertEqual(Subscription.TrialDurationUnit.Month, subscription.trial_duration_unit)
def test_create_and_override_trial_period(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"trial_period": False
}).subscription
self.assertEqual(False, subscription.trial_period)
def test_create_and_override_number_of_billing_cycles(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"number_of_billing_cycles": 10
}).subscription
self.assertEqual(10, subscription.number_of_billing_cycles)
def test_create_and_override_number_of_billing_cycles_to_never_expire(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"never_expires": True
}).subscription
self.assertEqual(None, subscription.number_of_billing_cycles)
def test_create_creates_a_transaction_if_no_trial_period(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
}).subscription
self.assertEqual(1, len(subscription.transactions))
transaction = subscription.transactions[0]
self.assertEqual(Transaction, type(transaction))
self.assertEqual(TestHelper.trialless_plan["price"], transaction.amount)
self.assertEqual("sale", transaction.type)
self.assertEqual(subscription.id, transaction.subscription_id)
def test_create_has_transaction_with_billing_period_dates(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
}).subscription
transaction = subscription.transactions[0]
self.assertEqual(subscription.billing_period_start_date, transaction.subscription_details.billing_period_start_date)
self.assertEqual(subscription.billing_period_end_date, transaction.subscription_details.billing_period_end_date)
def test_create_returns_a_transaction_if_transaction_is_declined(self):
result = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
"price": TransactionAmounts.Decline
})
self.assertFalse(result.is_success)
self.assertEqual(Transaction.Status.ProcessorDeclined, result.transaction.status)
def test_create_doesnt_creates_a_transaction_if_trial_period(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
}).subscription
self.assertEqual(0, len(subscription.transactions))
def test_create_with_error_result(self):
result = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"id": "invalid token"
})
self.assertFalse(result.is_success)
id_errors = result.errors.for_object("subscription").on("id")
self.assertEqual(1, len(id_errors))
self.assertEqual("81906", id_errors[0].code)
def test_create_inherits_billing_day_of_month_from_plan(self):
result = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.billing_day_of_month_plan["id"],
})
self.assertTrue(result.is_success)
self.assertEqual(5, result.subscription.billing_day_of_month)
def test_create_allows_overriding_billing_day_of_month(self):
result = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.billing_day_of_month_plan["id"],
"billing_day_of_month": 19
})
self.assertTrue(result.is_success)
self.assertEqual(19, result.subscription.billing_day_of_month)
def test_create_allows_overriding_billing_day_of_month_with_start_immediately(self):
result = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.billing_day_of_month_plan["id"],
"options": {
"start_immediately": True
}
})
self.assertTrue(result.is_success)
self.assertEqual(1, len(result.subscription.transactions))
def test_create_allows_specifying_first_billing_date(self):
result = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.billing_day_of_month_plan["id"],
"first_billing_date": date.today() + timedelta(days=3)
})
self.assertTrue(result.is_success)
self.assertEqual(date.today() + timedelta(days=3), result.subscription.first_billing_date)
self.assertEqual(Subscription.Status.Pending, result.subscription.status)
def test_create_does_not_allow_first_billing_date_in_the_past(self):
result = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.billing_day_of_month_plan["id"],
"first_billing_date": date.today() - timedelta(days=3)
})
self.assertFalse(result.is_success)
billing_date_errors = result.errors.for_object("subscription").on("first_billing_date")
self.assertEqual(1, len(billing_date_errors))
self.assertEqual(ErrorCodes.Subscription.FirstBillingDateCannotBeInThePast, billing_date_errors[0].code)
def test_create_does_not_inherit_add_ons_or_discounts_from_the_plan_when_flag_is_set(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.add_on_discount_plan["id"],
"options": {
"do_not_inherit_add_ons_or_discounts": True
}
}).subscription
self.assertEqual(0, len(subscription.add_ons))
self.assertEqual(0, len(subscription.discounts))
def test_create_inherits_add_ons_and_discounts_from_the_plan_when_not_specified(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.add_on_discount_plan["id"]
}).subscription
self.assertEqual(2, len(subscription.add_ons))
add_ons = sorted(subscription.add_ons, key=lambda add_on: add_on.id)
self.assertEqual("increase_10", add_ons[0].id)
self.assertEqual(Decimal("10.00"), add_ons[0].amount)
self.assertEqual(1, add_ons[0].quantity)
self.assertEqual(None, add_ons[0].number_of_billing_cycles)
self.assertTrue(add_ons[0].never_expires)
self.assertEqual(0, add_ons[0].current_billing_cycle)
self.assertEqual("increase_20", add_ons[1].id)
self.assertEqual(Decimal("20.00"), add_ons[1].amount)
self.assertEqual(1, add_ons[1].quantity)
self.assertEqual(None, add_ons[1].number_of_billing_cycles)
self.assertTrue(add_ons[1].never_expires)
self.assertEqual(0, add_ons[1].current_billing_cycle)
self.assertEqual(2, len(subscription.discounts))
discounts = sorted(subscription.discounts, key=lambda discount: discount.id)
self.assertEqual("discount_11", discounts[0].id)
self.assertEqual(Decimal("11.00"), discounts[0].amount)
self.assertEqual(1, discounts[0].quantity)
self.assertEqual(None, discounts[0].number_of_billing_cycles)
self.assertTrue(discounts[0].never_expires)
self.assertEqual(0, discounts[0].current_billing_cycle)
self.assertEqual("discount_7", discounts[1].id)
self.assertEqual(Decimal("7.00"), discounts[1].amount)
self.assertEqual(1, discounts[1].quantity)
self.assertEqual(None, discounts[1].number_of_billing_cycles)
self.assertTrue(discounts[1].never_expires)
self.assertEqual(0, discounts[1].current_billing_cycle)
def test_create_allows_overriding_of_inherited_add_ons_and_discounts(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.add_on_discount_plan["id"],
"add_ons": {
"update": [
{
"amount": Decimal("50.00"),
"existing_id": "increase_10",
"quantity": 2,
"number_of_billing_cycles": 5
},
{
"amount": Decimal("100.00"),
"existing_id": "increase_20",
"quantity": 4,
"never_expires": True
}
]
},
"discounts": {
"update": [
{
"amount": Decimal("15.00"),
"existing_id": "discount_7",
"quantity": 3,
"number_of_billing_cycles": 19
}
]
}
}).subscription
self.assertEqual(2, len(subscription.add_ons))
add_ons = sorted(subscription.add_ons, key=lambda add_on: add_on.id)
self.assertEqual("increase_10", add_ons[0].id)
self.assertEqual(Decimal("50.00"), add_ons[0].amount)
self.assertEqual(2, add_ons[0].quantity)
self.assertEqual(5, add_ons[0].number_of_billing_cycles)
self.assertFalse(add_ons[0].never_expires)
self.assertEqual(0, add_ons[0].current_billing_cycle)
self.assertEqual("increase_20", add_ons[1].id)
self.assertEqual(Decimal("100.00"), add_ons[1].amount)
self.assertEqual(4, add_ons[1].quantity)
self.assertEqual(None, add_ons[1].number_of_billing_cycles)
self.assertTrue(add_ons[1].never_expires)
self.assertEqual(0, add_ons[1].current_billing_cycle)
self.assertEqual(2, len(subscription.discounts))
discounts = sorted(subscription.discounts, key=lambda discount: discount.id)
self.assertEqual("discount_11", discounts[0].id)
self.assertEqual(Decimal("11.00"), discounts[0].amount)
self.assertEqual(1, discounts[0].quantity)
self.assertEqual(None, discounts[0].number_of_billing_cycles)
self.assertTrue(discounts[0].never_expires)
self.assertEqual(0, discounts[0].current_billing_cycle)
self.assertEqual("discount_7", discounts[1].id)
self.assertEqual(Decimal("15.00"), discounts[1].amount)
self.assertEqual(3, discounts[1].quantity)
self.assertEqual(19, discounts[1].number_of_billing_cycles)
self.assertFalse(discounts[1].never_expires)
self.assertEqual(0, discounts[1].current_billing_cycle)
def test_create_allows_deleting_of_inherited_add_ons_and_discounts(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.add_on_discount_plan["id"],
"add_ons": {
"remove": ["increase_10", "increase_20"]
},
"discounts": {
"remove": ["discount_7"]
}
}).subscription
self.assertEqual(0, len(subscription.add_ons))
self.assertEqual(1, len(subscription.discounts))
self.assertEqual("discount_11", subscription.discounts[0].id)
def test_create_allows_adding_add_ons_and_discounts(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.add_on_discount_plan["id"],
"add_ons": {
"add": [
{
"amount": Decimal("50.00"),
"inherited_from_id": "increase_30",
"quantity": 2,
"number_of_billing_cycles": 5
}
],
"remove": ["increase_10", "increase_20"]
},
"discounts": {
"add": [
{
"amount": Decimal("17.00"),
"inherited_from_id": "discount_15",
"never_expires": True
}
],
"remove": ["discount_7", "discount_11"]
}
}).subscription
self.assertEqual(1, len(subscription.add_ons))
self.assertEqual("increase_30", subscription.add_ons[0].id)
self.assertEqual(Decimal("50.00"), subscription.add_ons[0].amount)
self.assertEqual(2, subscription.add_ons[0].quantity)
self.assertEqual(5, subscription.add_ons[0].number_of_billing_cycles)
self.assertFalse(subscription.add_ons[0].never_expires)
self.assertEqual(0, subscription.add_ons[0].current_billing_cycle)
self.assertEqual(1, len(subscription.discounts))
self.assertEqual("discount_15", subscription.discounts[0].id)
self.assertEqual(Decimal("17.00"), subscription.discounts[0].amount)
self.assertEqual(1, subscription.discounts[0].quantity)
self.assertEqual(None, subscription.discounts[0].number_of_billing_cycles)
self.assertTrue(subscription.discounts[0].never_expires)
self.assertEqual(0, subscription.discounts[0].current_billing_cycle)
def test_create_properly_parses_validation_errors_for_arrays(self):
result = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.add_on_discount_plan["id"],
"add_ons": {
"update": [
{
"existing_id": "increase_10",
"amount": "invalid"
},
{
"existing_id": "increase_20",
"quantity": -2
}
]
}
})
self.assertFalse(result.is_success)
self.assertEqual(
ErrorCodes.Subscription.Modification.AmountIsInvalid,
result.errors.for_object("subscription").for_object("add_ons").for_object("update").for_index(0).on("amount")[0].code
)
self.assertEqual(
ErrorCodes.Subscription.Modification.QuantityIsInvalid,
result.errors.for_object("subscription").for_object("add_ons").for_object("update").for_index(1).on("quantity")[0].code
)
def test_descriptors_accepts_name_phone_and_url(self):
result = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
"descriptor": {
"name": "123*123456789012345678",
"phone": "3334445555",
"url": "ebay.com"
}
})
self.assertTrue(result.is_success)
subscription = result.subscription
self.assertEqual("123*123456789012345678", subscription.descriptor.name)
self.assertEqual("3334445555", subscription.descriptor.phone)
transaction = subscription.transactions[0]
self.assertEqual("123*123456789012345678", transaction.descriptor.name)
self.assertEqual("3334445555", transaction.descriptor.phone)
self.assertEqual("ebay.com", transaction.descriptor.url)
def test_descriptors_has_validation_errors_if_format_is_invalid(self):
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
},
"descriptor": {
"name": "badcompanyname12*badproduct12",
"phone": "%bad4445555"
}
})
self.assertFalse(result.is_success)
name_errors = result.errors.for_object("transaction").for_object("descriptor").on("name")
self.assertEqual(1, len(name_errors))
self.assertEqual(ErrorCodes.Descriptor.NameFormatIsInvalid, name_errors[0].code)
phone_errors = result.errors.for_object("transaction").for_object("descriptor").on("phone")
self.assertEqual(1, len(phone_errors))
self.assertEqual(ErrorCodes.Descriptor.PhoneFormatIsInvalid, phone_errors[0].code)
def test_find_with_valid_id(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
}).subscription
found_subscription = Subscription.find(subscription.id)
self.assertEqual(subscription.id, found_subscription.id)
@raises_with_regexp(NotFoundError, "subscription with id bad_token not found")
def test_find_with_invalid_token(self):
Subscription.find("bad_token")
def test_update_creates_a_prorated_transaction_when_merchant_is_set_to_prorate(self):
result = Subscription.update(self.updateable_subscription.id, {
"price": self.updateable_subscription.price + Decimal("1"),
})
self.assertTrue(result.is_success)
subscription = result.subscription
self.assertEqual(2, len(subscription.transactions))
def test_update_creates_a_prorated_transaction_when_flag_is_passed_as_True(self):
result = Subscription.update(self.updateable_subscription.id, {
"price": self.updateable_subscription.price + Decimal("1"),
"options": {
"prorate_charges": True
}
})
self.assertTrue(result.is_success)
subscription = result.subscription
self.assertEqual(2, len(subscription.transactions))
def test_update_does_not_create_a_prorated_transaction_when_flag_is_passed_as_False(self):
result = Subscription.update(self.updateable_subscription.id, {
"price": self.updateable_subscription.price + Decimal("1"),
"options": {
"prorate_charges": False
}
})
self.assertTrue(result.is_success)
subscription = result.subscription
self.assertEqual(1, len(subscription.transactions))
def test_update_does_not_update_subscription_when_revert_subscription_on_proration_failure_is_true(self):
result = Subscription.update(self.updateable_subscription.id, {
"price": self.updateable_subscription.price + Decimal("2100"),
"options": {
"prorate_charges": True,
"revert_subscription_on_proration_failure": True
}
})
self.assertFalse(result.is_success)
found_subscription = Subscription.find(result.subscription.id)
self.assertEqual(len(self.updateable_subscription.transactions) + 1, len(result.subscription.transactions))
self.assertEqual("processor_declined", result.subscription.transactions[0].status)
self.assertEqual(Decimal("0.00"), found_subscription.balance)
self.assertEqual(self.updateable_subscription.price, found_subscription.price)
def test_update_updates_subscription_when_revert_subscription_on_proration_failure_is_false(self):
result = Subscription.update(self.updateable_subscription.id, {
"price": self.updateable_subscription.price + Decimal("2100"),
"options": {
"prorate_charges": True,
"revert_subscription_on_proration_failure": False
}
})
self.assertTrue(result.is_success)
found_subscription = Subscription.find(result.subscription.id)
self.assertEqual(len(self.updateable_subscription.transactions) + 1, len(result.subscription.transactions))
self.assertEqual("processor_declined", result.subscription.transactions[0].status)
self.assertEqual(result.subscription.transactions[0].amount, Decimal(found_subscription.balance))
self.assertEqual(self.updateable_subscription.price + Decimal("2100"), found_subscription.price)
def test_update_with_successful_result(self):
new_id = str(random.randint(1, 1000000))
result = Subscription.update(self.updateable_subscription.id, {
"id": new_id,
"price": Decimal("9999.88"),
"plan_id": TestHelper.trial_plan["id"]
})
self.assertTrue(result.is_success)
subscription = result.subscription
self.assertEqual(new_id, subscription.id)
self.assertEqual(TestHelper.trial_plan["id"], subscription.plan_id)
self.assertEqual(Decimal("9999.88"), subscription.price)
def test_update_with_merchant_account_id(self):
result = Subscription.update(self.updateable_subscription.id, {
"merchant_account_id": TestHelper.non_default_merchant_account_id,
})
self.assertTrue(result.is_success)
subscription = result.subscription
self.assertEqual(TestHelper.non_default_merchant_account_id, subscription.merchant_account_id)
def test_update_with_payment_method_token(self):
newCard = CreditCard.create({
"customer_id": self.credit_card.customer_id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "100",
"cardholder_name": self.credit_card.cardholder_name
}).credit_card
result = Subscription.update(self.updateable_subscription.id, {
"payment_method_token": newCard.token
})
self.assertTrue(result.is_success)
subscription = result.subscription
self.assertEqual(newCard.token, subscription.payment_method_token)
def test_update_with_payment_method_nonce(self):
config = Configuration.instantiate()
customer_id = self.credit_card.customer_id
parsed_client_token = TestHelper.generate_decoded_client_token({"customer_id": customer_id})
authorization_fingerprint = json.loads(parsed_client_token)["authorizationFingerprint"]
http = ClientApiHttp(config, {
"authorization_fingerprint": authorization_fingerprint,
"shared_customer_identifier": "fake_identifier",
"shared_customer_identifier_type": "testing"
})
_, response = http.add_card({
"credit_card": {
"number": "4242424242424242",
"expiration_month": "11",
"expiration_year": "2099",
},
"share": True
})
nonce = json.loads(response)["creditCards"][0]["nonce"]
result = Subscription.update(self.updateable_subscription.id, {
"payment_method_nonce": nonce
})
self.assertTrue(result.is_success)
subscription = result.subscription
newCard = CreditCard.find(subscription.payment_method_token)
self.assertEqual("4242", newCard.last_4)
self.assertNotEqual(newCard.last_4, self.credit_card.last_4)
def test_update_with_number_of_billing_cycles(self):
result = Subscription.update(self.updateable_subscription.id, {
"number_of_billing_cycles": 10
})
self.assertTrue(result.is_success)
subscription = result.subscription
self.assertEqual(10, subscription.number_of_billing_cycles)
def test_update_with_never_expires(self):
result = Subscription.update(self.updateable_subscription.id, {
"never_expires": True
})
self.assertTrue(result.is_success)
subscription = result.subscription
self.assertEqual(None, subscription.number_of_billing_cycles)
def test_update_with_error_result(self):
result = Subscription.update(self.updateable_subscription.id, {
"id": "bad id",
})
self.assertFalse(result.is_success)
id_errors = result.errors.for_object("subscription").on("id")
self.assertEqual(1, len(id_errors))
self.assertEqual("81906", id_errors[0].code)
@raises(NotFoundError)
def test_update_raises_error_when_subscription_not_found(self):
Subscription.update("notfound", {
"id": "newid",
})
def test_update_allows_overriding_of_inherited_add_ons_and_discounts(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.add_on_discount_plan["id"],
}).subscription
subscription = Subscription.update(subscription.id, {
"add_ons": {
"update": [
{
"amount": Decimal("50.00"),
"existing_id": "increase_10",
"quantity": 2,
"number_of_billing_cycles": 5
},
{
"amount": Decimal("100.00"),
"existing_id": "increase_20",
"quantity": 4,
"never_expires": True
}
]
},
"discounts": {
"update": [
{
"amount": Decimal("15.00"),
"existing_id": "discount_7",
"quantity": 3,
"number_of_billing_cycles": 19
}
]
}
}).subscription
self.assertEqual(2, len(subscription.add_ons))
add_ons = sorted(subscription.add_ons, key=lambda add_on: add_on.id)
self.assertEqual("increase_10", add_ons[0].id)
self.assertEqual(Decimal("50.00"), add_ons[0].amount)
self.assertEqual(2, add_ons[0].quantity)
self.assertEqual(5, add_ons[0].number_of_billing_cycles)
self.assertFalse(add_ons[0].never_expires)
self.assertEqual("increase_20", add_ons[1].id)
self.assertEqual(Decimal("100.00"), add_ons[1].amount)
self.assertEqual(4, add_ons[1].quantity)
self.assertEqual(None, add_ons[1].number_of_billing_cycles)
self.assertTrue(add_ons[1].never_expires)
self.assertEqual(2, len(subscription.discounts))
discounts = sorted(subscription.discounts, key=lambda discount: discount.id)
self.assertEqual("discount_11", discounts[0].id)
self.assertEqual(Decimal("11.00"), discounts[0].amount)
self.assertEqual(1, discounts[0].quantity)
self.assertEqual(None, discounts[0].number_of_billing_cycles)
self.assertTrue(discounts[0].never_expires)
self.assertEqual("discount_7", discounts[1].id)
self.assertEqual(Decimal("15.00"), discounts[1].amount)
self.assertEqual(3, discounts[1].quantity)
self.assertEqual(19, discounts[1].number_of_billing_cycles)
self.assertFalse(discounts[1].never_expires)
def test_update_allows_adding_and_removing_add_ons_and_discounts(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.add_on_discount_plan["id"],
}).subscription
subscription = Subscription.update(subscription.id, {
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.add_on_discount_plan["id"],
"add_ons": {
"add": [
{
"amount": Decimal("50.00"),
"inherited_from_id": "increase_30",
"quantity": 2,
"number_of_billing_cycles": 5
}
],
"remove": ["increase_10", "increase_20"]
},
"discounts": {
"add": [
{
"amount": Decimal("17.00"),
"inherited_from_id": "discount_15",
"never_expires": True
}
],
"remove": ["discount_7", "discount_11"]
}
}).subscription
self.assertEqual(1, len(subscription.add_ons))
self.assertEqual("increase_30", subscription.add_ons[0].id)
self.assertEqual(Decimal("50.00"), subscription.add_ons[0].amount)
self.assertEqual(2, subscription.add_ons[0].quantity)
self.assertEqual(5, subscription.add_ons[0].number_of_billing_cycles)
self.assertFalse(subscription.add_ons[0].never_expires)
self.assertEqual(1, len(subscription.discounts))
self.assertEqual("discount_15", subscription.discounts[0].id)
self.assertEqual(Decimal("17.00"), subscription.discounts[0].amount)
self.assertEqual(1, subscription.discounts[0].quantity)
self.assertEqual(None, subscription.discounts[0].number_of_billing_cycles)
self.assertTrue(subscription.discounts[0].never_expires)
def test_update_allows_adding_and_removing_unicode_add_ons_and_discounts(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.add_on_discount_plan["id"],
}).subscription
subscription = Subscription.update(subscription.id, {
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.add_on_discount_plan["id"],
"add_ons": {
"add": [
{
"amount": Decimal("50.00"),
"inherited_from_id": u"increase_30",
"quantity": 2,
"number_of_billing_cycles": 5
}
],
"remove": [u"increase_10", u"increase_20"]
},
"discounts": {
"add": [
{
"amount": Decimal("17.00"),
"inherited_from_id": u"discount_15",
"never_expires": True
}
],
"remove": [u"discount_7", u"discount_11"]
}
}).subscription
self.assertEqual(1, len(subscription.add_ons))
self.assertEqual(u"increase_30", subscription.add_ons[0].id)
self.assertEqual(Decimal("50.00"), subscription.add_ons[0].amount)
self.assertEqual(2, subscription.add_ons[0].quantity)
self.assertEqual(5, subscription.add_ons[0].number_of_billing_cycles)
self.assertFalse(subscription.add_ons[0].never_expires)
self.assertEqual(1, len(subscription.discounts))
self.assertEqual(u"discount_15", subscription.discounts[0].id)
self.assertEqual(Decimal("17.00"), subscription.discounts[0].amount)
self.assertEqual(1, subscription.discounts[0].quantity)
self.assertEqual(None, subscription.discounts[0].number_of_billing_cycles)
self.assertTrue(subscription.discounts[0].never_expires)
def test_update_can_replace_entire_set_of_add_ons_and_discounts(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.add_on_discount_plan["id"],
}).subscription
subscription = Subscription.update(subscription.id, {
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.add_on_discount_plan["id"],
"add_ons": {
"add": [
{"inherited_from_id": "increase_30"},
{"inherited_from_id": "increase_20"},
],
},
"discounts": {
"add": [
{"inherited_from_id": "discount_15"},
],
},
"options": {
"replace_all_add_ons_and_discounts": True,
},
}).subscription
self.assertEqual(2, len(subscription.add_ons))
add_ons = sorted(subscription.add_ons, key=lambda add_on: add_on.id)
self.assertEqual("increase_20", add_ons[0].id)
self.assertEqual(Decimal("20.00"), add_ons[0].amount)
self.assertEqual(1, add_ons[0].quantity)
self.assertEqual(None, add_ons[0].number_of_billing_cycles)
self.assertTrue(add_ons[0].never_expires)
self.assertEqual("increase_30", add_ons[1].id)
self.assertEqual(Decimal("30.00"), add_ons[1].amount)
self.assertEqual(1, add_ons[1].quantity)
self.assertEqual(None, add_ons[1].number_of_billing_cycles)
self.assertTrue(add_ons[1].never_expires)
self.assertEqual(1, len(subscription.discounts))
self.assertEqual("discount_15", subscription.discounts[0].id)
self.assertEqual(Decimal("15.00"), subscription.discounts[0].amount)
self.assertEqual(1, subscription.discounts[0].quantity)
self.assertEqual(None, subscription.discounts[0].number_of_billing_cycles)
self.assertTrue(subscription.discounts[0].never_expires)
def test_update_descriptor_name_and_phone(self):
result = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
"descriptor": {
"name": "123*123456789012345678",
"phone": "1234567890"
}
})
self.assertTrue(result.is_success)
subscription = result.subscription
updated_subscription = Subscription.update(subscription.id, {
"descriptor": {
"name": "999*99",
"phone": "1234567890"
}
}).subscription
self.assertEqual("999*99", updated_subscription.descriptor.name)
self.assertEqual("1234567890", updated_subscription.descriptor.phone)
def test_cancel_with_successful_response(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"]
}).subscription
result = Subscription.cancel(subscription.id)
self.assertTrue(result.is_success)
self.assertEqual("Canceled", result.subscription.status)
def test_unsuccessful_cancel_returns_validation_error(self):
Subscription.cancel(self.updateable_subscription.id)
result = Subscription.cancel(self.updateable_subscription.id)
self.assertFalse(result.is_success)
status_errors = result.errors.for_object("subscription").on("status")
        self.assertEqual(1, len(status_errors))
self.assertEqual("81905", status_errors[0].code)
@raises(NotFoundError)
def test_cancel_raises_not_found_error_with_bad_subscription(self):
Subscription.cancel("notreal")
def test_search_with_argument_list_rather_than_literal_list(self):
trial_subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"price": Decimal("1")
}).subscription
trialless_subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
"price": Decimal("1")
}).subscription
collection = Subscription.search(
SubscriptionSearch.plan_id == "integration_trial_plan",
SubscriptionSearch.price == Decimal("1")
)
self.assertTrue(TestHelper.includes(collection, trial_subscription))
self.assertFalse(TestHelper.includes(collection, trialless_subscription))
def test_search_on_billing_cycles_remaining(self):
subscription_5 = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"number_of_billing_cycles": 5
}).subscription
subscription_10 = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"number_of_billing_cycles": 10
}).subscription
collection = Subscription.search([
SubscriptionSearch.billing_cycles_remaining >= 7
])
self.assertTrue(TestHelper.includes(collection, subscription_10))
self.assertFalse(TestHelper.includes(collection, subscription_5))
def test_search_on_created_at(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
}).subscription
empty_collection = Subscription.search([
SubscriptionSearch.created_at.between(date.today() + timedelta(1), date.today() + timedelta(2))
])
self.assertTrue(empty_collection.maximum_size == 0)
success_collection = Subscription.search([
SubscriptionSearch.created_at.between(date.today() - timedelta(1), date.today() + timedelta(1))
])
self.assertTrue(success_collection.maximum_size > 0)
def test_search_on_days_past_due(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
}).subscription
TestHelper.make_past_due(subscription, 3)
collection = Subscription.search([
SubscriptionSearch.days_past_due.between(2, 10)
])
self.assertTrue(collection.maximum_size > 0)
for subscription in collection.items:
self.assertTrue(2 <= subscription.days_past_due <= 10)
def test_search_on_plan_id(self):
trial_subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"price": Decimal("2")
}).subscription
trialless_subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
"price": Decimal("2")
}).subscription
collection = Subscription.search([
SubscriptionSearch.plan_id == "integration_trial_plan",
SubscriptionSearch.price == Decimal("2")
])
self.assertTrue(TestHelper.includes(collection, trial_subscription))
self.assertFalse(TestHelper.includes(collection, trialless_subscription))
collection = Subscription.search([
SubscriptionSearch.plan_id.in_list("integration_trial_plan", "integration_trialless_plan"),
SubscriptionSearch.price == Decimal("2")
])
self.assertTrue(TestHelper.includes(collection, trial_subscription))
self.assertTrue(TestHelper.includes(collection, trialless_subscription))
def test_search_on_plan_id_is_acts_like_text_node_instead_of_multiple_value(self):
for plan in [TestHelper.trial_plan, TestHelper.trialless_plan]:
Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": plan["id"],
"price": Decimal("3")
})
collection = Subscription.search([
SubscriptionSearch.plan_id == "no such plan id",
SubscriptionSearch.price == Decimal("3")
])
self.assertEqual(0, collection.maximum_size)
def test_search_on_status(self):
active_subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
"price": Decimal("3")
}).subscription
canceled_subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
"price": Decimal("3")
}).subscription
Subscription.cancel(canceled_subscription.id)
collection = Subscription.search([
SubscriptionSearch.status.in_list([Subscription.Status.Active, Subscription.Status.Canceled]),
SubscriptionSearch.price == Decimal("3")
])
self.assertTrue(TestHelper.includes(collection, active_subscription))
self.assertTrue(TestHelper.includes(collection, canceled_subscription))
def test_search_on_merchant_account_id(self):
subscription_default_ma = Subscription.create({
"merchant_account_id": TestHelper.default_merchant_account_id,
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"price": Decimal("4")
}).subscription
subscription_non_default_ma = Subscription.create({
"merchant_account_id": TestHelper.non_default_merchant_account_id,
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"price": Decimal("4")
}).subscription
collection = Subscription.search([
SubscriptionSearch.merchant_account_id == TestHelper.default_merchant_account_id,
SubscriptionSearch.price == Decimal("4")
])
self.assertTrue(TestHelper.includes(collection, subscription_default_ma))
self.assertFalse(TestHelper.includes(collection, subscription_non_default_ma))
def test_search_on_bogus_merchant_account_id(self):
subscription = Subscription.create({
"merchant_account_id": TestHelper.default_merchant_account_id,
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"price": Decimal("4")
}).subscription
collection = Subscription.search([
SubscriptionSearch.merchant_account_id == subscription.merchant_account_id,
SubscriptionSearch.price == Decimal("4")
])
self.assertTrue(TestHelper.includes(collection, subscription))
collection = Subscription.search([
SubscriptionSearch.merchant_account_id.in_list(["totally_bogus_id", subscription.merchant_account_id]),
SubscriptionSearch.price == Decimal("4")
])
self.assertTrue(TestHelper.includes(collection, subscription))
collection = Subscription.search([
SubscriptionSearch.merchant_account_id == "totally_bogus_id",
SubscriptionSearch.price == Decimal("4")
])
self.assertFalse(TestHelper.includes(collection, subscription))
def test_search_on_price(self):
subscription_900 = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"price": Decimal("900")
}).subscription
subscription_1000 = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
"price": Decimal("1000")
}).subscription
collection = Subscription.search([
SubscriptionSearch.price >= Decimal("950")
])
self.assertTrue(TestHelper.includes(collection, subscription_1000))
self.assertFalse(TestHelper.includes(collection, subscription_900))
def test_search_on_transaction_id(self):
subscription_found = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
}).subscription
subscription_not_found = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
}).subscription
collection = Subscription.search(
SubscriptionSearch.transaction_id == subscription_found.transactions[0].id
)
self.assertTrue(TestHelper.includes(collection, subscription_found))
self.assertFalse(TestHelper.includes(collection, subscription_not_found))
def test_search_on_id(self):
subscription_found = Subscription.create({
"id": "find_me_%s" % random.randint(1, 1000000),
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
}).subscription
subscription_not_found = Subscription.create({
"id": "do_not_find_me_%s" % random.randint(1, 1000000),
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"],
}).subscription
collection = Subscription.search([
SubscriptionSearch.id.starts_with("find_me")
])
self.assertTrue(TestHelper.includes(collection, subscription_found))
self.assertFalse(TestHelper.includes(collection, subscription_not_found))
def test_search_on_next_billing_date(self):
subscription_found = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"]
}).subscription
subscription_not_found = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trial_plan["id"]
}).subscription
next_billing_date_cutoff = datetime.today() + timedelta(days=5)
collection = Subscription.search(
SubscriptionSearch.next_billing_date >= next_billing_date_cutoff
)
self.assertTrue(TestHelper.includes(collection, subscription_found))
self.assertFalse(TestHelper.includes(collection, subscription_not_found))
def test_retryCharge_without_amount__deprecated(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
}).subscription
TestHelper.make_past_due(subscription)
result = Subscription.retryCharge(subscription.id)
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEqual(subscription.price, transaction.amount)
self.assertNotEqual(None, transaction.processor_authorization_code)
self.assertEqual(Transaction.Type.Sale, transaction.type)
self.assertEqual(Transaction.Status.Authorized, transaction.status)
def test_retry_charge_without_amount(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
}).subscription
TestHelper.make_past_due(subscription)
result = Subscription.retry_charge(subscription.id)
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEqual(subscription.price, transaction.amount)
self.assertNotEqual(None, transaction.processor_authorization_code)
self.assertEqual(Transaction.Type.Sale, transaction.type)
self.assertEqual(Transaction.Status.Authorized, transaction.status)
def test_retryCharge_with_amount__deprecated(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
}).subscription
TestHelper.make_past_due(subscription)
result = Subscription.retryCharge(subscription.id, Decimal(TransactionAmounts.Authorize))
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEqual(Decimal(TransactionAmounts.Authorize), transaction.amount)
self.assertNotEqual(None, transaction.processor_authorization_code)
self.assertEqual(Transaction.Type.Sale, transaction.type)
self.assertEqual(Transaction.Status.Authorized, transaction.status)
def test_retry_charge_with_amount(self):
subscription = Subscription.create({
"payment_method_token": self.credit_card.token,
"plan_id": TestHelper.trialless_plan["id"],
}).subscription
TestHelper.make_past_due(subscription)
result = Subscription.retry_charge(subscription.id, Decimal(TransactionAmounts.Authorize))
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEqual(Decimal(TransactionAmounts.Authorize), transaction.amount)
self.assertNotEqual(None, transaction.processor_authorization_code)
self.assertEqual(Transaction.Type.Sale, transaction.type)
self.assertEqual(Transaction.Status.Authorized, transaction.status)
def test_create_with_paypal_future_payment_method_token(self):
http = ClientApiHttp.create()
status_code, nonce = http.get_paypal_nonce({
"consent-code": "consent-code",
"options": {"validate": False}
})
self.assertEqual(202, status_code)
payment_method_token = PaymentMethod.create({
"customer_id": Customer.create().customer.id,
"payment_method_nonce": nonce
}).payment_method.token
result = Subscription.create({
"payment_method_token": payment_method_token,
"plan_id": TestHelper.trialless_plan["id"]
})
self.assertTrue(result.is_success)
subscription = result.subscription
self.assertEqual(payment_method_token, subscription.payment_method_token)
def test_create_fails_with_paypal_one_time_payment_method_nonce(self):
result = Subscription.create({
"payment_method_nonce": Nonces.PayPalOneTimePayment,
"plan_id": TestHelper.trialless_plan["id"]
})
self.assertFalse(result.is_success)
self.assertEqual(
ErrorCodes.Subscription.PaymentMethodNonceIsInvalid,
result.errors.for_object("subscription")[0].code
)
def test_create_fails_with_paypal_future_payment_method_nonce(self):
result = Subscription.create({
"payment_method_nonce": Nonces.PayPalFuturePayment,
"plan_id": TestHelper.trialless_plan["id"]
})
self.assertFalse(result.is_success)
self.assertEqual(
ErrorCodes.Subscription.PaymentMethodNonceIsInvalid,
result.errors.for_object("subscription")[0].code
)
| 42.053676 | 131 | 0.64695 |
7d128d5bf4a50e75ca592d54eddac6091352fe63 | 296 | py | Python | python/phonenumbers/data/alt_format_36.py | rodgar-nvkz/python-phonenumbers | 4c7c4892211dbc9bc328bc3356b03853eaf993dc | [
"Apache-2.0"
] | 2,424 | 2015-01-05T05:34:45.000Z | 2022-03-28T22:37:53.000Z | python/phonenumbers/data/alt_format_36.py | rodgar-nvkz/python-phonenumbers | 4c7c4892211dbc9bc328bc3356b03853eaf993dc | [
"Apache-2.0"
] | 166 | 2015-01-30T23:59:18.000Z | 2022-03-14T21:08:42.000Z | Lib/site-packages/phonenumbers/data/alt_format_36.py | PsychedVic/Portafolio | 4bd59d19de41fbea5317d4f2b9e6219ea0359945 | [
"bzip2-1.0.6"
] | 345 | 2015-01-02T00:33:27.000Z | 2022-03-26T13:06:57.000Z | """Auto-generated file, do not edit by hand. 36 metadata"""
from ..phonemetadata import NumberFormat
PHONE_ALT_FORMAT_36 = [NumberFormat(pattern='(\\d)(\\d{4})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['1']), NumberFormat(pattern='(\\d{2})(\\d{4})(\\d{3})', format='\\1 \\2 \\3')]
| 59.2 | 193 | 0.628378 |
4d5b397e37a092cf5f61d5636b7860b318a6f8ed | 436 | py | Python | detect_secrets/plugins/twilio.py | paulo-sampaio/detect-secrets | 73ffbc35a72cb316d9e1842cc131b6098cf3c36a | [
"Apache-2.0"
] | 2,212 | 2018-04-03T20:58:42.000Z | 2022-03-31T17:58:38.000Z | detect_secrets/plugins/twilio.py | paulo-sampaio/detect-secrets | 73ffbc35a72cb316d9e1842cc131b6098cf3c36a | [
"Apache-2.0"
] | 354 | 2018-04-03T16:29:55.000Z | 2022-03-31T18:26:26.000Z | detect_secrets/plugins/twilio.py | paulo-sampaio/detect-secrets | 73ffbc35a72cb316d9e1842cc131b6098cf3c36a | [
"Apache-2.0"
] | 298 | 2018-04-02T19:35:15.000Z | 2022-03-28T04:52:14.000Z | """
This plugin searches for Twilio API keys
"""
import re
from .base import RegexBasedDetector
class TwilioKeyDetector(RegexBasedDetector):
"""Scans for Twilio API keys."""
secret_type = 'Twilio API Key'
denylist = [
# Account SID (ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
re.compile(r'AC[a-z0-9]{32}'),
# Auth token (SKxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
re.compile(r'SK[a-z0-9]{32}'),
]
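# Illustrative check (not part of the original plugin): both denylist patterns
# above match 34-character, Twilio-style identifiers. The values below are
# made-up placeholders, not real credentials.
#
#   >>> import re
#   >>> bool(re.search(r'AC[a-z0-9]{32}', 'AC' + 'a1' * 16))
#   True
#   >>> bool(re.search(r'SK[a-z0-9]{32}', 'SK' + 'b2' * 16))
#   True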
| 21.8 | 58 | 0.662844 |
5597dfc0fee593a1a48d9a5e3ef52ff15e9b6b34 | 5,685 | py | Python | app/util/analytics/log_reader.py | dsplugins/dc-app-performance-toolkit | 0a1bb0f8d40f1dc4104aebe926695238a0ef3d00 | [
"Apache-2.0"
] | null | null | null | app/util/analytics/log_reader.py | dsplugins/dc-app-performance-toolkit | 0a1bb0f8d40f1dc4104aebe926695238a0ef3d00 | [
"Apache-2.0"
] | null | null | null | app/util/analytics/log_reader.py | dsplugins/dc-app-performance-toolkit | 0a1bb0f8d40f1dc4104aebe926695238a0ef3d00 | [
"Apache-2.0"
] | null | null | null | import os
import re
from datetime import datetime
from util.project_paths import ENV_TAURUS_ARTIFACT_DIR
GIT_OPERATIONS = ['jmeter_clone_repo_via_http', 'jmeter_clone_repo_via_ssh',
'jmeter_git_push_via_http', 'jmeter_git_fetch_via_http',
'jmeter_git_push_via_ssh', 'jmeter_git_fetch_via_ssh']
class BaseFileReader:
@staticmethod
def validate_file_exists(path):
if not os.path.exists(path):
raise Exception(f'{path} does not exist')
@staticmethod
def validate_file_not_empty(file):
if len(file) == 0:
            raise SystemExit(f'ERROR: file {file} is empty')
@staticmethod
def validate_headers(headers_list, validation_dict):
for key, value in validation_dict.items():
if headers_list[key] != value:
raise SystemExit(f'Header validation error. '
f'Actual: {headers_list[key]}, Expected: {validation_dict[key]}')
@property
def log_dir(self):
return ENV_TAURUS_ARTIFACT_DIR
class BztFileReader(BaseFileReader):
bzt_log_name = 'bzt.log'
dt_regexp = r'(\d{4}-\d{1,2}-\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2})'
jmeter_test_regexp = r'jmeter_\S*'
selenium_test_regexp = r'selenium_\S*'
locust_test_regexp = r'locust_\S*'
success_test_rate_regexp = r'(\d{1,3}.\d{1,2}%)'
def __init__(self):
self.bzt_log = self.get_bzt_log()
self.bzt_log_results_part = self._get_results_bzt_log_part()
def get_bzt_log(self):
bzt_log_path = f'{self.log_dir}/{self.bzt_log_name}'
self.validate_file_exists(bzt_log_path)
with open(bzt_log_path) as log_file:
log_file = log_file.readlines()
self.validate_file_not_empty(log_file)
return log_file
def _get_duration_by_start_finish_strings(self):
first_string = self.bzt_log[0]
last_string = self.bzt_log[-1]
start_time = re.findall(self.dt_regexp, first_string)[0]
start_datetime_obj = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
finish_time = re.findall(self.dt_regexp, last_string)[0]
finish_datetime_obj = datetime.strptime(finish_time, '%Y-%m-%d %H:%M:%S')
duration = finish_datetime_obj - start_datetime_obj
return duration.seconds
def _get_duration_by_test_duration(self):
test_duration = None
for string in self.bzt_log:
if 'Test duration' in string:
str_duration = string.split('duration:')[1].replace('\n', '')
str_duration = str_duration.replace(' ', '')
duration_datetime_obj = datetime.strptime(str_duration, '%H:%M:%S')
test_duration = (duration_datetime_obj.hour * 3600 +
duration_datetime_obj.minute * 60 + duration_datetime_obj.second)
break
return test_duration
def _get_test_count_by_type(self, tests_type, log):
trigger = f' {tests_type}_'
test_search_regx = ""
if tests_type == 'jmeter':
test_search_regx = self.jmeter_test_regexp
elif tests_type == 'selenium':
test_search_regx = self.selenium_test_regexp
elif tests_type == 'locust':
test_search_regx = self.locust_test_regexp
tests = {}
for line in log:
if trigger in line and ('FAIL' in line or 'OK' in line):
test_name = re.findall(test_search_regx, line)[0]
test_rate = float(''.join(re.findall(self.success_test_rate_regexp, line))[:-1])
if test_name not in tests:
tests[test_name] = test_rate
return tests
def _get_results_bzt_log_part(self):
test_result_string_trigger = 'Request label stats:'
res_string_idx = [index for index, value in enumerate(self.bzt_log) if test_result_string_trigger in value]
# Cut bzt.log from the 'Request label stats:' string to the end
if res_string_idx:
res_string_idx = res_string_idx[0]
results_bzt_run = self.bzt_log[res_string_idx:]
return results_bzt_run
@property
def selenium_test_rates(self):
return self._get_test_count_by_type(tests_type='selenium', log=self.bzt_log_results_part)
@property
def jmeter_test_rates(self):
return self._get_test_count_by_type(tests_type='jmeter', log=self.bzt_log_results_part)
@property
def locust_test_rates(self):
return self._get_test_count_by_type(tests_type='locust', log=self.bzt_log_results_part)
@property
def actual_run_time(self):
run_time_bzt = self._get_duration_by_test_duration()
return run_time_bzt if run_time_bzt else self._get_duration_by_start_finish_strings()
class ResultsFileReader(BaseFileReader):
header_validation = {0: 'Label', 1: '# Samples'}
def __init__(self):
self.results_log = self.get_results_log()
def get_results_log(self):
results_log_path = f'{self.log_dir}/results.csv'
self.validate_file_exists(results_log_path)
with open(results_log_path) as res_file:
header = res_file.readline()
results = res_file.readlines()
self.validate_file_not_empty(results)
headers_list = header.split(',')
self.validate_headers(headers_list, self.header_validation)
return results
@property
def actual_git_operations_count(self):
count = 0
for line in self.results_log:
if any(s in line for s in GIT_OPERATIONS):
count = count + int(line.split(',')[1])
return count
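# Usage sketch (illustrative, not part of the original module). It assumes a
# finished bzt run, i.e. that bzt.log and results.csv already exist under
# ENV_TAURUS_ARTIFACT_DIR; otherwise the readers raise during validation.
if __name__ == '__main__':
    bzt_log = BztFileReader()
    print(f'run time: {bzt_log.actual_run_time}s')
    print(f'jmeter test rates: {bzt_log.jmeter_test_rates}')
    print(f'git operations: {ResultsFileReader().actual_git_operations_count}')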
| 38.673469 | 115 | 0.651187 |
cf2fdc23b093f1a49f1e0e1433a54c5706ccda12 | 2,611 | py | Python | tests/components/test_panel_iframe.py | loraxx753/skynet | 86a1b0a6c6a3f81bc92d4f61de6a9a6b9f964543 | [
"Apache-2.0"
] | 13 | 2017-02-01T13:25:34.000Z | 2022-01-26T01:30:39.000Z | tests/components/test_panel_iframe.py | 1Forward1Back/home-assistant | ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6 | [
"MIT"
] | 9 | 2017-07-26T18:05:32.000Z | 2021-12-05T14:16:34.000Z | tests/components/test_panel_iframe.py | 1Forward1Back/home-assistant | ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6 | [
"MIT"
] | 21 | 2017-07-26T17:09:40.000Z | 2022-03-27T22:37:22.000Z | """The tests for the panel_iframe component."""
import unittest
from unittest.mock import patch
from homeassistant import bootstrap
from homeassistant.components import frontend
from tests.common import get_test_home_assistant
class TestPanelIframe(unittest.TestCase):
"""Test the panel_iframe component."""
def setup_method(self, method):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_wrong_config(self):
"""Test setup with wrong configuration."""
to_try = [
{'invalid space': {
'url': 'https://home-assistant.io'}},
{'router': {
'url': 'not-a-url'}}]
for conf in to_try:
assert not bootstrap.setup_component(
self.hass, 'panel_iframe', {
'panel_iframe': conf
})
@patch.dict('homeassistant.components.frontend.FINGERPRINTS', {
'panels/ha-panel-iframe.html': 'md5md5'})
def test_correct_config(self):
"""Test correct config."""
assert bootstrap.setup_component(
self.hass, 'panel_iframe', {
'panel_iframe': {
'router': {
'icon': 'mdi:network-wireless',
'title': 'Router',
'url': 'http://192.168.1.1',
},
'weather': {
'icon': 'mdi:weather',
'title': 'Weather',
'url': 'https://www.wunderground.com/us/ca/san-diego',
},
},
})
# 5 dev tools + map are automatically loaded + 2 iframe panels
assert len(self.hass.data[frontend.DATA_PANELS]) == 8
assert self.hass.data[frontend.DATA_PANELS]['router'] == {
'component_name': 'iframe',
'config': {'url': 'http://192.168.1.1'},
'icon': 'mdi:network-wireless',
'title': 'Router',
'url': '/frontend/panels/iframe-md5md5.html',
'url_path': 'router'
}
assert self.hass.data[frontend.DATA_PANELS]['weather'] == {
'component_name': 'iframe',
'config': {'url': 'https://www.wunderground.com/us/ca/san-diego'},
'icon': 'mdi:weather',
'title': 'Weather',
'url': '/frontend/panels/iframe-md5md5.html',
'url_path': 'weather',
}
| 34.813333 | 78 | 0.52049 |
ddf657cbfc82f31e521b7c5d0638fdad968e135c | 15,032 | py | Python | ios/build/tools/setup-gn.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ios/build/tools/setup-gn.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ios/build/tools/setup-gn.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2021-03-07T14:20:02.000Z | 2021-03-07T14:20:02.000Z | #!/usr/bin/python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import convert_gn_xcodeproj
import errno
import os
import re
import shutil
import subprocess
import sys
import tempfile
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
import StringIO as io
except ImportError:
import io
SUPPORTED_TARGETS = ('iphoneos', 'iphonesimulator', 'maccatalyst')
SUPPORTED_CONFIGS = ('Debug', 'Release', 'Profile', 'Official', 'Coverage')
# Name of the gn variable to set when generating Xcode project.
GENERATE_XCODE_PROJECT = 'ios_set_attributes_for_xcode_project_generation'
# Patterns matching lines from ~/.lldbinit that must not be copied to the
# generated .lldbinit file. They match what users were told to add to
# their global ~/.lldbinit file before setup-gn.py was updated to generate
# a project specific file and thus must not be copied as they would cause
# the settings to be overwritten.
LLDBINIT_SKIP_PATTERNS = (
re.compile('^script sys.path\\[:0\\] = \\[\'.*/src/tools/lldb\'\\]$'),
re.compile('^script import lldbinit$'),
re.compile('^settings append target.source-map .* /google/src/.*$'),
)
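# For example, a ~/.lldbinit line such as "script import lldbinit" or
# "script sys.path[:0] = ['/path/to/src/tools/lldb']" matches the patterns
# above and is therefore not copied into the generated .lldbinit file.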
class ConfigParserWithStringInterpolation(configparser.SafeConfigParser):
'''A .ini file parser that supports strings and environment variables.'''
ENV_VAR_PATTERN = re.compile(r'\$([A-Za-z0-9_]+)')
def values(self, section):
return map(
lambda kv: self._UnquoteString(self._ExpandEnvVar(kv[1])),
configparser.ConfigParser.items(self, section))
def getstring(self, section, option, fallback=''):
try:
raw_value = self.get(section, option)
    except configparser.NoOptionError:
return fallback
return self._UnquoteString(self._ExpandEnvVar(raw_value))
def _UnquoteString(self, string):
if not string or string[0] != '"' or string[-1] != '"':
return string
return string[1:-1]
def _ExpandEnvVar(self, value):
match = self.ENV_VAR_PATTERN.search(value)
if not match:
return value
name, (begin, end) = match.group(1), match.span(0)
prefix, suffix = value[:begin], self._ExpandEnvVar(value[end:])
return prefix + os.environ.get(name, '') + suffix
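# Illustrative behaviour (sketch, not part of the original script): with
# HOME=/Users/me in the environment, an .ini value of $HOME/goma is expanded
# by _ExpandEnvVar to /Users/me/goma, and _UnquoteString strips the
# surrounding double quotes from values such as '"Debug"'.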
class GnGenerator(object):
'''Holds configuration for a build and method to generate gn default files.'''
FAT_BUILD_DEFAULT_ARCH = '64-bit'
TARGET_CPU_VALUES = {
'iphoneos': '"arm64"',
'iphonesimulator': '"x64"',
'maccatalyst': '"x64"',
}
TARGET_ENVIRONMENT_VALUES = {
'iphoneos': '"device"',
'iphonesimulator': '"simulator"',
'maccatalyst': '"catalyst"'
}
def __init__(self, settings, config, target):
assert target in SUPPORTED_TARGETS
assert config in SUPPORTED_CONFIGS
self._settings = settings
self._config = config
self._target = target
def _GetGnArgs(self, extra_args=None):
"""Build the list of arguments to pass to gn.
Returns:
      A list of tuples containing gn variable names and variable values (it
is not a dictionary as the order needs to be preserved).
"""
args = []
# build/config/ios/ios_sdk.gni asserts that goma is not enabled when
# building Official, so ignore the value of goma.enabled when creating
# args.gn for Official.
if self._config != 'Official':
if self._settings.getboolean('goma', 'enabled'):
args.append(('use_goma', True))
goma_dir = self._settings.getstring('goma', 'install')
if goma_dir:
args.append(('goma_dir', '"%s"' % os.path.expanduser(goma_dir)))
args.append(('target_os', '"ios"'))
args.append(('is_debug', self._config in ('Debug', 'Coverage')))
args.append(('enable_dsyms', self._config in ('Profile', 'Official')))
args.append(('enable_stripping', 'enable_dsyms'))
args.append(('is_official_build', self._config == 'Official'))
args.append(('is_chrome_branded', 'is_official_build'))
args.append(('use_clang_coverage', self._config == 'Coverage'))
args.append(('is_component_build', False))
if os.environ.get('FORCE_MAC_TOOLCHAIN', '0') == '1':
args.append(('use_system_xcode', False))
args.append(('target_cpu', self.TARGET_CPU_VALUES[self._target]))
args.append((
'target_environment',
self.TARGET_ENVIRONMENT_VALUES[self._target]))
if self._target == 'maccatalyst':
# Building for "catalyst" environment has not been open-sourced thus can't
# use ToT clang and need to use Xcode's version instead. This version of
# clang does not generate the same warning as ToT clang, so do not treat
# warnings as errors.
# TODO(crbug.com/1145947): remove once clang ToT supports "macabi".
args.append(('use_xcode_clang', True))
args.append(('treat_warnings_as_errors', False))
# The "catalyst" environment is only supported from iOS 13.0 SDK. Until
# Chrome uses this SDK, it needs to be overridden for "catalyst" builds.
args.append(('ios_deployment_target', '"13.0"'))
# If extra arguments are passed to the function, pass them before the
# user overrides (if any).
if extra_args is not None:
args.extend(extra_args)
# Add user overrides after the other configurations so that they can
# refer to them and override them.
args.extend(self._settings.items('gn_args'))
return args
def Generate(self, gn_path, root_path, build_dir):
self.WriteArgsGn(build_dir, generate_xcode_project=True)
subprocess.check_call(
self.GetGnCommand(gn_path, root_path, build_dir, True))
def CreateGnRules(self, gn_path, root_path, build_dir):
gn_command = self.GetGnCommand(gn_path, root_path, build_dir, False)
self.WriteArgsGn(build_dir, generate_xcode_project=False)
self.WriteBuildNinja(gn_command, build_dir)
self.WriteBuildNinjaDeps(build_dir)
def WriteArgsGn(self, build_dir, generate_xcode_project):
with open(os.path.join(build_dir, 'args.gn'), 'w') as stream:
stream.write('# This file was generated by setup-gn.py. Do not edit\n')
stream.write('# but instead use ~/.setup-gn or $repo/.setup-gn files\n')
stream.write('# to configure settings.\n')
stream.write('\n')
if self._target != 'maccatalyst':
if self._settings.has_section('$imports$'):
for import_rule in self._settings.values('$imports$'):
stream.write('import("%s")\n' % import_rule)
stream.write('\n')
extra_args = [(GENERATE_XCODE_PROJECT, generate_xcode_project)]
gn_args = self._GetGnArgs(extra_args)
for name, value in gn_args:
if isinstance(value, bool):
stream.write('%s = %s\n' % (name, str(value).lower()))
elif isinstance(value, list):
stream.write('%s = [%s' % (name, '\n' if len(value) > 1 else ''))
if len(value) == 1:
prefix = ' '
suffix = ' '
else:
prefix = ' '
suffix = ',\n'
for item in value:
if isinstance(item, bool):
stream.write('%s%s%s' % (prefix, str(item).lower(), suffix))
else:
stream.write('%s%s%s' % (prefix, item, suffix))
stream.write(']\n')
else:
# ConfigParser removes quote around empty string which confuse
# `gn gen` so restore them.
if not value:
value = '""'
stream.write('%s = %s\n' % (name, value))
def WriteBuildNinja(self, gn_command, build_dir):
with open(os.path.join(build_dir, 'build.ninja'), 'w') as stream:
stream.write('ninja_required_version = 1.7.2\n')
stream.write('\n')
stream.write('rule gn\n')
stream.write(' command = %s\n' % NinjaEscapeCommand(gn_command))
stream.write(' description = Regenerating ninja files\n')
stream.write('\n')
stream.write('build build.ninja: gn\n')
stream.write(' generator = 1\n')
stream.write(' depfile = build.ninja.d\n')
def WriteBuildNinjaDeps(self, build_dir):
with open(os.path.join(build_dir, 'build.ninja.d'), 'w') as stream:
stream.write('build.ninja: nonexistant_file.gn\n')
def GetGnCommand(self, gn_path, src_path, out_path, generate_xcode_project):
gn_command = [ gn_path, '--root=%s' % os.path.realpath(src_path), '-q' ]
if generate_xcode_project:
gn_command.append('--ide=xcode')
gn_command.append('--ninja-executable=autoninja')
gn_command.append('--xcode-build-system=new')
if self._settings.has_section('filters'):
target_filters = self._settings.values('filters')
if target_filters:
gn_command.append('--filters=%s' % ';'.join(target_filters))
else:
gn_command.append('--check')
gn_command.append('gen')
gn_command.append('//%s' %
os.path.relpath(os.path.abspath(out_path), os.path.abspath(src_path)))
return gn_command
def NinjaNeedEscape(arg):
'''Returns True if |arg| needs to be escaped when written to .ninja file.'''
return ':' in arg or '*' in arg or ';' in arg
def NinjaEscapeCommand(command):
  '''Escapes |command| in order to write it to .ninja file.'''
  result = []
  for arg in command:
    if NinjaNeedEscape(arg):
      arg = arg.replace(':', '$:')
      arg = arg.replace(';', '\\;')
      arg = arg.replace('*', '\\*')
      # Append the escaped argument; dropping it would corrupt the command.
      result.append(arg)
    else:
      result.append(arg)
  return ' '.join(result)
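# Example (illustration only): NinjaEscapeCommand(['gn', 'gen', '//out:Debug'])
# returns 'gn gen //out$:Debug', so the colon does not confuse ninja when the
# regeneration command is written to build.ninja.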
def FindGn():
'''Returns absolute path to gn binary looking at the PATH env variable.'''
for path in os.environ['PATH'].split(os.path.pathsep):
gn_path = os.path.join(path, 'gn')
if os.path.isfile(gn_path) and os.access(gn_path, os.X_OK):
return gn_path
return None
def GenerateXcodeProject(gn_path, root_dir, out_dir, settings):
'''Convert GN generated Xcode project into multi-configuration Xcode
project.'''
prefix = os.path.abspath(os.path.join(out_dir, '_temp'))
temp_path = tempfile.mkdtemp(prefix=prefix)
try:
generator = GnGenerator(settings, 'Debug', 'iphonesimulator')
generator.Generate(gn_path, root_dir, temp_path)
convert_gn_xcodeproj.ConvertGnXcodeProject(
root_dir,
os.path.join(temp_path),
os.path.join(out_dir, 'build'),
SUPPORTED_CONFIGS)
finally:
if os.path.exists(temp_path):
shutil.rmtree(temp_path)
def CreateLLDBInitFile(root_dir, out_dir, settings):
'''
Generate an .lldbinit file for the project that load the script that fixes
the mapping of source files (see docs/ios/build_instructions.md#debugging).
'''
with open(os.path.join(out_dir, 'build', '.lldbinit'), 'w') as lldbinit:
lldb_script_dir = os.path.join(os.path.abspath(root_dir), 'tools', 'lldb')
lldbinit.write('script sys.path[:0] = [\'%s\']\n' % lldb_script_dir)
lldbinit.write('script import lldbinit\n')
workspace_name = settings.getstring(
'gn_args',
'ios_internal_citc_workspace_name')
if workspace_name != '':
username = os.environ['USER']
for shortname in ('googlemac', 'third_party', 'blaze-out'):
lldbinit.write('settings append target.source-map %s %s\n' % (
shortname,
'/google/src/cloud/%s/%s/google3/%s' % (
username, workspace_name, shortname)))
# Append the content of //ios/build/tools/lldbinit.defaults if it exists.
tools_dir = os.path.join(root_dir, 'ios', 'build', 'tools')
defaults_lldbinit_path = os.path.join(tools_dir, 'lldbinit.defaults')
if os.path.isfile(defaults_lldbinit_path):
with open(defaults_lldbinit_path) as defaults_lldbinit:
for line in defaults_lldbinit:
lldbinit.write(line)
# Append the content of ~/.lldbinit if it exists. Line that look like they
# are trying to configure source mapping are skipped as they probably date
# back from when setup-gn.py was not generating an .lldbinit file.
global_lldbinit_path = os.path.join(os.environ['HOME'], '.lldbinit')
if os.path.isfile(global_lldbinit_path):
with open(global_lldbinit_path) as global_lldbinit:
for line in global_lldbinit:
if any(pattern.match(line) for pattern in LLDBINIT_SKIP_PATTERNS):
continue
lldbinit.write(line)
def GenerateGnBuildRules(gn_path, root_dir, out_dir, settings):
'''Generates all template configurations for gn.'''
for config in SUPPORTED_CONFIGS:
for target in SUPPORTED_TARGETS:
build_dir = os.path.join(out_dir, '%s-%s' % (config, target))
if not os.path.isdir(build_dir):
os.makedirs(build_dir)
generator = GnGenerator(settings, config, target)
generator.CreateGnRules(gn_path, root_dir, build_dir)
def Main(args):
default_root = os.path.normpath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir))
parser = argparse.ArgumentParser(
description='Generate build directories for use with gn.')
parser.add_argument(
'root', default=default_root, nargs='?',
help='root directory where to generate multiple out configurations')
parser.add_argument(
'--import', action='append', dest='import_rules', default=[],
help='path to file defining default gn variables')
parser.add_argument(
'--gn-path', default=None,
help='path to gn binary (default: look up in $PATH)')
parser.add_argument(
'--build-dir', default='out',
help='path where the build should be created (default: %(default)s)')
args = parser.parse_args(args)
# Load configuration (first global and then any user overrides).
settings = ConfigParserWithStringInterpolation()
settings.read([
os.path.splitext(__file__)[0] + '.config',
os.path.expanduser('~/.setup-gn'),
])
# Add private sections corresponding to --import argument.
if args.import_rules:
settings.add_section('$imports$')
for i, import_rule in enumerate(args.import_rules):
if not import_rule.startswith('//'):
import_rule = '//%s' % os.path.relpath(
os.path.abspath(import_rule), os.path.abspath(args.root))
settings.set('$imports$', '$rule%d$' % i, import_rule)
# Validate settings.
if settings.getstring('build', 'arch') not in ('64-bit', '32-bit', 'fat'):
sys.stderr.write('ERROR: invalid value for build.arch: %s\n' %
settings.getstring('build', 'arch'))
sys.exit(1)
# Find path to gn binary either from command-line or in PATH.
if args.gn_path:
gn_path = args.gn_path
else:
gn_path = FindGn()
if gn_path is None:
sys.stderr.write('ERROR: cannot find gn in PATH\n')
sys.exit(1)
out_dir = os.path.join(args.root, args.build_dir)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
GenerateXcodeProject(gn_path, args.root, out_dir, settings)
GenerateGnBuildRules(gn_path, args.root, out_dir, settings)
CreateLLDBInitFile(args.root, out_dir, settings)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| 37.024631 | 80 | 0.668973 |
5ba7cdb84391d7ab67cac5954bf757125e3ce6d4 | 3,201 | py | Python | utils/logger.py | MoustafaMeshry/fbias_gan_residual | 798a7935d42b9987039ceddb2e415a499a2e18ce | [
"MIT"
] | 29 | 2021-11-05T10:09:21.000Z | 2022-03-15T13:37:06.000Z | utils/logger.py | MoustafaMeshry/fbias_gan_residual | 798a7935d42b9987039ceddb2e415a499a2e18ce | [
"MIT"
] | null | null | null | utils/logger.py | MoustafaMeshry/fbias_gan_residual | 798a7935d42b9987039ceddb2e415a499a2e18ce | [
"MIT"
] | 2 | 2021-12-15T13:04:52.000Z | 2022-01-22T16:30:34.000Z | """From https://github.com/LMescheder/GAN_stability/blob/master/gan_training/logger.py"""
import pickle
import os
import torchvision
class Logger(object):
def __init__(self, log_dir='./logs', img_dir='./imgs',
monitoring=None, monitoring_dir=None):
self.stats = dict()
self.log_dir = log_dir
self.img_dir = img_dir
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(img_dir):
os.makedirs(img_dir)
if not (monitoring is None or monitoring == 'none'):
self.setup_monitoring(monitoring, monitoring_dir)
else:
self.monitoring = None
self.monitoring_dir = None
def setup_monitoring(self, monitoring, monitoring_dir=None):
self.monitoring = monitoring
self.monitoring_dir = monitoring_dir
if monitoring == 'telemetry':
import telemetry
self.tm = telemetry.ApplicationTelemetry()
if self.tm.get_status() == 0:
print('Telemetry successfully connected.')
elif monitoring == 'tensorboard':
import tensorboardX
self.tb = tensorboardX.SummaryWriter(monitoring_dir)
else:
raise NotImplementedError('Monitoring tool "%s" not supported!'
% monitoring)
def add(self, category, k, v, it):
if category not in self.stats:
self.stats[category] = {}
if k not in self.stats[category]:
self.stats[category][k] = []
self.stats[category][k].append((it, v))
k_name = '%s/%s' % (category, k)
if self.monitoring == 'telemetry':
self.tm.metric_push_async({
'metric': k_name, 'value': v, 'it': it
})
elif self.monitoring == 'tensorboard':
self.tb.add_scalar(k_name, v, it)
def add_imgs(self, imgs, class_name, it):
outdir = os.path.join(self.img_dir, class_name)
if not os.path.exists(outdir):
os.makedirs(outdir)
outfile = os.path.join(outdir, '%08d.png' % it)
imgs = imgs / 2 + 0.5
imgs = torchvision.utils.make_grid(imgs)
torchvision.utils.save_image(imgs, outfile, nrow=8)
if self.monitoring == 'tensorboard':
self.tb.add_image(class_name, imgs, it)
def get_last(self, category, k, default=0.):
if category not in self.stats:
return default
elif k not in self.stats[category]:
return default
else:
return self.stats[category][k][-1][1]
def save_stats(self, filename):
filename = os.path.join(self.log_dir, filename)
with open(filename, 'wb') as f:
pickle.dump(self.stats, f)
def load_stats(self, filename):
filename = os.path.join(self.log_dir, filename)
if not os.path.exists(filename):
print('Warning: file "%s" does not exist!' % filename)
return
try:
with open(filename, 'rb') as f:
self.stats = pickle.load(f)
except EOFError:
print('Warning: log file corrupted!') | 33.34375 | 89 | 0.577632 |
be6f75e6d06fa23e958363ecc97342fd015af791 | 6,481 | py | Python | scripts/TEMP/temp_post.py | shunhuahan/mcclintock | 999f064847e824d41a76791c913e24454ef6cba8 | [
"Unlicense"
] | null | null | null | scripts/TEMP/temp_post.py | shunhuahan/mcclintock | 999f064847e824d41a76791c913e24454ef6cba8 | [
"Unlicense"
] | null | null | null | scripts/TEMP/temp_post.py | shunhuahan/mcclintock | 999f064847e824d41a76791c913e24454ef6cba8 | [
"Unlicense"
] | null | null | null | import os
import sys
import subprocess
sys.path.append(snakemake.config['args']['mcc_path'])
import scripts.mccutils as mccutils
import scripts.output as output
import config.TEMP.temp_post as config
def main():
mccutils.log("temp","running TEMP post processing")
insert_summary = snakemake.input.insert_summary
absence_summary = snakemake.input.absence_summary
te_gff = snakemake.input.te_gff
reference_fasta = snakemake.input.reference_fasta
log = snakemake.params.log
sample_name = snakemake.params.sample_name
chromosomes = snakemake.params.chromosomes.split(",")
out_dir = snakemake.params.out_dir
insertions = read_insertion_summary(insert_summary, sample_name)
absence_bed = make_absence_bed(absence_summary, sample_name, out_dir)
non_absent_ref_insertions = get_non_absent_ref_tes(te_gff, absence_bed, sample_name, out_dir, log)
insertions += non_absent_ref_insertions
insertions = filter_insertions(insertions, chromosomes, acceptable_classes=config.ACCEPTABLE_INSERTION_SUPPORT_CLASSES, frequency_theshold=config.FREQUENCY_THRESHOLD)
if len(insertions) > 0:
insertions = output.make_redundant_bed(insertions, sample_name, out_dir, method="temp")
insertions = output.make_nonredundant_bed(insertions, sample_name, out_dir, method="temp")
output.write_vcf(insertions, reference_fasta, sample_name, "temp", out_dir)
else:
mccutils.run_command(["touch", out_dir+"/"+sample_name+"_temp_redundant.bed"])
mccutils.run_command(["touch", out_dir+"/"+sample_name+"_temp_nonredundant.bed"])
mccutils.log("temp","TEMP postprocessing complete")
def read_insertion_summary(infile, sample):
insertions = []
with open(infile,"r") as inf:
for x,line in enumerate(inf):
if x > 0:
insert = output.Insertion(output.Temp())
split_line = line.split("\t")
if len(split_line) == 14:
insert.chromosome = split_line[0]
insert.start = int(split_line[1])-1
insert.end = int(split_line[2])
insert.family = split_line[3]
insert.name = insert.family+"|non-reference|"+split_line[7]+"|"+sample+"|temp|"
if "antisense" in split_line[4]:
insert.strand = "-"
else:
insert.strand = "+"
insert.support_info.support['class'].value = split_line[5]
insert.support_info.support['variantsupport'].value = int(float(split_line[6]))
insert.support_info.support['frequency'].value = float(split_line[7])
insert.support_info.support['junction1'].value = int(split_line[8])
insert.support_info.support['junction1support'].value = int(split_line[9])
insert.support_info.support['junction2'].value = int(split_line[10])
insert.support_info.support['junction2support'].value = int(split_line[11])
insert.support_info.support['fiveprimesupport'].value = int(float(split_line[12]))
insert.support_info.support['threeprimesupport'].value = int(float(split_line[13].replace("\n","")))
insert.type = "non-reference"
if insert.end >= insert.start and insert.end > 0 and insert.start > -1:
# if split read, use junction positions as start and end
if insert.support_info.support['junction1support'].value > 0 and insert.support_info.support['junction2support'].value > 0:
insert.start = insert.support_info.support['junction1'].value
insert.end = insert.support_info.support['junction2'].value
insert.name = insert.name+"sr|"
# read pair
else:
insert.name = insert.name+"rp|"
insertions.append(insert)
else:
print("<TEMP POST> Omitting malformed line from insertion summary results:", line)
else:
print("<TEMP POST> Omitting malformed line from insertion summary results:", line)
return insertions
def make_absence_bed(summary_file, sample, out):
out_bed = out+"/"+sample+".absent.bed"
lines = []
with open(summary_file, "r") as inf:
for x,line in enumerate(inf):
if x > 0:
split_line = line.split("\t")
new_line = "\t".join([split_line[0], split_line[1], split_line[2]])
new_line += "\n"
lines.append(new_line)
if len(lines) < 1:
lines.append("empty\t0\t1\n")
with open(out_bed,"w") as bed:
for line in lines:
bed.write(line)
return out_bed
def get_non_absent_ref_tes(te_gff, absence_bed, sample, out, log):
insertions = []
tmp_gff = out+"/tmp.ref_nonabs.gff"
command = ["bedtools", "subtract", "-A", "-a", te_gff, "-b", absence_bed]
mccutils.run_command_stdout(command, tmp_gff, log=log)
with open(tmp_gff,"r") as gff:
for line in gff:
if "#" not in line:
line = line.replace(";","\t")
split_line = line.split("\t")
insert = output.Insertion(output.Temp())
insert.chromosome = split_line[0]
insert.start = int(split_line[3])
insert.end = int(split_line[4])
insert.name = split_line[9].split("=")[1]+"|reference|NA|"+sample+"|temp|nonab|"
insert.strand = split_line[6]
insert.type = "reference"
insertions.append(insert)
mccutils.remove(tmp_gff)
return insertions
def filter_insertions(insertions, chromosomes, acceptable_classes=["1p1"], frequency_threshold=0.1):
out = []
for insert in insertions:
if ( insert.chromosome in chromosomes and
( insert.type == "reference" or
(insert.support_info.support['class'].value in acceptable_classes and
                    insert.support_info.support['frequency'].value > frequency_threshold))):
out.append(insert)
return out
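# Minimal usage sketch (hypothetical call): reference insertions only need to sit on an
# allowed chromosome, while non-reference insertions must also have an acceptable support
# class and a frequency above the threshold.
#
#   kept = filter_insertions(insertions, ["chr2L", "chr3R"])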
if __name__ == "__main__":
main() | 43.206667 | 170 | 0.597284 |
b65182c8d950fc5ef1a251c5f78310ab9456518c | 330 | py | Python | actions/__init__.py | BlackCatDevel0per/s2txt | fe1cf551057be5777eb8f27e9d56dd2ae3cbb514 | ["Apache-2.0"] | null | null | null | actions/__init__.py | BlackCatDevel0per/s2txt | fe1cf551057be5777eb8f27e9d56dd2ae3cbb514 | ["Apache-2.0"] | null | null | null | actions/__init__.py | BlackCatDevel0per/s2txt | fe1cf551057be5777eb8f27e9d56dd2ae3cbb514 | ["Apache-2.0"] | null | null | null | from .uic4load import UIC
from .menubar import FileMenu
from .menubar import Record
from .menubar import Options
from .menubar import Other
from .window import WindowActions
from .buttons import Buttons
from .buttons import Shortcuts
class Actions(WindowActions, FileMenu, Record, Options, Other, Buttons, Shortcuts):
pass
| 23.571429 | 83 | 0.80303 |
e836256d50a2fa4f979740aa49b809c52f73f21e | 457 | py | Python | chapter9/leds_led_shim.py | dannystaple/Learn-Robotics-Programming-Second-Edition | 081ed9bbab59aab57334fe8f2f06a157a8639eb4 | ["MIT"] | 19 | 2020-05-13T12:53:59.000Z | 2022-03-07T19:50:30.000Z | chapter9/leds_led_shim.py | dannystaple/Learn-Robotics-Programming-Second-Edition | 081ed9bbab59aab57334fe8f2f06a157a8639eb4 | ["MIT"] | 1 | 2020-11-20T16:56:24.000Z | 2020-12-01T06:24:45.000Z | chapter9/leds_led_shim.py | dannystaple/Learn-Robotics-Programming-Second-Edition | 081ed9bbab59aab57334fe8f2f06a157a8639eb4 | ["MIT"] | 12 | 2019-12-24T18:13:14.000Z | 2022-03-20T23:44:12.000Z | import ledshim
class Leds:
@property
def count(self):
return ledshim.width
def set_one(self, led_number, color):
ledshim.set_pixel(led_number, *color)
def set_range(self, led_range, color):
for pixel in led_range:
ledshim.set_pixel(pixel, *color)
def set_all(self, color):
ledshim.set_all(*color)
def clear(self):
ledshim.clear()
def show(self):
ledshim.show()
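# Usage sketch (assumes a Pimoroni LED SHIM is attached and the ledshim package is
# installed; not part of the original chapter code):
#
#   leds = Leds()
#   leds.set_all((0, 255, 0))               # every pixel green
#   leds.set_range(range(4), (255, 0, 0))   # first four pixels red
#   leds.show()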
| 19.041667 | 45 | 0.610503 |
e738fa006b43b0aafbdbe957091cd90856c35c67 | 3,049 | py | Python | docs/tests/test_images.py | yjf18340/webots | 60d441c362031ab8fde120cc0cd97bdb1a31a3d5 | ["Apache-2.0"] | 1 | 2019-01-21T07:14:55.000Z | 2019-01-21T07:14:55.000Z | docs/tests/test_images.py | chinakwy/webots | 7c35a359848bafe81fe0229ac2ed587528f4c73e | ["Apache-2.0"] | null | null | null | docs/tests/test_images.py | chinakwy/webots | 7c35a359848bafe81fe0229ac2ed587528f4c73e | ["Apache-2.0"] | 1 | 2020-09-25T02:01:45.000Z | 2020-09-25T02:01:45.000Z | """Test module of the images."""
import unittest
from books import Books
import fnmatch
import os
import re
class TestImages(unittest.TestCase):
"""Unit test of the images."""
def test_images_are_valid(self):
"""Test that the MD files refer to valid URLs."""
books = Books()
for book in books.books:
for md_path in book.md_paths:
with open(md_path) as f:
content = f.read()
for match in re.finditer(r"!\[(.*?)\]\((.*?)\)", content):
# remove parameters
is_youtube_video = match.group(1) == "youtube video"
if not is_youtube_video:
image_ref = match.group(2).split(' ')[0]
image_path = os.path.join(book.path, image_ref)
self.assertTrue(
os.path.isfile(image_path),
msg='%s: "%s" not found' % (md_path, image_path)
)
def test_all_images_are_used(self):
"""Test that all the image files are referenced somewhere."""
books = Books()
for book in books.books:
# search for all images
images_paths = [] # ['image/sonar.png', 'image/sphere.png', ...]
for root, dirnames, filenames in os.walk(book.path):
if 'scenes' in root.replace(books.project_path, ''):
continue
for filename in fnmatch.filter(filenames, '*.png') + fnmatch.filter(filenames, '*.jpg'):
image_path = os.path.join(root, filename)
image_path = image_path[(len(book.path) + 1):]
images_paths.append(image_path.replace('\\', '/'))
self.assertGreater(
len(images_paths), 0,
msg='No image found in book "%s"' % book.name
)
# check the image reference can be found in at least one MD file
for image_path in images_paths:
found = False
for md_path in book.md_paths:
with open(md_path) as file:
if (image_path in file.read() or
image_path.replace('.png', '.thumbnail.jpg') in images_paths or
image_path.replace('.png', '.thumbnail.png') in images_paths):
found = True
break
self.assertTrue(
found, msg='Image "%s" not referenced in any MD file.' % image_path
)
# in case of thumbnail make sure the original file is available
if image_path.endswith('.thumbnail.jpg'):
self.assertTrue(
image_path.replace('.thumbnail.jpg', '.png') in images_paths,
msg='Missing original file for thumbnail "%s".' % image_path
)
if __name__ == '__main__':
unittest.main()
| 42.347222 | 104 | 0.501804 |
5f44da2c24c3de60f95ddb288c8d9295ccd2ae5e | 10,246 | py | Python | tcfcli/cmds/deploy/cli.py | alfredhuang211/scfcli | f5e086ff4fcee8d645682e85cd1486b28a224d08 | ["Apache-2.0"] | null | null | null | tcfcli/cmds/deploy/cli.py | alfredhuang211/scfcli | f5e086ff4fcee8d645682e85cd1486b28a224d08 | ["Apache-2.0"] | null | null | null | tcfcli/cmds/deploy/cli.py | alfredhuang211/scfcli | f5e086ff4fcee8d645682e85cd1486b28a224d08 | ["Apache-2.0"] | null | null | null | import click
import os
import sys
import time
from io import BytesIO
from tcfcli.common.template import Template
from tcfcli.common.user_exceptions import TemplateNotFoundException, InvalidTemplateException, ContextException
from tcfcli.common.user_exceptions import CloudAPIException
from tcfcli.libs.utils.scf_client import ScfClient
from tcfcli.common import tcsam
from tcfcli.common.user_config import UserConfig
from tcfcli.common.tcsam.tcsam_macro import TcSamMacro as tsmacro
from zipfile import ZipFile, ZIP_DEFLATED
from tcfcli.libs.utils.cos_client import CosClient
_CURRENT_DIR = '.'
_BUILD_DIR = './.tcf_build'
DEF_TMP_FILENAME = 'template.yaml'
REGIONS = ['ap-beijing', 'ap-chengdu', 'ap-guangzhou', 'ap-hongkong',
'ap-mumbai', 'ap-shanghai']
@click.command()
@click.option('--template-file', '-t', default=DEF_TMP_FILENAME, type=click.Path(exists=True),
help="TCF template file for deploy")
@click.option('--cos-bucket', '-c', type=str, help="COS bucket name")
@click.option('-n', '--name', type=str, help="Function name")
@click.option('-ns', '--namespace', type=str, help="Namespace name")
@click.option('--region', '-r', type=click.Choice(REGIONS),
help="The region which the function want to be deployed")
@click.option('-f', '--forced', is_flag=True, default=False,
              help="Update the function when it already exists, default false")
@click.option('--skip-event', is_flag=True, default=False,
              help="Keep the previous version's triggers; do not overwrite them this time.")
def deploy(template_file, cos_bucket, name, namespace, region, forced, skip_event):
'''
Deploy a scf.
'''
package = Package(template_file, cos_bucket, name, region, namespace)
resource = package.do_package()
deploy = Deploy(resource, namespace, region, forced, skip_event)
deploy.do_deploy()
class Package(object):
def __init__(self, template_file, cos_bucket, function, region, deploy_namespace):
self.template_file = template_file
self.template_file_dir = ""
self.cos_bucket = cos_bucket
self.check_params()
template_data = tcsam.tcsam_validate(Template.get_template_data(self.template_file))
self.resource = template_data.get(tsmacro.Resources, {})
self.function = function
self.deploy_namespace = deploy_namespace
self.region = region
def do_package(self):
for ns in self.resource:
for func in list(self.resource[ns]):
if func == tsmacro.Type:
continue
if self.function is not None and func != self.function:
self.resource[ns].pop(func)
continue
code_url = self._do_package_core(
self.resource[ns][func][tsmacro.Properties].get(tsmacro.CodeUri, ""),
ns,
func,
self.region
)
if "cos_bucket_name" in code_url:
self.resource[ns][func][tsmacro.Properties]["CosBucketName"] = code_url["cos_bucket_name"]
self.resource[ns][func][tsmacro.Properties]["CosObjectName"] = code_url["cos_object_name"]
click.secho("Upload function zip file '{}' to COS bucket '{}' success".
format(os.path.basename(code_url["cos_object_name"]),
code_url["cos_bucket_name"]), fg="green")
elif "zip_file" in code_url:
self.resource[ns][func][tsmacro.Properties]["LocalZipFile"] = code_url["zip_file"]
# click.secho("Generate resource '{}' success".format(self.resource), fg="green")
return self.resource
def check_params(self):
if not self.template_file:
click.secho("FAM Template Not Found", fg="red")
raise TemplateNotFoundException("Missing option --template-file")
if not os.path.isfile(self.template_file):
click.secho("FAM Template Not Found", fg="red")
raise TemplateNotFoundException("template-file Not Found")
self.template_file = os.path.abspath(self.template_file)
self.template_file_dir = os.path.dirname(os.path.abspath(self.template_file))
uc = UserConfig()
if self.cos_bucket and self.cos_bucket.endswith("-" + uc.appid):
self.cos_bucket = self.cos_bucket.replace("-" + uc.appid, '')
def _do_package_core(self, func_path, namespace, func_name, region=None):
zipfile, zip_file_name, zip_file_name_cos = self._zip_func(func_path, namespace, func_name)
code_url = dict()
if self.cos_bucket:
CosClient(region).upload_file2cos(bucket=self.cos_bucket, file=zipfile.read(),
key=zip_file_name_cos)
code_url["cos_bucket_name"] = self.cos_bucket
code_url["cos_object_name"] = "/" + zip_file_name_cos
else:
code_url["zip_file"] = os.path.join(os.getcwd(), _BUILD_DIR, zip_file_name)
return code_url
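    # Sketch of the two shapes code_url can take (bucket name and paths are placeholders):
    # with --cos-bucket it is
    #   {"cos_bucket_name": "mybucket", "cos_object_name": "/ns-func-latest-<timestamp>.zip"}
    # and without it
    #   {"zip_file": "<cwd>/.tcf_build/ns-func-latest.zip"}
    # do_package() then maps these onto CosBucketName/CosObjectName or LocalZipFile.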
def _zip_func(self, func_path, namespace, func_name):
buff = BytesIO()
if not os.path.exists(func_path):
raise ContextException("Function file or path not found by CodeUri '{}'".format(func_path))
if self.deploy_namespace and self.deploy_namespace != namespace:
namespace = self.deploy_namespace
zip_file_name = str(namespace) + '-' + str(func_name) + '-latest.zip'
zip_file_name_cos = str(namespace) + '-' + str(func_name) + '-latest' + time.strftime(
"-%Y-%m-%d-%H-%M-%S", time.localtime(int(time.time()))) + '.zip'
cwd = os.getcwd()
os.chdir(self.template_file_dir)
os.chdir(func_path)
with ZipFile(buff, mode='w', compression=ZIP_DEFLATED) as zip_object:
for current_path, sub_folders, files_name in os.walk(_CURRENT_DIR):
if current_path == _BUILD_DIR:
continue
for file in files_name:
zip_object.write(os.path.join(current_path, file))
os.chdir(cwd)
buff.seek(0)
buff.name = zip_file_name
if not os.path.exists(_BUILD_DIR):
os.mkdir(_BUILD_DIR)
zip_file_path = os.path.join(_BUILD_DIR, zip_file_name)
if os.path.exists(zip_file_path):
os.remove(zip_file_path)
# a temporary support for upload func from local zipfile
with open(zip_file_path, 'wb') as f:
f.write(buff.read())
buff.seek(0)
click.secho("Compress function '{}' to zipfile '{}' success".format(zip_file_path, zip_file_name))
return buff, zip_file_name, zip_file_name_cos
class Deploy(object):
def __init__(self, resource, namespace, region=None, forced=False, skip_event=False):
self.resources = resource
self.namespace = namespace
self.region = region
self.forced = forced
self.skip_event = skip_event
def do_deploy(self):
for ns in self.resources:
if not self.resources[ns]:
continue
click.secho("Deploy namespace '{ns}' begin".format(ns=ns))
for func in self.resources[ns]:
if func == tsmacro.Type:
continue
self._do_deploy_core(self.resources[ns][func], func, ns, self.region,
self.forced, self.skip_event)
click.secho("Deploy namespace '{ns}' end".format(ns=ns))
def _do_deploy_core(self, func, func_name, func_ns, region, forced, skip_event=False):
# check namespace exit, create namespace
if self.namespace and self.namespace != func_ns:
func_ns = self.namespace
rep = ScfClient(region).get_ns(func_ns)
if not rep:
click.secho("{ns} not exists, create it now".format(ns=func_ns), fg="red")
err = ScfClient(region).create_ns(func_ns)
if err is not None:
if sys.version_info[0] == 3:
s = err.get_message()
else:
s = err.get_message().encode("UTF-8")
click.secho("Create namespace '{name}' failure. Error: {e}.".format(
name=func_ns, e=s), fg="red")
sys.exit(1)
err = ScfClient(region).deploy_func(func, func_name, func_ns, forced)
if err is not None:
if sys.version_info[0] == 3:
s = err.get_message()
else:
s = err.get_message().encode("UTF-8")
            err_msg = "Deploy function '{name}' failure, {e}.".format(name=func_name, e=s)
            if err.get_request_id():
                err_msg += "\nRequestId: {}".format(err.get_request_id())
            # err_msg is already text here; no bytes round-trip is needed under Python 3
            raise CloudAPIException(err_msg)
click.secho("Deploy function '{name}' success".format(name=func_name), fg="green")
if not skip_event:
self._do_deploy_trigger(func, func_name, func_ns, region)
def _do_deploy_trigger(self, func, func_name, func_ns, region=None):
proper = func.get(tsmacro.Properties, {})
events = proper.get(tsmacro.Events, {})
hasError = None
for trigger in events:
err = ScfClient(region).deploy_trigger(events[trigger], trigger, func_name, func_ns)
if err is not None:
hasError = err
if sys.version_info[0] == 3:
s = err.get_message()
else:
s = err.get_message().encode("UTF-8")
click.secho(
"Deploy trigger '{name}' failure. Error: {e}.".format(name=trigger,
e=s), fg="red")
if err.get_request_id():
                    click.secho("RequestId: {}".format(err.get_request_id()), fg="red")
continue
click.secho("Deploy trigger '{name}' success".format(name=trigger), fg="green")
if hasError is not None:
sys.exit(1)
| 43.232068 | 111 | 0.60121 |
dec5c48341dd63e70af67a32c7c2142426be2404 | 4,026 | py | Python | src/tributaries/metadata.py | akilby/tributary-cache | f4884a2aa49685b9e8aeb925c50afda887db1b18 | ["MIT"] | null | null | null | src/tributaries/metadata.py | akilby/tributary-cache | f4884a2aa49685b9e8aeb925c50afda887db1b18 | ["MIT"] | null | null | null | src/tributaries/metadata.py | akilby/tributary-cache | f4884a2aa49685b9e8aeb925c50afda887db1b18 | ["MIT"] | null | null | null | from .utils.codeparsers import code_tree
from .utils.objecthashers import complex_hasher
def determine_metadata(func, args, kwargs,
exclusion_list, globals_list,
old_version=False):
metadata = dict()
metadata['func'] = func
metadata['args'] = args
metadata['kwargs'] = kwargs
(metadata['code'],
metadata['other_globals']) = code_tree(func, args, kwargs,
exclusion_list, globals_list,
old_version=old_version)
if old_version:
metadata.pop('other_globals')
return refactor_metadata_for_storage(metadata)
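# Shape of the metadata built above, before hashing (placeholder values; the keys are
# exactly the ones assigned in determine_metadata):
#
#   {'func': <callable>, 'args': (...), 'kwargs': {...},
#    'code': {...}, 'other_globals': {...}}
#
# refactor_metadata_for_storage() then replaces the args/kwargs contents with hashes so
# the whole dictionary can act as a cache key.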
def refactor_metadata_for_readability(metadata):
m = metadata.copy()
code = m['code']
code = {k: '-code snipped-' for k, v in code.items()}
args = m['args']
args = [(arg[:20] + ['...', '-args snipped-']
if isinstance(arg, list) and len(arg) > 20 else arg)
for arg in args]
args = [(set(list(arg)[:20]).union(set(['...', '-args snipped-']))
if isinstance(arg, set) and len(arg) > 20 else arg)
for arg in args]
args = [dict_refactor(arg) if isinstance(arg, dict) else arg
for arg in args]
kwargs = m['kwargs']
kwargs = dict_refactor(kwargs)
other_globals = m['other_globals']
for key, val in other_globals.items():
if isinstance(val, list) and len(val) > 20:
other_globals[key] = val[:20] + ['...', '-other_globals snipped-']
m2 = metadata.copy()
m2['code'] = code
m2['args'] = args
m2['kwargs'] = kwargs
m2['other_globals'] = other_globals
return m2
def dict_refactor(kwargs):
for key, val in kwargs.items():
if isinstance(val, list) and len(val) > 20:
kwargs[key] = val[:20] + ['...', '-snipped-']
elif isinstance(val, set) and len(val) > 20:
kwargs[key] = set(list(val)[:20]).union(
set(['...', '-snipped-']))
elif isinstance(val, dict):
for key1, val1 in val.items():
if isinstance(val1, list) and len(val1) > 20:
val[key1] = val1[:20] + ['...', '-snipped-']
kwargs[key] = val
return kwargs
def refactor_metadata_for_storage(metadata):
m, m2 = metadata.copy(), metadata.copy()
args, kwargs = m['args'], m['kwargs']
args = [complex_hasher(arg) for arg in args]
args = hash_arglist(args)
kw = dict_hasher(kwargs.copy())
m2['args'] = tuple(args)
m2['kwargs'] = kw
return m2
def hash_arglist(arglist):
if isinstance(arglist, list) or isinstance(arglist, tuple):
arglist = hash_all_in_arglist(arglist)
argsnew = []
for arg in arglist:
if isinstance(arg, list) or isinstance(arg, tuple):
arg = hash_all_in_arglist(arg)
elif isinstance(arg, dict):
arg = dict_hasher(arg.copy())
argsnew.append(arg)
if isinstance(arglist, tuple):
return tuple(argsnew)
elif isinstance(arglist, list):
return argsnew
return arglist
def hash_all_in_arglist(arglist):
argsnew = []
for arg in arglist:
if isinstance(arg, list) or isinstance(arg, tuple):
arg2 = [complex_hasher(a) for a in arg]
arg2 = hash_all_in_arglist(arg2)
if isinstance(arg, tuple):
arg2 = tuple(arg2)
else:
arg2 = arg
argsnew.append(arg2)
if isinstance(arglist, tuple):
return tuple(argsnew)
return argsnew
def dict_hasher(kw):
kw = kw.copy()
for key, val in kw.items():
kw[key] = complex_hasher(val)
if isinstance(val, list):
kw[key] = [complex_hasher(arg) for arg in val]
elif isinstance(val, dict):
m3 = val.copy()
for key_small, val_small in m3.items():
m3[key_small] = complex_hasher(val_small)
kw[key] = m3
return kw
| 33.831933 | 78 | 0.564083 |
fa5a804f51c456014a5b0677dbaa37c9b7d84eb9 | 2,572 | py | Python | partname_resolver/units/resistance.py | sakoPO/partname-resolver | ad881eb147b005f0e833a1c78fa9fc4b8b7a33bb | ["BSD-3-Clause"] | null | null | null | partname_resolver/units/resistance.py | sakoPO/partname-resolver | ad881eb147b005f0e833a1c78fa9fc4b8b7a33bb | ["BSD-3-Clause"] | null | null | null | partname_resolver/units/resistance.py | sakoPO/partname-resolver | ad881eb147b005f0e833a1c78fa9fc4b8b7a33bb | ["BSD-3-Clause"] | null | null | null | from decimal import Decimal
from .unit_base import Unit
import re
class Resistance(Unit):
multiply = {u'G': Decimal('1000000000'),
u'G\u03a9': Decimal('1000000000'),
u'GR': Decimal('1000000000'),
u'M': Decimal('1000000'),
u'M\u03a9': Decimal('1000000'),
u'MR': Decimal('1000000'),
u'k': Decimal('1000'),
u'k\u03a9': Decimal('1000'),
u'kR': Decimal('1000'),
u'R': Decimal('1'),
u'\u03a9': Decimal('1'),
u'm': Decimal('0.001'),
u'm\u03a9': Decimal('0.001'),
u'mR': Decimal('0.001'),
u'u': Decimal('0.000001'),
u'u\u03a9': Decimal('0.000001'),
u'uR': Decimal('0.000001')}
def __init__(self, resistance):
if isinstance(resistance, Decimal):
self.resistance = resistance
elif isinstance(resistance, str):
self.resistance = self.__convert_str_resistance_to_decimal_ohms(resistance)
else:
print(resistance)
raise TypeError
        super().__init__("Ohm", '\u03a9', self.resistance)  # unit name kept consistent with the ohm symbol
self.str_conversion_prefixes = ['u', 'm', '-', 'k', 'M', 'G']
def __eq__(self, other):
if isinstance(other, str):
return self.resistance == self.__convert_str_resistance_to_decimal_ohms(other)
if isinstance(other, Resistance):
return self.resistance == other.resistance
@staticmethod
def __convert_str_resistance_to_decimal_ohms(resistance):
resistance = resistance.replace("Ohms", "\u03a9")
resistance = resistance.replace("Ohm", "\u03a9")
try:
separated = re.split('(\d+)', resistance)
if separated[-1] in Resistance.multiply:
multiplier = Resistance.multiply[separated[-1]]
value = Decimal(resistance.replace(separated[-1], ''))
value = value * multiplier
return value
else:
for i, chunk in enumerate(separated):
if chunk in Resistance.multiply:
multiplier = Resistance.multiply[chunk]
resistance = Decimal(resistance.replace(chunk, '.'))
resistance = resistance * multiplier
return resistance
return Decimal(resistance)
except:
print("Unable to convert resistance: " + resistance)
raise
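# Parsing sketch (values follow directly from the multiply table and the splitting logic
# above; this block is illustrative, not part of the original module):
#
#   Resistance("4k7").resistance     -> Decimal('4700.0')   # infix multiplier notation
#   Resistance("10kOhm").resistance  -> Decimal('10000')    # "Ohm"/"Ohms" rewritten to the ohm symbol
#   Resistance("220mR").resistance   -> Decimal('0.220')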
| 40.1875 | 90 | 0.531882 |
ccbba72c16c09f70975f250b253f1ebc57e79161 | 23 | py | Python | ros/devel/lib/python2.7/dist-packages/kvaser/msg/__init__.py | Innovation-Cell/radar | de1bcd91e5a831e2858539241edfea3ce79f3afd | ["MIT"] | null | null | null | ros/devel/lib/python2.7/dist-packages/kvaser/msg/__init__.py | Innovation-Cell/radar | de1bcd91e5a831e2858539241edfea3ce79f3afd | ["MIT"] | null | null | null | ros/devel/lib/python2.7/dist-packages/kvaser/msg/__init__.py | Innovation-Cell/radar | de1bcd91e5a831e2858539241edfea3ce79f3afd | ["MIT"] | null | null | null | from ._CANESR import *
| 11.5 | 22 | 0.73913 |
3e9ea83d0288e186d79c951cfb17aac1ef101fa3 | 8,858 | py | Python | accelbyte_py_sdk/api/lobby/operations/player/admin_set_player_session_attribute.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | ["MIT"] | null | null | null | accelbyte_py_sdk/api/lobby/operations/player/admin_set_player_session_attribute.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | ["MIT"] | null | null | null | accelbyte_py_sdk/api/lobby/operations/player/admin_set_player_session_attribute.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | ["MIT"] | null | null | null | # Auto-generated at 2021-09-27T17:01:26.588557+08:00
# from: Justice Lobby Service (1.33.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HttpResponse
from ...models import ModelsSetPlayerSessionAttributeRequest
from ...models import RestapiErrorResponseBody
class AdminSetPlayerSessionAttribute(Operation):
"""admin set player's session attribute (adminSetPlayerSessionAttribute)
Properties:
url: /lobby/v1/admin/player/namespaces/{namespace}/users/{userId}/attributes
method: PUT
tags: player
consumes: ["application/json"]
produces: ["application/json"]
security: bearer
body: (body) REQUIRED ModelsSetPlayerSessionAttributeRequest in body
namespace: (namespace) REQUIRED str in path
user_id: (userId) REQUIRED str in path
Responses:
204: No Content - (No Content)
400: Bad Request - RestapiErrorResponseBody (Bad Request)
401: Unauthorized - RestapiErrorResponseBody (Unauthorized)
403: Forbidden - RestapiErrorResponseBody (Forbidden)
404: Not Found - RestapiErrorResponseBody (Not Found)
500: Internal Server Error - RestapiErrorResponseBody (Internal Server Error)
"""
# region fields
_url: str = "/lobby/v1/admin/player/namespaces/{namespace}/users/{userId}/attributes"
_method: str = "PUT"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_security: Optional[str] = "bearer"
_location_query: str = None
body: ModelsSetPlayerSessionAttributeRequest # REQUIRED in [body]
namespace: str # REQUIRED in [path]
user_id: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def security(self) -> Optional[str]:
return self._security
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
def get_full_url(self, base_url: Union[None, str] = None) -> str:
result = base_url if base_url is not None else ""
# path params
url = self.url
for k, v in self.get_path_params().items():
url = url.replace(f"{{{k}}}", v)
result += url
return result
# noinspection PyMethodMayBeStatic
def get_all_required_fields(self) -> List[str]:
return [
"body",
"namespace",
"user_id",
]
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
}
def get_body_params(self) -> Any:
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
if hasattr(self, "user_id"):
result["userId"] = self.user_id
return result
# endregion get_x_params methods
# region is/has methods
def is_valid(self) -> bool:
if not hasattr(self, "body") or self.body is None:
return False
if not hasattr(self, "namespace") or self.namespace is None:
return False
if not hasattr(self, "user_id") or self.user_id is None:
return False
return True
# endregion is/has methods
# region with_x methods
def with_body(self, value: ModelsSetPlayerSessionAttributeRequest) -> AdminSetPlayerSessionAttribute:
self.body = value
return self
def with_namespace(self, value: str) -> AdminSetPlayerSessionAttribute:
self.namespace = value
return self
def with_user_id(self, value: str) -> AdminSetPlayerSessionAttribute:
self.user_id = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = ModelsSetPlayerSessionAttributeRequest()
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = str()
if hasattr(self, "user_id") and self.user_id:
result["userId"] = str(self.user_id)
elif include_empty:
result["userId"] = str()
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, HttpResponse], Union[None, RestapiErrorResponseBody]]:
"""Parse the given response.
204: No Content - (No Content)
400: Bad Request - RestapiErrorResponseBody (Bad Request)
401: Unauthorized - RestapiErrorResponseBody (Unauthorized)
403: Forbidden - RestapiErrorResponseBody (Forbidden)
404: Not Found - RestapiErrorResponseBody (Not Found)
500: Internal Server Error - RestapiErrorResponseBody (Internal Server Error)
"""
if code == 204:
return HttpResponse.create(code, "No Content"), None
if code == 400:
return None, RestapiErrorResponseBody.create_from_dict(content)
if code == 401:
return None, RestapiErrorResponseBody.create_from_dict(content)
if code == 403:
return None, RestapiErrorResponseBody.create_from_dict(content)
if code == 404:
return None, RestapiErrorResponseBody.create_from_dict(content)
if code == 500:
return None, RestapiErrorResponseBody.create_from_dict(content)
was_handled, undocumented_response = HttpResponse.try_create_undocumented_response(code, content)
if was_handled:
return None, undocumented_response
return None, HttpResponse.create_unhandled_error()
# endregion response methods
# region static methods
@classmethod
def create(
cls,
body: ModelsSetPlayerSessionAttributeRequest,
namespace: str,
user_id: str,
) -> AdminSetPlayerSessionAttribute:
instance = cls()
instance.body = body
instance.namespace = namespace
instance.user_id = user_id
return instance
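    # Assembly sketch (namespace/user id values are placeholders; sending the request is
    # handled elsewhere in the SDK):
    #
    #   op = AdminSetPlayerSessionAttribute.create(
    #       body=ModelsSetPlayerSessionAttributeRequest(),
    #       namespace="mygame",
    #       user_id="some-user-id",
    #   )
    #   op.get_full_url("https://demo.accelbyte.io")
    #   # -> "https://demo.accelbyte.io/lobby/v1/admin/player/namespaces/mygame/users/some-user-id/attributes"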
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> AdminSetPlayerSessionAttribute:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = ModelsSetPlayerSessionAttributeRequest.create_from_dict(dict_["body"], include_empty=include_empty)
elif include_empty:
instance.body = ModelsSetPlayerSessionAttributeRequest()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = str()
if "userId" in dict_ and dict_["userId"] is not None:
instance.user_id = str(dict_["userId"])
elif include_empty:
instance.user_id = str()
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"namespace": "namespace",
"userId": "user_id",
}
# endregion static methods
| 31.411348 | 148 | 0.634793 |
cefb6546985d976e9ae0021f73b367ddccb17ec6 | 64,458 | py | Python | monai/transforms/utility/dictionary.py | bamf-health/MONAI | 6a2086d21baf4b60c2ab3d400ed5c97cf24a0da9 | ["Apache-2.0"] | null | null | null | monai/transforms/utility/dictionary.py | bamf-health/MONAI | 6a2086d21baf4b60c2ab3d400ed5c97cf24a0da9 | ["Apache-2.0"] | null | null | null | monai/transforms/utility/dictionary.py | bamf-health/MONAI | 6a2086d21baf4b60c2ab3d400ed5c97cf24a0da9 | ["Apache-2.0"] | null | null | null | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of dictionary-based wrappers around the "vanilla" transforms for utility functions
defined in :py:class:`monai.transforms.utility.array`.
Class names are ended with 'd' to denote dictionary-based transforms.
"""
import logging
import re
from copy import deepcopy
from typing import Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from monai.config import DtypeLike, KeysCollection
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.utils import no_collation
from monai.transforms.inverse import InvertibleTransform
from monai.transforms.transform import MapTransform, Randomizable, RandomizableTransform
from monai.transforms.utility.array import (
AddChannel,
AsChannelFirst,
AsChannelLast,
CastToType,
ClassesToIndices,
ConvertToMultiChannelBasedOnBratsClasses,
CuCIM,
DataStats,
EnsureChannelFirst,
EnsureType,
FgBgToIndices,
Identity,
IntensityStats,
LabelToMask,
Lambda,
MapLabelValue,
RemoveRepeatedChannel,
RepeatChannel,
SimulateDelay,
SplitChannel,
SqueezeDim,
ToCupy,
ToDevice,
ToNumpy,
ToPIL,
TorchVision,
ToTensor,
Transpose,
)
from monai.transforms.utils import extreme_points_to_image, get_extreme_points
from monai.utils import convert_to_numpy, ensure_tuple, ensure_tuple_rep
from monai.utils.enums import InverseKeys, TransformBackends
__all__ = [
"AddChannelD",
"AddChannelDict",
"AddChanneld",
"AddExtremePointsChannelD",
"AddExtremePointsChannelDict",
"AddExtremePointsChanneld",
"AsChannelFirstD",
"AsChannelFirstDict",
"AsChannelFirstd",
"AsChannelLastD",
"AsChannelLastDict",
"AsChannelLastd",
"CastToTypeD",
"CastToTypeDict",
"CastToTyped",
"ConcatItemsD",
"ConcatItemsDict",
"ConcatItemsd",
"ConvertToMultiChannelBasedOnBratsClassesD",
"ConvertToMultiChannelBasedOnBratsClassesDict",
"ConvertToMultiChannelBasedOnBratsClassesd",
"CopyItemsD",
"CopyItemsDict",
"CopyItemsd",
"CuCIMd",
"CuCIMD",
"CuCIMDict",
"DataStatsD",
"DataStatsDict",
"DataStatsd",
"DeleteItemsD",
"DeleteItemsDict",
"DeleteItemsd",
"EnsureChannelFirstD",
"EnsureChannelFirstDict",
"EnsureChannelFirstd",
"EnsureTypeD",
"EnsureTypeDict",
"EnsureTyped",
"FgBgToIndicesD",
"FgBgToIndicesDict",
"FgBgToIndicesd",
"IdentityD",
"IdentityDict",
"Identityd",
"IntensityStatsd",
"IntensityStatsD",
"IntensityStatsDict",
"LabelToMaskD",
"LabelToMaskDict",
"LabelToMaskd",
"LambdaD",
"LambdaDict",
"Lambdad",
"MapLabelValueD",
"MapLabelValueDict",
"MapLabelValued",
"RandCuCIMd",
"RandCuCIMD",
"RandCuCIMDict",
"RandLambdaD",
"RandLambdaDict",
"RandLambdad",
"RandTorchVisionD",
"RandTorchVisionDict",
"RandTorchVisiond",
"RemoveRepeatedChannelD",
"RemoveRepeatedChannelDict",
"RemoveRepeatedChanneld",
"RepeatChannelD",
"RepeatChannelDict",
"RepeatChanneld",
"SelectItemsD",
"SelectItemsDict",
"SelectItemsd",
"SimulateDelayD",
"SimulateDelayDict",
"SimulateDelayd",
"SplitChannelD",
"SplitChannelDict",
"SplitChanneld",
"SqueezeDimD",
"SqueezeDimDict",
"SqueezeDimd",
"ToCupyD",
"ToCupyDict",
"ToCupyd",
"ToDeviced",
"ToDeviceD",
"ToDeviceDict",
"ToNumpyD",
"ToNumpyDict",
"ToNumpyd",
"ToPILD",
"ToPILDict",
"ToPILd",
"ToTensorD",
"ToTensorDict",
"ToTensord",
"TorchVisionD",
"TorchVisionDict",
"TorchVisiond",
"Transposed",
"TransposeDict",
"TransposeD",
]
class Identityd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.Identity`.
"""
backend = Identity.backend
def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.identity = Identity()
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.identity(d[key])
return d
class AsChannelFirstd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.AsChannelFirst`.
"""
backend = AsChannelFirst.backend
def __init__(self, keys: KeysCollection, channel_dim: int = -1, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
channel_dim: which dimension of input image is the channel, default is the last dimension.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = AsChannelFirst(channel_dim=channel_dim)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class AsChannelLastd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.AsChannelLast`.
"""
backend = AsChannelLast.backend
def __init__(self, keys: KeysCollection, channel_dim: int = 0, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
channel_dim: which dimension of input image is the channel, default is the first dimension.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = AsChannelLast(channel_dim=channel_dim)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class AddChanneld(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.AddChannel`.
"""
backend = AddChannel.backend
def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.adder = AddChannel()
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.adder(d[key])
return d
class EnsureChannelFirstd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.EnsureChannelFirst`.
"""
backend = EnsureChannelFirst.backend
def __init__(
self,
keys: KeysCollection,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix: str = "meta_dict",
strict_check: bool = True,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
meta_keys: explicitly indicate the key of the corresponding meta data dictionary.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, original_shape, etc.
it can be a sequence of string, map to the `keys`.
if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
meta_key_postfix: if meta_keys is None and `key_{postfix}` was used to store the metadata in `LoadImaged`.
So need the key to extract metadata for channel dim information, default is `meta_dict`.
For example, for data with key `image`, metadata by default is in `image_meta_dict`.
strict_check: whether to raise an error when the meta information is insufficient.
"""
super().__init__(keys)
self.adjuster = EnsureChannelFirst(strict_check=strict_check)
self.meta_keys = ensure_tuple_rep(meta_keys, len(self.keys))
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
def __call__(self, data) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key, meta_key, meta_key_postfix in zip(self.keys, self.meta_keys, self.meta_key_postfix):
d[key] = self.adjuster(d[key], d[meta_key or f"{key}_{meta_key_postfix}"])
return d
class RepeatChanneld(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.RepeatChannel`.
"""
backend = RepeatChannel.backend
def __init__(self, keys: KeysCollection, repeats: int, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
repeats: the number of repetitions for each element.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.repeater = RepeatChannel(repeats)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.repeater(d[key])
return d
class RemoveRepeatedChanneld(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.RemoveRepeatedChannel`.
"""
backend = RemoveRepeatedChannel.backend
def __init__(self, keys: KeysCollection, repeats: int, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
repeats: the number of repetitions for each element.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.repeater = RemoveRepeatedChannel(repeats)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.repeater(d[key])
return d
class SplitChanneld(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.SplitChannel`.
All the input specified by `keys` should be split into same count of data.
"""
backend = SplitChannel.backend
def __init__(
self,
keys: KeysCollection,
output_postfixes: Optional[Sequence[str]] = None,
channel_dim: int = 0,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
output_postfixes: the postfixes to construct keys to store split data.
for example: if the key of input data is `pred` and split 2 classes, the output
data keys will be: pred_(output_postfixes[0]), pred_(output_postfixes[1])
if None, using the index number: `pred_0`, `pred_1`, ... `pred_N`.
channel_dim: which dimension of input image is the channel, default to 0.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.output_postfixes = output_postfixes
self.splitter = SplitChannel(channel_dim=channel_dim)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
rets = self.splitter(d[key])
postfixes: Sequence = list(range(len(rets))) if self.output_postfixes is None else self.output_postfixes
if len(postfixes) != len(rets):
raise AssertionError("count of split results must match output_postfixes.")
for i, r in enumerate(rets):
split_key = f"{key}_{postfixes[i]}"
if split_key in d:
raise RuntimeError(f"input data already contains key {split_key}.")
d[split_key] = r
return d
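# Usage sketch of the key naming described above: with output_postfixes left as None,
# a 2-channel "pred" entry is split into "pred_0" and "pred_1" (shapes illustrative):
#
#   out = SplitChanneld(keys="pred", channel_dim=0)({"pred": np.zeros((2, 8, 8))})
#   sorted(out)  # -> ['pred', 'pred_0', 'pred_1']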
class CastToTyped(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.CastToType`.
"""
backend = CastToType.backend
def __init__(
self,
keys: KeysCollection,
dtype: Union[Sequence[Union[DtypeLike, torch.dtype]], DtypeLike, torch.dtype] = np.float32,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
dtype: convert image to this data type, default is `np.float32`.
it also can be a sequence of dtypes or torch.dtype,
each element corresponds to a key in ``keys``.
allow_missing_keys: don't raise exception if key is missing.
"""
MapTransform.__init__(self, keys, allow_missing_keys)
self.dtype = ensure_tuple_rep(dtype, len(self.keys))
self.converter = CastToType()
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key, dtype in self.key_iterator(d, self.dtype):
d[key] = self.converter(d[key], dtype=dtype)
return d
class ToTensord(MapTransform, InvertibleTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ToTensor`.
"""
backend = ToTensor.backend
def __init__(
self,
keys: KeysCollection,
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
dtype: target data content type to convert, for example: torch.float, etc.
device: specify the target device to put the Tensor data.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = ToTensor(dtype=dtype, device=device)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
self.push_transform(d, key)
d[key] = self.converter(d[key])
return d
def inverse(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = deepcopy(dict(data))
for key in self.key_iterator(d):
# Create inverse transform
inverse_transform = ToNumpy()
# Apply inverse
d[key] = inverse_transform(d[key])
# Remove the applied transform
self.pop_transform(d, key)
return d
class EnsureTyped(MapTransform, InvertibleTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.EnsureType`.
Ensure the input data to be a PyTorch Tensor or numpy array, support: `numpy array`, `PyTorch Tensor`,
`float`, `int`, `bool`, `string` and `object` keep the original.
If passing a dictionary, list or tuple, still return dictionary, list or tuple and recursively convert
every item to the expected data type.
Note: Currently, we only convert tensor data to numpy array or scalar number in the inverse operation.
"""
backend = EnsureType.backend
def __init__(
self,
keys: KeysCollection,
data_type: str = "tensor",
dtype: Optional[Union[DtypeLike, torch.dtype]] = None,
device: Optional[torch.device] = None,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
data_type: target data type to convert, should be "tensor" or "numpy".
dtype: target data content type to convert, for example: np.float32, torch.float, etc.
device: for Tensor data type, specify the target device.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = EnsureType(data_type=data_type, dtype=dtype, device=device)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
self.push_transform(d, key)
d[key] = self.converter(d[key])
return d
def inverse(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]:
d = deepcopy(dict(data))
for key in self.key_iterator(d):
# FIXME: currently, only convert tensor data to numpy array or scalar number,
# need to also invert numpy array but it's not easy to determine the previous data type
d[key] = convert_to_numpy(d[key])
# Remove the applied transform
self.pop_transform(d, key)
return d
class ToNumpyd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ToNumpy`.
"""
backend = ToNumpy.backend
def __init__(
self,
keys: KeysCollection,
dtype: Optional[DtypeLike] = None,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
dtype: target data type when converting to numpy array.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = ToNumpy(dtype=dtype)
def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class ToCupyd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ToCupy`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
dtype: data type specifier. It is inferred from the input by default.
allow_missing_keys: don't raise exception if key is missing.
"""
backend = ToCupy.backend
def __init__(self, keys: KeysCollection, dtype=None, allow_missing_keys: bool = False) -> None:
super().__init__(keys, allow_missing_keys)
self.converter = ToCupy(dtype=dtype)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class ToPILd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ToNumpy`.
"""
backend = ToPIL.backend
def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = ToPIL()
def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class Transposed(MapTransform, InvertibleTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.Transpose`.
"""
backend = Transpose.backend
def __init__(
self, keys: KeysCollection, indices: Optional[Sequence[int]], allow_missing_keys: bool = False
) -> None:
super().__init__(keys, allow_missing_keys)
self.transform = Transpose(indices)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.transform(d[key])
# if None was supplied then numpy uses range(a.ndim)[::-1]
indices = self.transform.indices or range(d[key].ndim)[::-1]
self.push_transform(d, key, extra_info={"indices": indices})
return d
def inverse(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]:
d = deepcopy(dict(data))
for key in self.key_iterator(d):
transform = self.get_most_recent_transform(d, key)
# Create inverse transform
fwd_indices = np.array(transform[InverseKeys.EXTRA_INFO]["indices"])
inv_indices = np.argsort(fwd_indices)
inverse_transform = Transpose(inv_indices.tolist())
# Apply inverse
d[key] = inverse_transform(d[key])
# Remove the applied transform
self.pop_transform(d, key)
return d
class DeleteItemsd(MapTransform):
"""
Delete specified items from data dictionary to release memory.
It will remove the key-values and copy the others to construct a new dictionary.
"""
def __init__(
self,
keys: KeysCollection,
sep: str = ".",
use_re: Union[Sequence[bool], bool] = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to delete, can be "A{sep}B{sep}C"
to delete key `C` in nested dictionary, `C` can be regular expression.
See also: :py:class:`monai.transforms.compose.MapTransform`
sep: the separator tag to define nested dictionary keys, default to ".".
use_re: whether the specified key is a regular expression, it also can be
a list of bool values, map the to keys.
"""
super().__init__(keys)
self.sep = sep
self.use_re = ensure_tuple_rep(use_re, len(self.keys))
def __call__(self, data):
def _delete_item(keys, d, use_re: bool = False):
key = keys[0]
if len(keys) > 1:
d[key] = _delete_item(keys[1:], d[key], use_re)
return d
return {k: v for k, v in d.items() if (use_re and not re.search(key, k)) or (not use_re and k != key)}
d = dict(data)
for key, use_re in zip(self.keys, self.use_re):
d = _delete_item(key.split(self.sep), d, use_re)
return d
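# Usage sketch for the nested-key syntax above (key names are made up):
#
#   data = {"image": ..., "meta": {"affine": ..., "original_affine": ...}}
#   DeleteItemsd(keys="meta.original_affine", sep=".")(data)
#   # -> a dict whose "meta" entry no longer contains "original_affine"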
class SelectItemsd(MapTransform):
"""
Select only specified items from data dictionary to release memory.
    It will copy the selected key-values and construct a new dictionary.
"""
def __call__(self, data):
return {key: data[key] for key in self.key_iterator(data)}
class SqueezeDimd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.SqueezeDim`.
"""
backend = SqueezeDim.backend
def __init__(self, keys: KeysCollection, dim: int = 0, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
dim: dimension to be squeezed. Default: 0 (the first dimension)
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = SqueezeDim(dim=dim)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class DataStatsd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.DataStats`.
"""
backend = DataStats.backend
def __init__(
self,
keys: KeysCollection,
prefix: Union[Sequence[str], str] = "Data",
data_type: Union[Sequence[bool], bool] = True,
data_shape: Union[Sequence[bool], bool] = True,
value_range: Union[Sequence[bool], bool] = True,
data_value: Union[Sequence[bool], bool] = False,
additional_info: Optional[Union[Sequence[Callable], Callable]] = None,
logger_handler: Optional[logging.Handler] = None,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
prefix: will be printed in format: "{prefix} statistics".
it also can be a sequence of string, each element corresponds to a key in ``keys``.
data_type: whether to show the type of input data.
it also can be a sequence of bool, each element corresponds to a key in ``keys``.
data_shape: whether to show the shape of input data.
it also can be a sequence of bool, each element corresponds to a key in ``keys``.
value_range: whether to show the value range of input data.
it also can be a sequence of bool, each element corresponds to a key in ``keys``.
data_value: whether to show the raw value of input data.
it also can be a sequence of bool, each element corresponds to a key in ``keys``.
a typical example is to print some properties of Nifti image: affine, pixdim, etc.
additional_info: user can define callable function to extract
additional info from input data. it also can be a sequence of string, each element
corresponds to a key in ``keys``.
logger_handler: add additional handler to output data: save to file, etc.
add existing python logging handlers: https://docs.python.org/3/library/logging.handlers.html
the handler should have a logging level of at least `INFO`.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.prefix = ensure_tuple_rep(prefix, len(self.keys))
self.data_type = ensure_tuple_rep(data_type, len(self.keys))
self.data_shape = ensure_tuple_rep(data_shape, len(self.keys))
self.value_range = ensure_tuple_rep(value_range, len(self.keys))
self.data_value = ensure_tuple_rep(data_value, len(self.keys))
self.additional_info = ensure_tuple_rep(additional_info, len(self.keys))
self.logger_handler = logger_handler
self.printer = DataStats(logger_handler=logger_handler)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key, prefix, data_type, data_shape, value_range, data_value, additional_info in self.key_iterator(
d, self.prefix, self.data_type, self.data_shape, self.value_range, self.data_value, self.additional_info
):
d[key] = self.printer(
d[key],
prefix,
data_type,
data_shape,
value_range,
data_value,
additional_info,
)
return d
class SimulateDelayd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.SimulateDelay`.
"""
backend = SimulateDelay.backend
def __init__(
self, keys: KeysCollection, delay_time: Union[Sequence[float], float] = 0.0, allow_missing_keys: bool = False
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
delay_time: The minimum amount of time, in fractions of seconds, to accomplish this identity task.
It also can be a sequence of string, each element corresponds to a key in ``keys``.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.delay_time = ensure_tuple_rep(delay_time, len(self.keys))
self.delayer = SimulateDelay()
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key, delay_time in self.key_iterator(d, self.delay_time):
d[key] = self.delayer(d[key], delay_time=delay_time)
return d
class CopyItemsd(MapTransform):
"""
Copy specified items from data dictionary and save with different key names.
It can copy several items together and copy several times.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(
self, keys: KeysCollection, times: int, names: KeysCollection, allow_missing_keys: bool = False
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
times: expected copy times, for example, if keys is "img", times is 3,
it will add 3 copies of "img" data to the dictionary.
names: the names corresponding to the newly copied data,
the length should match `len(keys) x times`. for example, if keys is ["img", "seg"]
and times is 2, names can be: ["img_1", "seg_1", "img_2", "seg_2"].
allow_missing_keys: don't raise exception if key is missing.
Raises:
ValueError: When ``times`` is nonpositive.
ValueError: When ``len(names)`` is not ``len(keys) * times``. Incompatible values.
"""
super().__init__(keys, allow_missing_keys)
if times < 1:
raise ValueError(f"times must be positive, got {times}.")
self.times = times
names = ensure_tuple(names)
if len(names) != (len(self.keys) * times):
raise ValueError(
"len(names) must match len(keys) * times, "
f"got len(names)={len(names)} len(keys) * times={len(self.keys) * times}."
)
self.names = names
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
"""
Raises:
KeyError: When a key in ``self.names`` already exists in ``data``.
"""
d = dict(data)
key_len = len(self.keys)
for i in range(self.times):
for key, new_key in self.key_iterator(d, self.names[i * key_len : (i + 1) * key_len]):
if new_key in d:
raise KeyError(f"Key {new_key} already exists in data.")
val = d[key]
if isinstance(val, torch.Tensor):
d[new_key] = val.detach().clone()
else:
d[new_key] = deepcopy(val)
return d
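# Usage sketch (key names are arbitrary): copying "img" twice needs two new names, one
# per copy; tensors are cloned and other values deep-copied as shown above.
#
#   CopyItemsd(keys="img", times=2, names=["img_1", "img_2"])({"img": np.zeros((1, 4, 4))})
#   # -> dict with "img", "img_1" and "img_2"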
class ConcatItemsd(MapTransform):
"""
Concatenate specified items from the data dictionary together on the specified dimension (the first dim by default) to construct a big array.
Expect all the items to be numpy arrays or PyTorch Tensors.
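A minimal usage sketch (key names and shapes below are illustrative only):
.. code-block:: python
input_data = {"img1": np.zeros((1, 2, 2)), "img2": np.ones((1, 2, 2))}
concat = ConcatItemsd(keys=["img1", "img2"], name="img", dim=0)
print(concat(input_data)["img"].shape)
(2, 2, 2)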
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(self, keys: KeysCollection, name: str, dim: int = 0, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be concatenated together.
See also: :py:class:`monai.transforms.compose.MapTransform`
name: the name corresponding to the key to store the concatenated data.
dim: on which dimension to concatenate the items, default is 0.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.name = name
self.dim = dim
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
"""
Raises:
TypeError: When items in ``data`` differ in type.
TypeError: When the item type is not in ``Union[numpy.ndarray, torch.Tensor]``.
"""
d = dict(data)
output = []
data_type = None
for key in self.key_iterator(d):
if data_type is None:
data_type = type(d[key])
elif not isinstance(d[key], data_type):
raise TypeError("All items in data must have the same type.")
output.append(d[key])
if data_type is np.ndarray:
d[self.name] = np.concatenate(output, axis=self.dim)
elif data_type is torch.Tensor:
d[self.name] = torch.cat(output, dim=self.dim) # type: ignore
else:
raise TypeError(f"Unsupported data type: {data_type}, available options are (numpy.ndarray, torch.Tensor).")
return d
class Lambdad(MapTransform, InvertibleTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.Lambda`.
For example:
.. code-block:: python
:emphasize-lines: 2
input_data={'image': np.zeros((10, 2, 2)), 'label': np.ones((10, 2, 2))}
lambd = Lambdad(keys='label', func=lambda x: x[:4, :, :])
print(lambd(input_data)['label'].shape)
(4, 2, 2)
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
func: Lambda/function to be applied. It also can be a sequence of Callable,
each element corresponds to a key in ``keys``.
inv_func: Lambda/function of inverse operation if want to invert transforms, default to `lambda x: x`.
It also can be a sequence of Callable, each element corresponds to a key in ``keys``.
overwrite: whether to overwrite the original data in the input dictionary with the lambda function output.
default to True. It also can be a sequence of bool, where each element corresponds to a key in ``keys``.
allow_missing_keys: don't raise exception if key is missing.
Note: The inverse operation doesn't allow defining `extra_info` or accessing other information, such as the
image's original size. If you need such information, please write a new InvertibleTransform directly.
"""
backend = Lambda.backend
def __init__(
self,
keys: KeysCollection,
func: Union[Sequence[Callable], Callable],
inv_func: Union[Sequence[Callable], Callable] = no_collation,
overwrite: Union[Sequence[bool], bool] = True,
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.func = ensure_tuple_rep(func, len(self.keys))
self.inv_func = ensure_tuple_rep(inv_func, len(self.keys))
self.overwrite = ensure_tuple_rep(overwrite, len(self.keys))
self._lambd = Lambda()
def _transform(self, data: Any, func: Callable):
return self._lambd(data, func=func)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key, func, overwrite in self.key_iterator(d, self.func, self.overwrite):
ret = self._transform(data=d[key], func=func)
if overwrite:
d[key] = ret
self.push_transform(d, key)
return d
def _inverse_transform(self, transform_info: Dict, data: Any, func: Callable):
return self._lambd(data, func=func)
def inverse(self, data):
d = deepcopy(dict(data))
for key, inv_func, overwrite in self.key_iterator(d, self.inv_func, self.overwrite):
transform = self.get_most_recent_transform(d, key)
ret = self._inverse_transform(transform_info=transform, data=d[key], func=inv_func)
if overwrite:
d[key] = ret
self.pop_transform(d, key)
return d
class RandLambdad(Lambdad, RandomizableTransform):
"""
Randomizable version of :py:class:`monai.transforms.Lambdad`. The input `func` may contain random logic,
or the function may be executed randomly based on `prob`, so `CacheDataset` will not execute it and cache the results.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
func: Lambda/function to be applied. It also can be a sequence of Callable,
each element corresponds to a key in ``keys``.
inv_func: Lambda/function of inverse operation if want to invert transforms, default to `lambda x: x`.
It also can be a sequence of Callable, each element corresponds to a key in ``keys``.
overwrite: whether to overwrite the original data in the input dictionary with the lambda function output.
default to True. It also can be a sequence of bool, where each element corresponds to a key in ``keys``.
prob: probability of executing the random function, default to 1.0, with 100% probability to execute.
note that all the data specified by `keys` will share the same random probability to execute or not.
allow_missing_keys: don't raise exception if key is missing.
For more details, please check :py:class:`monai.transforms.Lambdad`.
Note: The inverse operation doesn't allow defining `extra_info` or accessing other information, such as the
image's original size. If you need such information, please write a new InvertibleTransform directly.
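A brief usage sketch (the function and probability below are illustrative only):
.. code-block:: python
rand_double = RandLambdad(keys="img", func=lambda x: x * 2, prob=0.5)
output = rand_double({"img": np.ones((1, 2, 2))})
# with probability 0.5 the "img" array is doubled, otherwise it is returned unchanged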
"""
backend = Lambda.backend
def __init__(
self,
keys: KeysCollection,
func: Union[Sequence[Callable], Callable],
inv_func: Union[Sequence[Callable], Callable] = no_collation,
overwrite: Union[Sequence[bool], bool] = True,
prob: float = 1.0,
allow_missing_keys: bool = False,
) -> None:
Lambdad.__init__(
self=self,
keys=keys,
func=func,
inv_func=inv_func,
overwrite=overwrite,
allow_missing_keys=allow_missing_keys,
)
RandomizableTransform.__init__(self=self, prob=prob, do_transform=True)
def _transform(self, data: Any, func: Callable):
return self._lambd(data, func=func) if self._do_transform else data
def __call__(self, data):
self.randomize(data)
return super().__call__(data)
def _inverse_transform(self, transform_info: Dict, data: Any, func: Callable):
return self._lambd(data, func=func) if transform_info[InverseKeys.DO_TRANSFORM] else data
class LabelToMaskd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.LabelToMask`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
select_labels: labels to generate mask from. for 1 channel label, the `select_labels`
is the expected label values, like: [1, 2, 3]. for One-Hot format label, the
`select_labels` is the expected channel indices.
merge_channels: whether to use `np.any()` to merge the result on channel dim.
if yes, will return a single channel mask with binary data.
allow_missing_keys: don't raise exception if key is missing.
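A minimal usage sketch (the label values below are illustrative only):
.. code-block:: python
masker = LabelToMaskd(keys="label", select_labels=[1, 2], merge_channels=False)
output = masker({"label": np.array([[[0, 1], [2, 3]]])})
# output["label"] is a mask that is set only where the label value is 1 or 2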
"""
backend = LabelToMask.backend
def __init__( # pytype: disable=annotation-type-mismatch
self,
keys: KeysCollection,
select_labels: Union[Sequence[int], int],
merge_channels: bool = False,
allow_missing_keys: bool = False,
) -> None: # pytype: disable=annotation-type-mismatch
super().__init__(keys, allow_missing_keys)
self.converter = LabelToMask(select_labels=select_labels, merge_channels=merge_channels)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class FgBgToIndicesd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.FgBgToIndices`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
fg_postfix: postfix to save the computed foreground indices in dict.
for example, if computed on `label` and `postfix = "_fg_indices"`, the key will be `label_fg_indices`.
bg_postfix: postfix to save the computed background indices in dict.
for example, if computed on `label` and `postfix = "_bg_indices"`, the key will be `label_bg_indices`.
image_key: if image_key is not None, use ``label == 0 & image > image_threshold`` to determine
the negative sample (background), so the output items will not map to all the voxels in the label.
image_threshold: if enabled image_key, use ``image > image_threshold`` to determine
the valid image content area and select background only in this area.
output_shape: expected shape of output indices. if not None, unravel indices to specified shape.
allow_missing_keys: don't raise exception if key is missing.
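A minimal usage sketch (key names and shapes below are illustrative only):
.. code-block:: python
input_data = {"image": np.random.rand(1, 3, 3), "label": np.random.randint(0, 2, size=(1, 3, 3))}
to_indices = FgBgToIndicesd(keys="label", image_key="image", image_threshold=0.0)
output = to_indices(input_data)
# output now also contains "label_fg_indices" and "label_bg_indices"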
"""
backend = FgBgToIndices.backend
def __init__(
self,
keys: KeysCollection,
fg_postfix: str = "_fg_indices",
bg_postfix: str = "_bg_indices",
image_key: Optional[str] = None,
image_threshold: float = 0.0,
output_shape: Optional[Sequence[int]] = None,
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.fg_postfix = fg_postfix
self.bg_postfix = bg_postfix
self.image_key = image_key
self.converter = FgBgToIndices(image_threshold, output_shape)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
image = d[self.image_key] if self.image_key else None
for key in self.key_iterator(d):
d[str(key) + self.fg_postfix], d[str(key) + self.bg_postfix] = self.converter(d[key], image)
return d
class ClassesToIndicesd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ClassesToIndices`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
indices_postfix: postfix to save the computed indices of all classes in dict.
for example, if computed on `label` and `postfix = "_cls_indices"`, the key will be `label_cls_indices`.
num_classes: number of classes for argmax label, not necessary for One-Hot label.
image_key: if image_key is not None, use ``image > image_threshold`` to define valid region, and only select
the indices within the valid region.
image_threshold: if enabled image_key, use ``image > image_threshold`` to determine the valid image content
area and select only the indices of classes in this area.
output_shape: expected shape of output indices. if not None, unravel indices to specified shape.
allow_missing_keys: don't raise exception if key is missing.
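A minimal usage sketch (the argmax-style label below is illustrative only):
.. code-block:: python
to_indices = ClassesToIndicesd(keys="label", num_classes=3)
output = to_indices({"label": np.array([[[0, 1], [2, 1]]])})
# output["label_cls_indices"] holds one indices array per class (0, 1 and 2)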
"""
backend = ClassesToIndices.backend
def __init__(
self,
keys: KeysCollection,
indices_postfix: str = "_cls_indices",
num_classes: Optional[int] = None,
image_key: Optional[str] = None,
image_threshold: float = 0.0,
output_shape: Optional[Sequence[int]] = None,
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.indices_postfix = indices_postfix
self.image_key = image_key
self.converter = ClassesToIndices(num_classes, image_threshold, output_shape)
def __call__(self, data: Mapping[Hashable, Any]):
d = dict(data)
image = d[self.image_key] if self.image_key else None
for key in self.key_iterator(d):
d[str(key) + self.indices_postfix] = self.converter(d[key], image)
return d
class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ConvertToMultiChannelBasedOnBratsClasses`.
Convert labels to multi channels based on brats18 classes:
label 1 is the necrotic and non-enhancing tumor core
label 2 is the peritumoral edema
label 4 is the GD-enhancing tumor
The possible classes are TC (Tumor core), WT (Whole tumor)
and ET (Enhancing tumor).
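A brief usage sketch (the label content below is illustrative only):
.. code-block:: python
converter = ConvertToMultiChannelBasedOnBratsClassesd(keys="label")
output = converter({"label": np.array([[0, 1], [2, 4]])})
# output["label"] gains a leading channel dimension with 3 channels: TC, WT and ET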
"""
def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False):
super().__init__(keys, allow_missing_keys)
self.converter = ConvertToMultiChannelBasedOnBratsClasses()
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class AddExtremePointsChanneld(Randomizable, MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.AddExtremePointsChannel`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
label_key: key to label source to get the extreme points.
background: Class index of background label, defaults to 0.
pert: Random perturbation amount to add to the points, defaults to 0.0.
sigma: if a list of values, must match the count of spatial dimensions of input data,
and apply every value in the list to 1 spatial dimension. if only 1 value is provided,
use it for all spatial dimensions.
rescale_min: minimum value of output data.
rescale_max: maximum value of output data.
allow_missing_keys: don't raise exception if key is missing.
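A brief usage sketch (key names, shapes and values below are illustrative only):
.. code-block:: python
add_points = AddExtremePointsChanneld(keys="image", label_key="label", sigma=3.0)
output = add_points({"image": np.zeros((1, 8, 8)), "label": np.ones((1, 8, 8))})
# output["image"] gains an extra channel encoding the label's extreme points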
"""
def __init__(
self,
keys: KeysCollection,
label_key: str,
background: int = 0,
pert: float = 0.0,
sigma: Union[Sequence[float], float, Sequence[torch.Tensor], torch.Tensor] = 3.0,
rescale_min: float = -1.0,
rescale_max: float = 1.0,
allow_missing_keys: bool = False,
):
MapTransform.__init__(self, keys, allow_missing_keys)
self.background = background
self.pert = pert
self.points: List[Tuple[int, ...]] = []
self.label_key = label_key
self.sigma = sigma
self.rescale_min = rescale_min
self.rescale_max = rescale_max
def randomize(self, label: np.ndarray) -> None:
self.points = get_extreme_points(label, rand_state=self.R, background=self.background, pert=self.pert)
def __call__(self, data):
d = dict(data)
label = d[self.label_key]
if label.shape[0] != 1:
raise ValueError("Only supports single channel labels!")
# Generate extreme points
self.randomize(label[0, :])
for key in self.key_iterator(d):
img = d[key]
points_image = extreme_points_to_image(
points=self.points,
label=label,
sigma=self.sigma,
rescale_min=self.rescale_min,
rescale_max=self.rescale_max,
)
d[key] = np.concatenate([img, points_image], axis=0)
return d
class TorchVisiond(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.TorchVision` for non-randomized transforms.
For randomized transforms of TorchVision use :py:class:`monai.transforms.RandTorchVisiond`.
Note:
As most of the TorchVision transforms only work for PIL image and PyTorch Tensor, this transform expects input
data to be a dict of PyTorch Tensors; users can easily call the `ToTensord` transform to convert Numpy arrays to Tensors.
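A brief usage sketch (``ColorJitter`` and its ``brightness`` argument come from torchvision and are used here for illustration only):
.. code-block:: python
jitter = TorchVisiond(keys="img", name="ColorJitter", brightness=0.25)
output = jitter({"img": torch.rand(3, 32, 32)})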
"""
def __init__(
self,
keys: KeysCollection,
name: str,
allow_missing_keys: bool = False,
*args,
**kwargs,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
name: The transform name in TorchVision package.
allow_missing_keys: don't raise exception if key is missing.
args: parameters for the TorchVision transform.
kwargs: parameters for the TorchVision transform.
"""
super().__init__(keys, allow_missing_keys)
self.trans = TorchVision(name, *args, **kwargs)
def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.trans(d[key])
return d
class RandTorchVisiond(Randomizable, MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.TorchVision` for randomized transforms.
For deterministic non-randomized transforms of TorchVision use :py:class:`monai.transforms.TorchVisiond`.
Note:
- As most of the TorchVision transforms only work for PIL image and PyTorch Tensor, this transform expects input
data to be a dict of PyTorch Tensors; users can easily call the `ToTensord` transform to convert Numpy arrays to Tensors.
- This class inherits the ``Randomizable`` purely to prevent any dataset caching to skip the transform
computation. If the random factor of the underlying torchvision transform is not derived from `self.R`,
the results may not be deterministic.
See Also: :py:class:`monai.transforms.Randomizable`.
"""
def __init__(
self,
keys: KeysCollection,
name: str,
allow_missing_keys: bool = False,
*args,
**kwargs,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
name: The transform name in TorchVision package.
allow_missing_keys: don't raise exception if key is missing.
args: parameters for the TorchVision transform.
kwargs: parameters for the TorchVision transform.
"""
MapTransform.__init__(self, keys, allow_missing_keys)
self.trans = TorchVision(name, *args, **kwargs)
def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.trans(d[key])
return d
class MapLabelValued(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.MapLabelValue`.
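A minimal usage sketch (the label values below are illustrative only):
.. code-block:: python
mapper = MapLabelValued(keys="seg", orig_labels=[3, 2, 1], target_labels=[0, 1, 2])
output = mapper({"seg": np.array([[3, 2], [1, 3]])})
# output["seg"] becomes [[0, 1], [2, 0]]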
"""
def __init__(
self,
keys: KeysCollection,
orig_labels: Sequence,
target_labels: Sequence,
dtype: DtypeLike = np.float32,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
orig_labels: original labels that map to others.
target_labels: expected label values, 1: 1 map to the `orig_labels`.
dtype: convert the output data to dtype, default to float32.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.mapper = MapLabelValue(orig_labels=orig_labels, target_labels=target_labels, dtype=dtype)
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.mapper(d[key])
return d
class IntensityStatsd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.IntensityStats`.
Compute statistics for the intensity values of input image and store into the meta data dictionary.
For example: if `ops=[lambda x: np.mean(x), "max"]` and `key_prefix="orig"`, this may generate the following stats:
`{"orig_custom_0": 1.5, "orig_max": 3.0}`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
ops: expected operations to compute statistics for the intensity.
if a string, will map to the predefined operations, supported: ["mean", "median", "max", "min", "std"]
mapping to `np.nanmean`, `np.nanmedian`, `np.nanmax`, `np.nanmin`, `np.nanstd`.
if a callable function, will execute the function on input image.
key_prefix: the prefix to combine with `ops` name to generate the key to store the results in the
meta data dictionary. if some `ops` are callable functions, will use "{key_prefix}_custom_{index}"
as the key, where index counts from 0.
mask_keys: if not None, specify the mask array for the image to extract only the interested area to compute
statistics, mask must have the same shape as the image.
it should be a sequence of strings or None, map to the `keys`.
channel_wise: whether to compute statistics for every channel of input image separately.
if True, return a list of values for every operation, default to False.
meta_keys: explicitly indicate the key of the corresponding meta data dictionary.
used to store the computed statistics to the meta dict.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, original_shape, etc.
it can be a sequence of string, map to the `keys`.
if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
meta_key_postfix: if meta_keys is None, use `key_{postfix}` to fetch the meta data according
to the key data, default is `meta_dict`, the meta data is a dictionary object.
used to store the computed statistics to the meta dict.
allow_missing_keys: don't raise exception if key is missing.
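A brief usage sketch (key names and values below are illustrative only):
.. code-block:: python
stats = IntensityStatsd(keys="img", ops=["mean", "max"], key_prefix="orig")
output = stats({"img": np.array([[[0.0, 1.0], [2.0, 3.0]]])})
# output["img_meta_dict"] now contains "orig_mean" and "orig_max"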
"""
def __init__(
self,
keys: KeysCollection,
ops: Sequence[Union[str, Callable]],
key_prefix: str,
mask_keys: Optional[KeysCollection] = None,
channel_wise: bool = False,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix: str = "meta_dict",
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.stats = IntensityStats(ops=ops, key_prefix=key_prefix, channel_wise=channel_wise)
self.mask_keys = ensure_tuple_rep(None, len(self.keys)) if mask_keys is None else ensure_tuple(mask_keys)
self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
if len(self.keys) != len(self.meta_keys):
raise ValueError("meta_keys should have the same length as keys.")
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
def __call__(self, data) -> Dict[Hashable, np.ndarray]:
d = dict(data)
for key, mask_key, meta_key, meta_key_postfix in self.key_iterator(
d, self.mask_keys, self.meta_keys, self.meta_key_postfix
):
meta_key = meta_key or f"{key}_{meta_key_postfix}"
d[key], d[meta_key] = self.stats(
img=d[key],
meta_data=d.get(meta_key),
mask=d.get(mask_key) if mask_key is not None else None,
)
return d
class ToDeviced(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ToDevice`.
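A brief usage sketch (the device string assumes a CUDA-capable GPU is available):
.. code-block:: python
to_gpu = ToDeviced(keys=["img", "label"], device="cuda:0")
output = to_gpu({"img": torch.zeros(1, 2, 2), "label": torch.ones(1, 2, 2)})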
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
keys: KeysCollection,
device: Union[torch.device, str],
allow_missing_keys: bool = False,
**kwargs,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
device: target device to move the Tensor, for example: "cuda:1".
allow_missing_keys: don't raise exception if key is missing.
kwargs: other args for the PyTorch `Tensor.to()` API, for more details:
https://pytorch.org/docs/stable/generated/torch.Tensor.to.html.
"""
super().__init__(keys, allow_missing_keys)
self.converter = ToDevice(device=device, **kwargs)
def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class CuCIMd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.CuCIM` for non-randomized transforms.
For randomized transforms of CuCIM use :py:class:`monai.transforms.RandCuCIMd`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
name: The transform name in CuCIM package.
allow_missing_keys: don't raise exception if key is missing.
args: parameters for the CuCIM transform.
kwargs: parameters for the CuCIM transform.
Note:
CuCIM transforms only work with CuPy arrays, this transform expects input data to be `cupy.ndarray`.
Users can call `ToCuPy` transform to convert a numpy array or torch tensor to cupy array.
"""
def __init__(
self,
keys: KeysCollection,
name: str,
allow_missing_keys: bool = False,
*args,
**kwargs,
) -> None:
super().__init__(keys=keys, allow_missing_keys=allow_missing_keys)
self.trans = CuCIM(name, *args, **kwargs)
def __call__(self, data):
"""
Args:
data: Dict[Hashable, `cupy.ndarray`]
Returns:
Dict[Hashable, `cupy.ndarray`]
"""
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.trans(d[key])
return d
class RandCuCIMd(CuCIMd, RandomizableTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.CuCIM` for randomized transforms.
For deterministic non-randomized transforms of CuCIM use :py:class:`monai.transforms.CuCIMd`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
name: The transform name in CuCIM package.
apply_prob: the probability to apply the transform (default=1.0)
allow_missing_keys: don't raise exception if key is missing.
args: parameters for the CuCIM transform.
kwargs: parameters for the CuCIM transform.
Note:
- CuCIM transforms only work with CuPy arrays, so this transform expects input data to be `cupy.ndarray`.
Users can call `ToCuPy` transform to convert a numpy array or torch tensor to cupy array.
- If the cuCIM transform is already randomized, the `apply_prob` argument has nothing to do with
the randomness of the underlying cuCIM transform. `apply_prob` defines whether the transform (either randomized
or non-randomized) is applied at all, so it can apply non-randomized transforms randomly, but be careful
with setting `apply_prob` to anything other than 1.0 when using it along with cuCIM's randomized transforms.
- If the random factor of the underlying cuCIM transform is not derived from `self.R`,
the results may not be deterministic. See Also: :py:class:`monai.transforms.Randomizable`.
"""
def __init__(
self,
apply_prob: float = 1.0,
*args,
**kwargs,
) -> None:
CuCIMd.__init__(self, *args, **kwargs)
RandomizableTransform.__init__(self, prob=apply_prob)
def __call__(self, data):
"""
Args:
data: Dict[Hashable, `cupy.ndarray`]
Returns:
Dict[Hashable, `cupy.ndarray`]
"""
self.randomize(data)
if not self._do_transform:
return dict(data)
return super().__call__(data)
IdentityD = IdentityDict = Identityd
AsChannelFirstD = AsChannelFirstDict = AsChannelFirstd
AsChannelLastD = AsChannelLastDict = AsChannelLastd
AddChannelD = AddChannelDict = AddChanneld
EnsureChannelFirstD = EnsureChannelFirstDict = EnsureChannelFirstd
RemoveRepeatedChannelD = RemoveRepeatedChannelDict = RemoveRepeatedChanneld
RepeatChannelD = RepeatChannelDict = RepeatChanneld
SplitChannelD = SplitChannelDict = SplitChanneld
CastToTypeD = CastToTypeDict = CastToTyped
ToTensorD = ToTensorDict = ToTensord
EnsureTypeD = EnsureTypeDict = EnsureTyped
ToNumpyD = ToNumpyDict = ToNumpyd
ToCupyD = ToCupyDict = ToCupyd
ToPILD = ToPILDict = ToPILd
TransposeD = TransposeDict = Transposed
DeleteItemsD = DeleteItemsDict = DeleteItemsd
SelectItemsD = SelectItemsDict = SelectItemsd
SqueezeDimD = SqueezeDimDict = SqueezeDimd
DataStatsD = DataStatsDict = DataStatsd
SimulateDelayD = SimulateDelayDict = SimulateDelayd
CopyItemsD = CopyItemsDict = CopyItemsd
ConcatItemsD = ConcatItemsDict = ConcatItemsd
LambdaD = LambdaDict = Lambdad
LabelToMaskD = LabelToMaskDict = LabelToMaskd
FgBgToIndicesD = FgBgToIndicesDict = FgBgToIndicesd
ClassesToIndicesD = ClassesToIndicesDict = ClassesToIndicesd
ConvertToMultiChannelBasedOnBratsClassesD = (
ConvertToMultiChannelBasedOnBratsClassesDict
) = ConvertToMultiChannelBasedOnBratsClassesd
AddExtremePointsChannelD = AddExtremePointsChannelDict = AddExtremePointsChanneld
TorchVisionD = TorchVisionDict = TorchVisiond
RandTorchVisionD = RandTorchVisionDict = RandTorchVisiond
RandLambdaD = RandLambdaDict = RandLambdad
MapLabelValueD = MapLabelValueDict = MapLabelValued
IntensityStatsD = IntensityStatsDict = IntensityStatsd
ToDeviceD = ToDeviceDict = ToDeviced
CuCIMD = CuCIMDict = CuCIMd
RandCuCIMD = RandCuCIMDict = RandCuCIMd
| 39.642066 | 120 | 0.648841 |
850c4eed4265db8cc45336dd535e6aa16427e73b | 525 | py | Python | Learning Python/practice/prnt, str, num/numbers.py | Magical-Man/Learning-Python | 488347b06be8013ab048963a0a0e9e81995d18b6 | [
"MIT"
] | null | null | null | Learning Python/practice/prnt, str, num/numbers.py | Magical-Man/Learning-Python | 488347b06be8013ab048963a0a0e9e81995d18b6 | [
"MIT"
] | null | null | null | Learning Python/practice/prnt, str, num/numbers.py | Magical-Man/Learning-Python | 488347b06be8013ab048963a0a0e9e81995d18b6 | [
"MIT"
] | null | null | null | print (8 * 8)
#there are several different kinds of math operations and symbols
# + plus adding
#- minus subtracting
# / slash dividing
# * asterisk multiplying
# % modulus dividing with remainder
# < less than true false
# > greater than true false
# <= less than equal true false
# >= greater than equal true false
#Now we will test all of these
print(3 + 3)
print(3 - 3)
print(3 / 3)
print(3 *3)
print(4 % 3)
print(5 > 6)
print(5 < 6)
print(5 <= 6)
print(6 <=6)
| 20.192308 | 65 | 0.607619 |
ec901f7328ecdfde0b3ccc1f2bd5a260f0851fe5 | 1,158 | py | Python | lib/test_bindings.py | pymor/pymor-deal.II | 520b36b42d7e58e8adaefb4c772d36d650f30c27 | [
"BSD-2-Clause"
] | 7 | 2016-05-12T12:15:30.000Z | 2020-06-14T08:06:27.000Z | lib/test_bindings.py | pymor/pymor-deal.II | 520b36b42d7e58e8adaefb4c772d36d650f30c27 | [
"BSD-2-Clause"
] | 8 | 2020-01-24T13:13:22.000Z | 2022-02-24T08:25:22.000Z | lib/test_bindings.py | pymor/pymor-deal.II | 520b36b42d7e58e8adaefb4c772d36d650f30c27 | [
"BSD-2-Clause"
] | 2 | 2019-03-02T14:32:22.000Z | 2021-10-06T09:10:01.000Z | # This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2018 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import pydealii_bindings as dealii
def test_vector():
v = dealii.Vector(10)
u = dealii.Vector(10)
ones = dealii.Vector(10)
for i in range(len(ones)):
ones[i] = 1
w = dealii.Vector(u)
assert u.size() == w.size() == v.size()
v[1] = 3
u[9], u[1] = 3, 3
assert v != u
u[9] = 0
assert v == u
u[1] = 0
# currently not working
# g = iter(u)
# for val in u:
# assert val == 0
u[1:] = dealii.Vector(9)
u[:] = ones
for i in range(len(u)):
assert u[i] == 1
v[:] = np.ones((10,), np.double)
assert v == u
v.axpy(1.1, u)
ddones = dealii.Vector(100)
ddones[:] = np.ones((100,), np.double)
npdd = np.array(ddones, copy=False)
assert np.allclose(npdd, np.ones((100,), dtype=np.double))
npdd += 1.0
ddones /= 2.0
assert np.allclose(npdd, ddones)
if __name__ == "__main__":
test_vector()
| 24.125 | 77 | 0.57772 |
e127733d9795a18762c920210a765e26d847b3b0 | 1,427 | py | Python | blueque/listener.py | ustudio/Blueque | f973c470d6558856bbd7f3bf4d6a3e42d38fce85 | [
"Apache-2.0"
] | 5 | 2016-12-03T23:10:45.000Z | 2018-06-06T17:06:27.000Z | blueque/listener.py | ustudio/Blueque | f973c470d6558856bbd7f3bf4d6a3e42d38fce85 | [
"Apache-2.0"
] | 8 | 2015-06-19T21:32:48.000Z | 2021-01-08T19:27:45.000Z | blueque/listener.py | ustudio/Blueque | f973c470d6558856bbd7f3bf4d6a3e42d38fce85 | [
"Apache-2.0"
] | 1 | 2017-05-18T06:15:17.000Z | 2017-05-18T06:15:17.000Z | from blueque.process_helpers import process_running
import os
import socket
import time
class Listener(object):
def __init__(self, queue, task_factory):
super(Listener, self).__init__()
self._hostname = socket.getfqdn()
self._pid = os.getpid()
self._name = "_".join((self._hostname, str(self._pid)))
self._queue = queue
self._queue.add_listener(self._name)
self._task_factory = task_factory
def _parse_name(self, name):
host, pid = name.rsplit('_', 1)
return host, int(pid)
def listen(self):
while True:
task_id = self._queue.dequeue(self._name)
if task_id is not None:
return self._task_factory(task_id)
else:
time.sleep(1)
def claim_orphan(self):
for listener in self._queue.get_listeners():
host, pid = self._parse_name(listener)
if host != self._hostname:
continue
if pid == self._pid:
continue
if process_running(pid):
continue
if self._queue.remove_listener(listener) == 0:
# already claimed
continue
task_id = self._queue.reclaim_task(listener, self._name)
if task_id is None:
continue
return self._task_factory(task_id)
return None
| 25.482143 | 68 | 0.567624 |
c6ee8b75944aa328e566138154ae36edefdf01bb | 112 | py | Python | deploy/apache/wsgi.py | archman/unicorn-webapp | fac170e228760246c56673587b4a0aa2758adf53 | [
"MIT"
] | 1 | 2018-07-06T16:04:32.000Z | 2018-07-06T16:04:32.000Z | deploy/apache/wsgi.py | archman/unicorn-webapp | fac170e228760246c56673587b4a0aa2758adf53 | [
"MIT"
] | 1 | 2021-11-02T14:12:57.000Z | 2021-11-02T14:12:57.000Z | deploy/apache/wsgi.py | archman/unicorn-webapp | fac170e228760246c56673587b4a0aa2758adf53 | [
"MIT"
] | 1 | 2018-09-27T17:06:56.000Z | 2018-09-27T17:06:56.000Z | #!/usr/bin/env python3
import sys
sys.path.insert(0, '/usr/share/unicorn')
from app import app as application
| 16 | 40 | 0.741071 |
3bf9f9aea79b4940fe9ce236b504ece6711ab2f9 | 2,995 | py | Python | setup.py | serge-sotnyk/inception-external-recommender | ad3c9a5d18d45940b2002c7c72b7d3dcfcca258e | [
"Apache-2.0"
] | null | null | null | setup.py | serge-sotnyk/inception-external-recommender | ad3c9a5d18d45940b2002c7c72b7d3dcfcca258e | [
"Apache-2.0"
] | null | null | null | setup.py | serge-sotnyk/inception-external-recommender | ad3c9a5d18d45940b2002c7c72b7d3dcfcca258e | [
"Apache-2.0"
] | null | null | null | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
from setuptools import setup, find_packages
# Package meta-data.
NAME = "inception-rec"
DESCRIPTION = "INCEpTION external recommender library in Python"
HOMEPAGE = "https://inception-project.github.io/"
EMAIL = "inception-users@googlegroups.com"
AUTHOR = "The INCEpTION team"
REQUIRES_PYTHON = ">=3.6.0"
install_requires = [
"flask",
"filelock",
"dkpro-cassis>=0.5.0",
"joblib",
]
contrib_dependencies = [
]
test_dependencies = [
"pytest",
"codecov",
"pytest-cov",
]
dev_dependencies = [
"waitress",
"black",
"wget"
]
doc_dependencies = [
"sphinx",
"sphinx-autodoc-typehints",
"sphinx-rtd-theme"
]
extras = {
"test": test_dependencies,
"dev": dev_dependencies,
"doc": doc_dependencies,
"contrib": contrib_dependencies,
}
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if "README.rst" is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package"s __version__.py module as a dictionary.
about = {}
with open(os.path.join(here, "ariadne", "__version__.py")) as f:
exec(f.read(), about)
# Where the magic happens:
setup(
name=NAME,
version=about["__version__"],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=HOMEPAGE,
packages=find_packages(exclude="tests"),
keywords="uima dkpro inception nlp",
project_urls={
"Bug Tracker": "https://github.com/serge-sotnyk/inception-external-recommender/issues",
"Documentation": "https://github.com/inception-project/inception-external-recommender",
"Source Code": "https://github.com/serge-sotnyk/inception-external-recommender",
},
install_requires=install_requires,
test_suite="tests",
tests_require=test_dependencies,
extras_require=extras,
include_package_data=True,
license="Apache License 2.0",
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries",
"Topic :: Scientific/Engineering :: Human Machine Interfaces",
"Topic :: Text Processing :: Linguistic"
],
)
| 26.741071 | 95 | 0.668114 |
8de18155b7410b62598a4078529a184027a9cff5 | 1,276 | py | Python | maxarcat_client/test/test_search_body.py | fsvenson/maxarcat | 3b015b73734c274dbc821ac118980dbcd2e36879 | [
"MIT"
] | null | null | null | maxarcat_client/test/test_search_body.py | fsvenson/maxarcat | 3b015b73734c274dbc821ac118980dbcd2e36879 | [
"MIT"
] | null | null | null | maxarcat_client/test/test_search_body.py | fsvenson/maxarcat | 3b015b73734c274dbc821ac118980dbcd2e36879 | [
"MIT"
] | 2 | 2021-02-25T08:43:06.000Z | 2022-02-21T19:18:21.000Z | # coding: utf-8
"""
Maxar Content API - Catalog
The Maxar Content Catalog API implements a STAC-compliant service for searching the Maxar content catalog. __The STAC specification is still under development. When version 1.0 of the STAC specification is released the Content Catalog API will be updated to reflect any changes, some of which will not be backward compatible with this current version.__ For information on STAC see [stacspec.org](https://stacspec.org) # noqa: E501
OpenAPI spec version: 0.9
Contact: DL-Content-Catalog@maxar.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import maxarcat_client
from maxarcat_client.models.search_body import SearchBody # noqa: E501
from maxarcat_client.rest import ApiException
class TestSearchBody(unittest.TestCase):
"""SearchBody unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSearchBody(self):
"""Test SearchBody"""
# FIXME: construct object with mandatory attributes with example values
# model = maxarcat_client.models.search_body.SearchBody() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 31.9 | 440 | 0.732759 |
89f98dbbb4de95181e5eddf12f565f1869e397d9 | 622 | py | Python | GUI/dialogs/geometry_dialogs/landing_dialog/landing_gear_dialog.py | StepLogic/Parametric-Drone-Design-Software | be9c537427f85b08c071c2666712fd32643cd439 | [
"Unlicense"
] | 7 | 2021-03-17T01:23:28.000Z | 2021-05-06T20:41:21.000Z | GUI/dialogs/geometry_dialogs/landing_dialog/landing_gear_dialog.py | StepLogic/Parametric-Drone-Design-Software | be9c537427f85b08c071c2666712fd32643cd439 | [
"Unlicense"
] | null | null | null | GUI/dialogs/geometry_dialogs/landing_dialog/landing_gear_dialog.py | StepLogic/Parametric-Drone-Design-Software | be9c537427f85b08c071c2666712fd32643cd439 | [
"Unlicense"
] | null | null | null | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from GUI.tabs.geometry_tabs_.landing_gear.landing_gear_tab import landing_gear_tab
class landing_gear_dialog(QDialog):
def __init__(self):
super().__init__()
self.tab = landing_gear_tab()
self.layout = QFormLayout(self)
self.buttons = QDialogButtonBox(
QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
Qt.Horizontal, self)
self.layout.addRow(self.tab)
self.layout.addRow(self.buttons)
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
| 31.1 | 82 | 0.694534 |
e65f9534a8d4407674ace98975fddbe39ac28de4 | 271 | py | Python | tests/artificial/transf_Quantization/trend_ConstantTrend/cycle_7/ar_12/test_artificial_128_Quantization_ConstantTrend_7_12_0.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_Quantization/trend_ConstantTrend/cycle_7/ar_12/test_artificial_128_Quantization_ConstantTrend_7_12_0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_Quantization/trend_ConstantTrend/cycle_7/ar_12/test_artificial_128_Quantization_ConstantTrend_7_12_0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 7, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12); | 38.714286 | 171 | 0.738007 |
ad98b405c270fb42a3b257fd39f7166d8a93aff4 | 1,602 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/sms/apis/BatchSendRequest.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | null | null | null | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/sms/apis/BatchSendRequest.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | null | null | null | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/sms/apis/BatchSendRequest.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | null | null | null | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class BatchSendRequest(JDCloudRequest):
"""
Send SMS messages in batch using the specified template
"""
def __init__(self, parameters, header=None, version="v1"):
super(BatchSendRequest, self).__init__(
'/regions/{regionId}/batchSend', 'POST', header, version)
self.parameters = parameters
class BatchSendParameters(object):
def __init__(self, regionId, templateId, signId, phoneList, ):
"""
:param regionId: Region ID
:param templateId: Template ID
:param signId: Signature ID
:param phoneList: Domestic phone numbers for the batch send; do not send to more than 200 numbers at a time
"""
self.regionId = regionId
self.templateId = templateId
self.signId = signId
self.phoneList = phoneList
self.params = None
def setParams(self, params):
"""
:param params: (Optional) Data values corresponding to the SMS template variables, in Array format
"""
self.params = params
| 29.127273 | 75 | 0.682272 |
25afba94385e8aba86bf83e3db52dd52b28a266b | 3,313 | py | Python | main.py | kristerduster/stock-fundamentals | edef9b273968de01cbb79b8efe93b75f4dec2ae9 | [
"MIT"
] | null | null | null | main.py | kristerduster/stock-fundamentals | edef9b273968de01cbb79b8efe93b75f4dec2ae9 | [
"MIT"
] | null | null | null | main.py | kristerduster/stock-fundamentals | edef9b273968de01cbb79b8efe93b75f4dec2ae9 | [
"MIT"
] | null | null | null | #import stock info module from yahoo fin api to use methods
import yahoo_fin.stock_info as si
#import pandas module to use pandas methods
import pandas as pd
#define function (acronym for convert to number) takes object parameter, converts to number, ex: changes 13.56 B to 13,560,000,000
def ctn(x):
x=str(x)
if "B" in x:
x=x.split("B")
x=float(('').join(x))*1000000000
elif "M" in x:
x = x.split("M")
x=float(("").join(x))*1000000
elif "K" in x:
x = x.split("K")
x=float(("").join(x))*1000
elif "T" in x:
x = x.split("T")
x=float(("").join(x))*1000000000000
return(x)
#function takes string parameter ticker, gets stats valuation (dataframe), switches rows and columns, deletes unnecessary columns
def org_stats_valuation(ticker):
df = si.get_stats_valuation(ticker)
df.rename(columns={"Unnamed: 0":"Attribute"},inplace=True)
df=df.set_index('Attribute').transpose()
df['Ticker']=ticker
df=df.set_index('Ticker')
df=df.iloc[0:1]
df = df.drop(['Market Cap (intraday) 5','Enterprise Value 3','Price/Sales (ttm)','PEG Ratio (5 yr expected) 1','Enterprise Value/Revenue 3','Enterprise Value/EBITDA 6'], axis=1)
return df
#function takes string parameter ticker, gets stats (dataframe), switches rows and columns, deletes unnecessary columns
def org_stats(ticker):
df2=si.get_stats(ticker)
df2.columns=['Attribute','Recent']
df2=df2.set_index('Attribute').transpose()
df2['Ticker']=ticker
df2=df2.set_index('Ticker')
df2=df2.iloc[0:2]
df2=df2.drop(df2.columns[0:30],axis=1)
df2=df2.drop(['Return on Assets (ttm)','Diluted EPS (ttm)','Quarterly Earnings Growth (yoy)','Total Cash Per Share (mrq)','Current Ratio (mrq)','Book Value Per Share (mrq)'],axis=1)
df2=df2.drop(df2.columns[4:8],axis=1)
return df2
#function takes string parameter ticker, gets cash flow (dataframe), switches rows and columns, deletes unnecessary columns
def org_cash_flow(ticker):
df3=si.get_cash_flow(ticker)
df3=df3.iloc[1:2,:1]
df3=df3.transpose()
df3['Ticker']=ticker
df3=df3.set_index('Ticker')
df3.rename(columns={"changeInCash":"Net Cash Change"},inplace=True)
return df3
#define new function with one list parameter tickers, sorts ticker data for each ticker in one dataframe, creates new columns of data based on existing columns
def get_fundamentals(tickers):
bigDF=pd.DataFrame()
for ticker in tickers:
df=org_stats_valuation(ticker)
df2=org_stats(ticker)
df3=org_cash_flow(ticker)
DF=pd.concat([df,df2,df3],axis=1)
bigDF=pd.concat([bigDF,DF])
bigDF["Spendings on Expenditures"]=(bigDF["Operating Cash Flow (ttm)"].apply(ctn)-bigDF["Levered Free Cash Flow (ttm)"].apply(ctn))/bigDF["Operating Cash Flow (ttm)"].apply(ctn)
bigDF["Debt/Net Income"]=bigDF["Total Debt (mrq)"].apply(ctn)/bigDF["Net Income Avi to Common (ttm)"].apply(ctn)
bigDF["Dividends and Buyouts"]=(bigDF["Levered Free Cash Flow (ttm)"].apply(ctn)-bigDF["Net Cash Change"])/bigDF["Levered Free Cash Flow (ttm)"].apply(ctn)
bigDF["Free Cash/Revenue"]=bigDF["Levered Free Cash Flow (ttm)"].apply(ctn)/bigDF["Revenue (ttm)"].apply(ctn)
return(bigDF)
get_fundamentals(["INTC","NVDA","MSFT","FB"])
| 48.720588 | 185 | 0.679445 |
0478f3c8bd4203f238622e97f3b2f2f8f49180be | 353 | py | Python | django_import_data/migrations/0018_remove_modelimportattempt_row_data.py | GreenBankObservatory/django-import-data | 80b75f5a1a750c75c1d9f6c759a357cf600d4a5e | [
"MIT"
] | 1 | 2021-09-22T14:37:41.000Z | 2021-09-22T14:37:41.000Z | django_import_data/migrations/0018_remove_modelimportattempt_row_data.py | GreenBankObservatory/django-import-data | 80b75f5a1a750c75c1d9f6c759a357cf600d4a5e | [
"MIT"
] | null | null | null | django_import_data/migrations/0018_remove_modelimportattempt_row_data.py | GreenBankObservatory/django-import-data | 80b75f5a1a750c75c1d9f6c759a357cf600d4a5e | [
"MIT"
] | null | null | null | # Generated by Django 2.2.2 on 2019-07-08 19:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('django_import_data', '0017_auto_20190708_1504'),
]
operations = [
migrations.RemoveField(
model_name='modelimportattempt',
name='row_data',
),
]
| 19.611111 | 58 | 0.620397 |
3125387b09450e85c9b88c20c03355c47519399a | 984 | py | Python | model-optimizer/mo/front/kaldi/extractors/activation_ext.py | undeadinu/dldt | fbc7a4a710c24def8ab199926a7da90a0394b87d | [
"Apache-2.0"
] | 3 | 2019-07-08T09:03:03.000Z | 2020-09-09T10:34:17.000Z | model-optimizer/mo/front/kaldi/extractors/activation_ext.py | undeadinu/dldt | fbc7a4a710c24def8ab199926a7da90a0394b87d | [
"Apache-2.0"
] | 3 | 2020-11-13T18:59:18.000Z | 2022-02-10T02:14:53.000Z | model-optimizer/mo/front/kaldi/extractors/activation_ext.py | undeadinu/dldt | fbc7a4a710c24def8ab199926a7da90a0394b87d | [
"Apache-2.0"
] | 1 | 2018-12-05T07:38:25.000Z | 2018-12-05T07:38:25.000Z | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.extractor import FrontExtractorOp
from mo.ops.op import Op
class ActivationFrontExtractor(FrontExtractorOp):
op = 'activation'
enabled = True
@staticmethod
def extract(node):
mapping_rule = {
'operation': node.pb.operation
}
Op.get_op_class_by_name('Activation').update_node_stat(node, mapping_rule)
return __class__.enabled
| 29.818182 | 82 | 0.734756 |
37c5ef61e2e4c021eb5d5f919a617987f7f32763 | 215 | py | Python | codigo/Live25/tabulares/ods_parser.py | cassiasamp/live-de-python | 00b5e51793097544ba9b75c97a0d30e63970bf45 | [
"MIT"
] | 572 | 2018-04-03T03:17:08.000Z | 2022-03-31T19:05:32.000Z | codigo/Live25/tabulares/ods_parser.py | cassiasamp/live-de-python | 00b5e51793097544ba9b75c97a0d30e63970bf45 | [
"MIT"
] | 176 | 2018-05-18T15:56:16.000Z | 2022-03-28T20:39:07.000Z | codigo/Live25/tabulares/ods_parser.py | cassiasamp/live-de-python | 00b5e51793097544ba9b75c97a0d30e63970bf45 | [
"MIT"
] | 140 | 2018-04-18T13:59:11.000Z | 2022-03-29T00:43:49.000Z | import ezodf
doc = ezodf.opendoc('episodios.ods')
# type(doc)
folhas = list(doc.sheets.names())
ep_folha = doc.sheets[folhas[0]]
linhas = sum(list(ep_folha.rows()), [])
print(list(map(lambda x: x.value, linhas)))
| 21.5 | 43 | 0.693023 |
1bbd77a24d056c3d22878fd6737b4541a3266094 | 886 | py | Python | isi_sdk_8_1_1/test/test_mapping_import.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_1_1/test/test_mapping_import.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_1_1/test/test_mapping_import.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.mapping_import import MappingImport # noqa: E501
from isi_sdk_8_1_1.rest import ApiException
class TestMappingImport(unittest.TestCase):
"""MappingImport unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testMappingImport(self):
"""Test MappingImport"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_1.models.mapping_import.MappingImport() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 21.609756 | 83 | 0.699774 |
ba1cce940083a1e90b7891cd1938ab449ef8d4d3 | 566 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractBierutranslationsHomeBlog.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractBierutranslationsHomeBlog.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractBierutranslationsHomeBlog.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z |
def extractBierutranslationsHomeBlog(item):
'''
Parser for 'bierutranslations.home.blog'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| 25.727273 | 104 | 0.64311 |
2f1bc58eef4fedc834bc1db3c7fd47178424ce04 | 21,036 | py | Python | commands/moderation.py | OxLemonxO/RobTheBoat | a556ad76665f23b83421ce6a62f3170cf8850508 | [
"MIT"
] | null | null | null | commands/moderation.py | OxLemonxO/RobTheBoat | a556ad76665f23b83421ce6a62f3170cf8850508 | [
"MIT"
] | null | null | null | commands/moderation.py | OxLemonxO/RobTheBoat | a556ad76665f23b83421ce6a62f3170cf8850508 | [
"MIT"
] | null | null | null | import asyncio
from discord.ext import commands
from utils.mysql import *
from utils.channel_logger import Channel_Logger
from utils.tools import *
from utils import checks
class Moderation(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.logger = Channel_Logger(bot)
@checks.server_mod_or_perms(kick_members=True)
@commands.command()
async def kick(self, ctx, user:discord.Member):
"""Kicks the specified user from the server"""
try:
await ctx.guild.kick(user)
await ctx.send("Finally kicked `{}`.".format(user))
except discord.errors.Forbidden:
if user.top_role.position == ctx.me.top_role.position:
await ctx.send("Did you ever know that I can't really kick that user just simply because it's on the same role level as me?")
elif user.top_role.position > ctx.me.top_role.position:
await ctx.send("HEEEEEEEEEEEYYYYYYYY, I can't kick them! They're higher than me.")
else:
await ctx.send("I don't have the `Kick Members` permission...")
@checks.server_mod_or_perms(ban_members=True)
@commands.command()
async def ban(self, ctx, user:discord.Member, *, reason:str=None):
"""Bans the specified user from the server"""
if reason is None:
reason = "No reason was specified"
reason += "**\n\n**Banned by {}".format(ctx.author)
try:
await ctx.guild.ban(user, delete_message_days=0, reason=reason)
except discord.errors.Forbidden:
if user.top_role.position == ctx.me.top_role.position:
await ctx.send("Can't ban someone if they're on the same level as I am. ")
elif user.top_role.position > ctx.me.top_role.position:
await ctx.send("I can't ban someone if they're higher than me, fool!")
else:
await ctx.send("Can't smash someone with a ban hammer if I don't even have the `Ban Members` permission.")
return
await ctx.send("Successfully banned `{}`".format(user))
@checks.server_mod_or_perms(ban_members=True)
@commands.command()
async def unban(self, ctx, *, username:str):
"""Unbans the user with the specifed name from the server"""
try:
banlist = await ctx.guild.bans()
except discord.errors.Forbidden:
await ctx.send("Hey. Uh. Sorry to break it to you, but I don't have the `Ban Members` permission, which also allows to unban.")
return
user = None
for ban in banlist:
if ban.user.name == username:
user = ban.user
if user is None:
await ctx.send("For somewhat reason, `{}` isn't on the banlist.".format(username))
return
await ctx.guild.unban(user)
await ctx.send("Pardoned `{}`.".format(user))
@checks.server_mod_or_perms(ban_members=True)
@commands.command()
async def hackban(self, ctx, id:int, *, reason:str=None):
"""Bans the user with the specified id from the server"""
if reason is None:
reason = "No reason was specified"
reason += "**\n\n**Banned by {}".format(ctx.author)
try:
await self.bot.http.ban(id, ctx.guild.id, delete_message_days=0, reason=reason)
except discord.errors.HTTPException or discord.errors.NotFound:
await ctx.send("No Discord Member exists with the ID of `{}`".format(id))
return
except discord.errors.Forbidden:
await ctx.send("Can't really ban someone with an ID without the goddamn `Ban Members` permission.")
return
banlist = await ctx.guild.bans()
for ban in banlist:
if ban.user.id == id:
user = ban.user
await ctx.send("Successfully banned `{}`".format(user))
@commands.command()
async def banlist(self, ctx):
"""Displays the server's banlist"""
try:
banlist = await ctx.guild.bans()
except discord.errors.Forbidden:
await ctx.send("Can't list them without the `Ban Members` permission.")
return
bancount = len(banlist)
display_bans = []
bans = None
if bancount == 0:
bans = "Hooray! No one's banned."
else:
for ban in banlist:
if len(", ".join(display_bans)) < 1800:
display_bans.append(str(ban.user))
else:
bans = ", ".join(display_bans) + "\n... and {} more".format(len(banlist) - len(display_bans))
break
if not bans:
bans = ", ".join(display_bans)
await ctx.send("Total bans: `{}`\n```{}```".format(bancount, bans))
@checks.server_mod_or_perms(manage_roles=True)
@commands.command()
async def mute(self, ctx, user:discord.Member, *, reason:str=None):
"""Mutes the specified user"""
if reason is None:
reason = "No reason was specified"
reason += "**\n\n**Muted by {}".format(ctx.author)
mute_role_name = read_data_entry(ctx.guild.id, "mute-role")
mute_role = discord.utils.get(ctx.guild.roles, name=mute_role_name)
if mute_role is None:
await ctx.send("wyd I can't find the `{}`".format(mute_role_name))
return
try:
await user.add_roles(mute_role, reason=reason)
await ctx.send("Hushed `{}`".format(user))
except discord.errors.Forbidden:
if mute_role.position == ctx.me.top_role.position:
await ctx.send("Why did you mute me, fool?")
elif mute_role.position > ctx.me.top_role.position:
await ctx.send("I can't add the mute role because it's somehow higher than me? You better well damn fix that if you want to use this.")
else:
await ctx.send("I'm missing that one permission to actually remove and add roles. Oh wait, it's the `Manage Roles` permission.")
@checks.server_mod_or_perms(manage_roles=True)
@commands.command()
async def unmute(self, ctx, user:discord.Member):
"""Unmutes the specified user"""
mute_role_name = read_data_entry(ctx.guild.id, "mute-role")
mute_role = discord.utils.get(ctx.guild.roles, name=mute_role_name)
if mute_role is None:
await ctx.send("I could not find any role named `{}`".format(mute_role_name))
return
try:
await user.remove_roles(mute_role, reason="Unmuted by {}".format(ctx.author))
await ctx.send("Successfully unmuted `{}`".format(user))
except discord.errors.Forbidden:
if mute_role.position == ctx.me.top_role.position:
await ctx.send("WHY'D YOU MUTE ME THOT")
elif mute_role.position > ctx.me.top_role.position:
await ctx.send("I can't remove the mute if the role is higher than me...")
else:
await ctx.send("I'm missing that one permission to actually remove and add roles. Oh wait, it's the `Manage Roles` permission.")
@checks.server_mod_or_perms(manage_messages=True)
@commands.command(aliases=['p', 'purge', '🇵', '🅿'])
async def prune(self, ctx, amount:int):
"""Mass deletes a specified amount of messages"""
try:
await ctx.message.delete()
except discord.errors.Forbidden:
await ctx.send("And you would expect me to mass delete messages without manage messages? I wonder why'd you think that one...")
return
deleted = await ctx.channel.purge(limit=amount)
deleted_message = await ctx.send("{} Deleted {} messages".format(ctx.author.mention, len(deleted)))
await asyncio.sleep(10)
# The try and except pass is so in the event a user prunes again or deletes the prune notification before the bot automatically does it, it will not raise an error
try:
await deleted_message.delete()
except:
pass
@checks.server_mod_or_perms(manage_messages=True)
@commands.command()
async def pin(self, ctx, id:int):
"""Pins the message with the specified ID to the channel"""
try:
message = await ctx.channel.get_message(id)
except discord.errors.NotFound:
await ctx.send("Can't find the message ID `{}`".format(id))
return
try:
await message.pin()
except discord.errors.Forbidden:
await ctx.send("Can't pin it to the wall without `Manage Messages`")
@checks.server_mod_or_perms(manage_messages=True)
@commands.command()
async def unpin(self, ctx, id:int):
"""Unpins the message with the specified ID from the channel"""
pinned_messages = await ctx.channel.pins()
message = discord.utils.get(pinned_messages, id=id)
if message is None:
await ctx.send("Can't find the message ID `{}`".format(id))
return
try:
await message.unpin()
await ctx.send("Successfully unpinned the message!")
except discord.errors.Forbidden:
            await ctx.send("Can't unpin it from the wall without `Manage Messages`")
#i got REALLY fucking tired of editing every single Manage Role perm here
@checks.server_admin_or_perms(manage_roles=True)
@commands.command()
async def addrole(self, ctx, user:discord.Member, *, name:str):
"""Adds the specified role to the specified user"""
role = discord.utils.get(ctx.guild.roles, name=name)
if role is None:
await ctx.send("No role with the name of `{}` was found on this server".format(name))
return
try:
await user.add_roles(role, reason="The role \"{}\" was added by {}".format(role.name, ctx.author))
await ctx.send("Successfully added the `{}` role to `{}`".format(name, user))
except discord.errors.Forbidden:
if role.position == ctx.me.top_role.position:
await ctx.send("I can't add the highest role I have to other users. Sorry, not my rules. Blame Discord.")
elif role.position > ctx.me.top_role.position:
await ctx.send("Can't add roles that are definitely higher than the top one I already have.")
else:
await ctx.send("I do not have the `Manage Roles` permission")
@checks.server_admin_or_perms(manage_roles=True)
@commands.command()
async def removerole(self, ctx, user:discord.Member, *, name:str):
"""Removes the specified role from the specified user"""
role = discord.utils.get(ctx.guild.roles, name=name)
if role is None:
await ctx.send("No role with the name of `{}` was found on this server".format(name))
return
try:
await user.remove_roles(role, reason="The role \"{}\" was removed by {}".format(role.name, ctx.author))
await ctx.send("Successfully removed the `{}` role from `{}`".format(name, user))
except discord.errors.Forbidden:
if role.position == ctx.me.top_role.position:
                await ctx.send("I can't remove the highest role I have from other users. Sorry, not my rules. Blame Discord.")
elif role.position > ctx.me.top_role.position:
await ctx.send("Can't remove roles that are definitely higher than the top one I already have.")
else:
await ctx.send("I do not have the `Manage Roles` permission")
@checks.server_admin_or_perms(manage_roles=True)
@commands.command()
async def createrole(self, ctx, *, name:str):
"""Creates a role with the specified name"""
try:
await ctx.guild.create_role(name=name, reason="Created by {}".format(ctx.author), permissions=ctx.guild.default_role.permissions)
await ctx.send("Made a role named `{}`".format(name))
except discord.errors.Forbidden:
await ctx.send("I do not have the `Manage Roles` permission")
@checks.server_admin_or_perms(manage_roles=True)
@commands.command()
async def deleterole(self, ctx, *, name:str):
"""Deletes the role with the specified name"""
role = discord.utils.get(ctx.guild.roles, name=name)
if role is None:
await ctx.send("Can't find the role named `{}`".format(name))
return
try:
await role.delete(reason="Deleted by {}".format(ctx.author))
await ctx.send("Trashed out the role `{}`".format(name))
except discord.errors.Forbidden:
if role.position == ctx.me.top_role.position:
await ctx.send("I can't delete my own highest role, dingus.")
elif role.position > ctx.me.top_role.position:
await ctx.send("I can't delete any roles higher than the one I have, dork.")
else:
await ctx.send("I do not have the `Manage Roles` permission")
@checks.server_admin_or_perms(manage_roles=True)
@commands.command()
async def editrole(self, ctx, type:str, value:str, *, name:str):
"""Edits a role with the specified name"""
role = discord.utils.get(ctx.guild.roles, name=name)
if role is None:
await ctx.send("There isn't a role named `{}` anywhere on the server...".format(name))
return
if type == "color":
if value != "remove":
try:
color = discord.Color(value=int(value.strip("#"), 16))
except:
await ctx.send("`{}` isn't a valid color. Better be using hexadecimal color codes! (Ex: #FF0000)".format(value))
return
else:
color = discord.Color.default()
try:
await role.edit(reason="Edited by {}".format(ctx.author), color=color)
await ctx.send("Edited the role named `{}`".format(name))
except discord.errors.Forbidden:
if role.position == ctx.me.top_role.position:
await ctx.send("Can't even touch my highest role.")
elif role.position > ctx.me.top_role.position:
await ctx.send("Can't edit it because it's higher than my highest role (this gets annoying if you've seen it 95 times)")
else:
await ctx.send("I do not have the `Manage Roles` permission")
except discord.errors.NotFound:
# Don't ask, for some reason if the role is higher than the bot's highest role it returns a NotFound 404 error
                await ctx.send("That role is higher than my highest role (HOH)")
elif type == "permissions":
try:
perms = discord.Permissions(permissions=int(value))
except:
await ctx.send("`{}` is not a valid permission number! If you need help finding the permission number, then go to <http://creeperseth.com/discordpermcalc> for a permission calculator!".format(value))
return
try:
await role.edit(reason="Edited by {}".format(ctx.author), permissions=perms)
await ctx.send("Edited the role named `{}`".format(name))
except discord.errors.Forbidden:
                await ctx.send("I do not have the `Manage Roles` permission")
except discord.errors.NotFound:
await ctx.send("That role is higher than my highest role (HOH)")
elif type == "position":
try:
pos = int(value)
except:
                await ctx.send("`{}` is not a valid number".format(value))
return
if pos >= ctx.guild.me.top_role.position:
                await ctx.send("That number is not lower than my highest role's position. My highest role's position is `{}`".format(ctx.guild.me.top_role.position))
return
try:
if pos <= 0:
pos = 1
await role.edit(reason="Moved by {}".format(ctx.author), position=pos)
await ctx.send("Edited the role named `{}`".format(name))
except discord.errors.Forbidden:
await ctx.send("I do not have the `Manage Roles` permission")
except discord.errors.NotFound:
await ctx.send("That role is higher than my highest role (HOH)")
elif type == "separate":
try:
bool = convert_to_bool(value)
except ValueError:
await ctx.send("`{}` is not a valid boolean".format(value))
return
try:
await role.edit(reason="Edited by {}".format(ctx.author), hoist=bool)
await ctx.send("Edited the role named `{}`".format(name))
except discord.errors.Forbidden:
await ctx.send("I do not have the `Manage Roles` permission or that role is not lower than my highest role.")
elif type == "mentionable":
try:
bool = convert_to_bool(value)
except ValueError:
await ctx.send("`{}` is not a valid boolean".format(value))
return
try:
await role.edit(reason="Edited by {}".format(ctx.author), mentionable=bool)
await ctx.send("Edited the role named `{}`".format(name))
except discord.errors.Forbidden:
await ctx.send("I do not have the `Manage Roles` permission")
except discord.errors.NotFound:
await ctx.send("That role is higher than my highest role (HOH)")
else:
await ctx.send("Invalid type specified, valid types are `color`, `permissions`, `position`, `separate`, and `mentionable`")
@checks.server_admin_or_perms(manage_roles=True)
@commands.command()
async def renamerole(self, ctx, name:str, newname:str):
"""Renames a role with the specified name, be sure to put double quotes (\") around the name and the new name"""
role = discord.utils.get(ctx.guild.roles, name=name)
if role is None:
await ctx.send("No role was found on this server with the name of `{}`".format(name))
return
try:
await role.edit(reason="Renamed by {}".format(ctx.author), name=newname)
await ctx.send("Successfully renamed the `{}` role to `{}`".format(name, newname))
except discord.errors.Forbidden:
if role.position == ctx.me.top_role.position:
                await ctx.send("I can't change the name of my own highest role. Do it yourself manually, you lazy child.")
elif role.position > ctx.me.top_role.position:
await ctx.send("boooooooooooooooooooooo icantchangeitbecauseitshigherthanmyhighestroooooooooole")
else:
await ctx.send("I do not have the `Manage Roles` permission")
@checks.server_mod_or_perms(ban_members=True)
@commands.command()
async def massban(self, ctx, *, ids:str):
"""Mass bans users by ids (separate ids with spaces pls)"""
await ctx.channel.trigger_typing()
ids = ids.split(" ")
failed_ids = []
success = 0
for id in ids:
try:
await self.bot.http.ban(id, ctx.guild.id, delete_message_days=0)
success += 1
except:
failed_ids.append("`{}`".format(id))
if len(failed_ids) != 0:
            await ctx.send("I couldn't ban the following ID(s): {}".format(", ".join(failed_ids)))
        await ctx.send("I successfully mass banned {} out of {} users.".format(success, len(ids)))
@checks.server_mod_or_perms(manage_messages=True)
@commands.command()
async def removereactions(self, ctx, id:int):
"""Clear reactions from a message"""
try:
message = await ctx.channel.get_message(id)
except discord.errors.NotFound:
await ctx.send("I can't find that one message ID (`{}`) with the reactions on it".format(id))
return
try:
await message.clear_reactions()
await ctx.send("Removed the shitty reactions from the message.")
except discord.errors.Forbidden:
await ctx.send("I don't have the `Manage Messages` permission to remove them shit reactions.")
def setup(bot):
bot.add_cog(Moderation(bot))
| 50.934625 | 216 | 0.58918 |
1867db84d8f3ff1a9adac3a2c8e66cd1823f7516 | 13,455 | py | Python | Predictions/csv/last_rows.py | janithmehta/StockMarketPrediction | 5f85b81289ad599bd30b5cd8555eec0f7bfb509d | [
"MIT"
] | 18 | 2018-02-01T09:41:16.000Z | 2022-01-13T06:53:56.000Z | Predictions/csv/wrong_csv/last_rows.py | darbary/StockMarketPrediction | 5f85b81289ad599bd30b5cd8555eec0f7bfb509d | [
"MIT"
] | 1 | 2018-08-08T06:29:28.000Z | 2018-08-15T07:10:49.000Z | Predictions/csv/wrong_csv/last_rows.py | darbary/StockMarketPrediction | 5f85b81289ad599bd30b5cd8555eec0f7bfb509d | [
"MIT"
] | 10 | 2017-05-09T06:25:56.000Z | 2021-01-05T23:17:58.000Z | import pandas as pd
import statistics as st
import numpy as np
list1=['AMBUJACEM','ASIANPAINT','BANKBARODA','HDIL','HEROMOTOCO','HINDUNILVR','ITC','INFY','TCS','MARUTI']
for m in list1:
name=m+'.csv'
df=pd.read_csv(name)
#df=df[::-1]
#For Old format
'''
df=df.replace([np.inf,-np.inf],np.nan)
df=df.replace('#DIV/0!',np.nan)
df=df.replace('null',np.nan)
df=df.dropna()
df['X1'] = 0.00
df['X2'] = 0.00
df['X3'] = 0.00
df['X4'] = 0.00
df['X5'] = 0.00
df['X6'] = 0.00
df['X7'] = 0.00
df['X8'] = 0.00
df['X9'] = 0.00
df['X10'] = 0.00
df['X11'] = 0.00
df['X12'] = 0.00
df['X13'] = 0.00
df['X14'] = 0.00
df['X15'] = 0.00
df['X16']=0.00
df['X17']=0.00
df['X18']=0.00
df['X19']=0.00
df['X20']=0.00
df['X21']=0.00
df['X22']=0.00
df['X23']=0.00
df['X24']=0.00
df['K5']=0.00
df['K10']=0.00
df['K15']=0.00
df['K20']=0.00
df['M1']=0.00
df['M5']=0.00
df['M10']=0.00
df['M15']=0.00
df['M20']=0.00
df['One Day Momentum']=0.00
df['Five Day Momentum']=0.00
df['Ten Day Momentum']=0.00
df['Fifteen Day Momentum']=0.00
df['Twenty Day Momentum']=0.00
df['Next Day Price']=0.00
df['5 Day Price']=0.00
df['10 Day Price']=0.00
df['15 Day Price']=0.00
df['20 Day Price']=0.00
df['One Day Change']=0.00
df['Five Day Change']=0.00
df['Ten Day Change']=0.00
df['Fifteen Day Change']=0.00
df['Twenty Day Change']=0.00
df['One Day Trend']=0.00
df['Five Day Trend']=0.00
df['Ten Day Trend']=0.00
df['Fifteen Day Trend']=0.00
df['Twenty Day Trend']=0.00
df['Close']=df['Close'].astype(float)
df['High']=df['High'].astype(float)
df['Low']=df['Low'].astype(float)
'''
for i in range(len(df)-20,len(df)):
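		# X1: one-day return; X2-X9: deviation of the close from its 5- to 40-day simple moving averages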
df['X1'][i]=(df['Close'][i]-df['Close'][i-1])/df['Close'][i-1]
ma5=0.0
for j in range(1,6):
ma5+=df['Close'][i-j]
ma5=ma5/5
df['X2'][i]=(df['Close'][i]-ma5)/ma5
ma10=0.0
for j in range(1,11):
ma10+=df['Close'][i-j]
ma10=ma10/10
df['X3'][i]=(df['Close'][i]-ma10)/ma10
ma15=0.0
for j in range(1,16):
ma15+=df['Close'][i-j]
ma15=ma15/15
df['X4'][i]=(df['Close'][i]-ma15)/ma15
ma20=0.0
for j in range(1,21):
ma20+=df['Close'][i-j]
ma20=ma20/20
df['X5'][i]=(df['Close'][i]-ma20)/ma20
ma25=0.0
for j in range(1,26):
ma25+=df['Close'][i-j]
ma25=ma25/25
df['X6'][i]=(df['Close'][i]-ma25)/ma25
ma30=0.0
for j in range(1,31):
ma30+=df['Close'][i-j]
ma30=ma30/30
df['X7'][i]=(df['Close'][i]-ma30)/ma30
ma35=0.0
for j in range(1,36):
ma35+=df['Close'][i-j]
ma35=ma35/35
df['X8'][i]=(df['Close'][i]-ma35)/ma35
ma40=0.0
for j in range(1,41):
ma40+=df['Close'][i-j]
ma40=ma40/40
df['X9'][i]=(df['Close'][i]-ma40)/ma40
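		# X10-X12: distance of the close outside Bollinger-style bands (MA +/- 0.02*stdev) over 10/20/30-day windows; stays 0 inside the bands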
ub10=0.0
#print(df['Close'][i-1:i-11])
ub10=ma10+0.02*st.pstdev(df['Close'][i-11:i-1])
lb10=0.0
lb10=ma10-0.02*st.pstdev(df['Close'][i-11:i-1])
ub20=0.0
ub20=ma20+0.02*st.pstdev(df['Close'][i-21:i-1])
lb20=0.0
lb20=ma20-0.02*st.pstdev(df['Close'][i-21:i-1])
ub30=0.0
ub30=ma30+0.02*st.pstdev(df['Close'][i-31:i-1])
lb30=0.0
lb30=ma30-0.02*st.pstdev(df['Close'][i-31:i-1])
if df['Close'][i]>ub10:
df['X10'][i]=df['Close'][i]-ub10
elif df['Close'][i]<lb10:
df['X10'][i]=df['Close'][i]-lb10
if df['Close'][i]>ub20:
df['X11'][i]=df['Close'][i]-ub20
elif df['Close'][i]<lb20:
df['X11'][i]=df['Close'][i]-lb20
if df['Close'][i]>ub30:
df['X12'][i]=df['Close'][i]-ub30
elif df['Close'][i]<lb30:
df['X12'][i]=df['Close'][i]-lb30
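		# X13-X16: RSI over 5/10/15/20 days, rescaled from [0, 100] to [-1, 1]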
rs5=0.0
change=0.0
gain=0.0
loss=0.0
for j in range(0,5):
change=df['Close'][i-j]-df['Close'][i-j-1]
if change>0.0:
gain+=change
elif change<0.0:
loss+=(-1*change)
		if loss==0.0:
			rsi5=100.0
		else:
			rs5=gain/loss
			rsi5=100.0-100.0/(1+rs5)
df['X13'][i]=(rsi5-50.0)/50.0
rs10=0.0
change=0.0
gain=0.0
loss=0.0
for j in range(0,10):
change=df['Close'][i-j]-df['Close'][i-j-1]
if change>0.0:
gain+=change
elif change<0.0:
loss+=(-1*change)
		if loss==0.0:
			rsi10=100
		else:
			rs10=gain/loss
			rsi10=100.00-100.00/(1+rs10)
df['X14'][i]=(rsi10-50.00)/50.00
rs15=0.0
change=0.0
gain=0.0
loss=0.0
for j in range(0,15):
change=df['Close'][i-j]-df['Close'][i-j-1]
if change>0.0:
gain+=change
elif change<0.0:
loss+=(-1*change)
		if loss==0.0:
			rsi15=100.00
		else:
			rs15=gain/loss
			rsi15=100.00-100.00/(1+rs15)
df['X15'][i]=(rsi15-50.00)/50.00
rs20=0.0
change=0.0
gain=0.0
loss=0.0
for j in range(0,20):
change=df['Close'][i-j]-df['Close'][i-j-1]
if change>0.0:
gain+=change
elif change<0.0:
loss+=(-1*change)
		if loss==0.0:
			rsi20=100.00
		else:
			rs20=gain/loss
			rsi20=100.00-100.00/(1+rs20)
df['X16'][i]=(rsi20-50.00)/50.00
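		# K5-K20 and X17-X20: stochastic %K against 5/10/15/20-day highs and lows, rescaled to [-1, 1]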
min_low_price=1000000
max_high_price=0
for j in range(1,6):
min_low_price=min(min_low_price,df['Low'][i-j])
max_high_price=max(max_high_price,df['High'][i-j])
k5=100*((df['Close'][i]-min_low_price)/(max_high_price-min_low_price))
df['K5'][i] = k5
df['X17'][i]=(k5-50)/50
min_low_price=1000000
max_high_price=0
for j in range(1,11):
min_low_price=min(min_low_price,df['Low'][i-j])
max_high_price=max(max_high_price,df['High'][i-j])
k10=100*((df['Close'][i]-min_low_price)/(max_high_price-min_low_price))
df['K10'][i] = k10
df['X18'][i]=(k10-50)/50
min_low_price=1000000
max_high_price=0
for j in range(1,16):
min_low_price=min(min_low_price,df['Low'][i-j])
max_high_price=max(max_high_price,df['High'][i-j])
k15=100*((df['Close'][i]-min_low_price)/(max_high_price-min_low_price))
df['K15'][i]=k15
df['X19'][i]=(k15-50)/50
min_low_price=1000000
max_high_price=0
for j in range(1,21):
min_low_price=min(min_low_price,df['Low'][i-j])
max_high_price=max(max_high_price,df['High'][i-j])
k20=100*((df['Close'][i]-min_low_price)/(max_high_price-min_low_price))
df['K20'][i]=k20
df['X20'][i]=(k20-50)/50
#Newly added Code on 22/1/17
#new code 21/2/17
'''
if(i-1>-1):
df['Next Day Price'][i]=df['Close'][i+1]
if(i-5>-1):
df['5 Day Price'][i]=df['Close'][i+5]
if(i-10>-1):
df['10 Day Price'][i]=df['Close'][i+10]
if(i-15>-1):
df['15 Day Price'][i]=df['Close'][i+15]
if(i-20>-1):
df['20 Day Price'][i]=df['Close'][i+20]
'''
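		# M1-M20: percent change vs. 1/5/10/15/20 days ago; *Momentum holds its sign, *Change compares the older close to today's and *Trend holds that sign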
df['M1'][i]=(df['Close'][i]-df['Close'][i-1])/(df['Close'][i-1])
df['M5'][i]=(df['Close'][i]-df['Close'][i-5])/(df['Close'][i-5])
df['M10'][i]=(df['Close'][i]-df['Close'][i-10])/(df['Close'][i-10])
df['M15'][i]=(df['Close'][i]-df['Close'][i-15])/(df['Close'][i-15])
df['M20'][i]=(df['Close'][i]-df['Close'][i-20])/(df['Close'][i-20])
if df['M1'][i]>0:
df['One Day Momentum'][i]=1
else:
df['One Day Momentum'][i]=-1
if df['M5'][i]>0:
df['Five Day Momentum'][i]=1
else:
df['Five Day Momentum'][i]=-1
if df['M10'][i]>0:
df['Ten Day Momentum'][i]=1
else:
df['Ten Day Momentum'][i]=-1
if df['M15'][i]>0:
df['Fifteen Day Momentum'][i]=1
else:
df['Fifteen Day Momentum'][i]=-1
if df['M20'][i]>0:
df['Twenty Day Momentum'][i]=1
else:
df['Twenty Day Momentum'][i]=-1
df['One Day Change'][i]=(df['Close'][i-1]-df['Close'][i])/(df['Close'][i])
if df['One Day Change'][i]>0:
df['One Day Trend'][i]=1
else:
df['One Day Trend'][i]=-1
df['Five Day Change'][i]=(df['Close'][i-5]-df['Close'][i])/(df['Close'][i])
if df['Five Day Change'][i]>0:
df['Five Day Trend'][i]=1
else:
df['Five Day Trend'][i]=-1
df['Ten Day Change'][i]=(df['Close'][i-10]-df['Close'][i])/(df['Close'][i])
if df['Ten Day Change'][i]>0:
df['Ten Day Trend'][i]=1
else:
df['Ten Day Trend'][i]=-1
df['Fifteen Day Change'][i]=(df['Close'][i-15]-df['Close'][i])/(df['Close'][i])
if df['Fifteen Day Change'][i]>0:
df['Fifteen Day Trend'][i]=1
else:
df['Fifteen Day Trend'][i]=-1
df['Twenty Day Change'][i]=(df['Close'][i-20]-df['Close'][i])/(df['Close'][i])
if df['Twenty Day Change'][i]>0:
df['Twenty Day Trend'][i]=1
else:
df['Twenty Day Trend'][i]=-1
print("Loop1")
for i in range(len(df)-20,len(df)):
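		# X21-X24: stochastic %K minus its 5/10/15/20-day average (%D), shifted by 50 and rescaled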
d5=0.0
for j in range(1,6):
d5+=df['K5'][i-j]
d5=d5/5
df['X21'][i]=(df['K5'][i]-d5-50)/50
d10=0.0
for j in range(1,11):
d10+=df['K10'][i-j]
d10=d10/10
df['X22'][i]=(df['K10'][i]-d10-50)/50
d15=0.0
for j in range(1,16):
d15+=df['K15'][i-j]
d15=d15/15
df['X23'][i]=(df['K15'][i]-d15-50)/50
d20=0.0
for j in range(1,21):
d20+=df['K20'][i-j]
d20=d20/20
df['X24'][i]=(df['K20'][i]-d20-50)/50
print("loop2")
#df=df.ix[20:]
#df=df.ix[:-20]
#df=df.drop(df.index[0:41])
#df=df.drop(df.index[790:len(df['Date'])])
df.drop('Unnamed: 0', axis=1, inplace=True)
df.to_csv(name)
| 32.188995 | 106 | 0.365292 |
3102c6d711ac87d1ac5d6160c439fed7b2e76845 | 1,740 | py | Python | setup.py | ihmeuw/vivarium_conic_vitamin_a_supp_gbd2019 | 5cd99c9fad9d93b69801e82835dfb1f843e7782a | [
"BSD-3-Clause"
] | null | null | null | setup.py | ihmeuw/vivarium_conic_vitamin_a_supp_gbd2019 | 5cd99c9fad9d93b69801e82835dfb1f843e7782a | [
"BSD-3-Clause"
] | null | null | null | setup.py | ihmeuw/vivarium_conic_vitamin_a_supp_gbd2019 | 5cd99c9fad9d93b69801e82835dfb1f843e7782a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
if __name__ == "__main__":
base_dir = os.path.dirname(__file__)
src_dir = os.path.join(base_dir, "src")
about = {}
with open(os.path.join(src_dir, "vivarium_conic_vitamin_a_supp_gbd2019", "__about__.py")) as f:
exec(f.read(), about)
with open(os.path.join(base_dir, "README.rst")) as f:
long_description = f.read()
install_requirements = [
'vivarium==0.10.10',
'vivarium_public_health==0.10.14',
'click',
'gbd_mapping>=3.0.0, <4.0.0',
'jinja2',
'loguru',
'numpy',
'pandas',
'scipy',
'tables',
'pyyaml',
]
# use "pip install -e .[dev]" to install required components + extra components
extras_require = [
'vivarium_cluster_tools==1.2.10',
'vivarium_inputs[data]==4.0.4',
]
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__summary__'],
long_description=long_description,
license=about['__license__'],
url=about["__uri__"],
author=about["__author__"],
author_email=about["__email__"],
package_dir={'': 'src'},
packages=find_packages(where='src'),
include_package_data=True,
install_requires=install_requirements,
extras_require={
'dev': extras_require,
},
zip_safe=False,
entry_points='''
[console_scripts]
make_artifacts=vivarium_conic_vitamin_a_supp_gbd2019.tools.cli:make_artifacts
make_results=vivarium_conic_vitamin_a_supp_gbd2019.tools.cli:make_results
'''
)
| 25.217391 | 99 | 0.597126 |
eff41738ab63d16911e60f00eb5f48d7e14e13a4 | 3,393 | py | Python | third_party/tests/YosysTestSuite/rpc/frontend.py | parzival3/Surelog | cf126533ebfb2af7df321057af9e3535feb30487 | [
"Apache-2.0"
] | 156 | 2019-11-16T17:29:55.000Z | 2022-01-21T05:41:13.000Z | third_party/tests/YosysTestSuite/rpc/frontend.py | parzival3/Surelog | cf126533ebfb2af7df321057af9e3535feb30487 | [
"Apache-2.0"
] | 414 | 2021-06-11T07:22:01.000Z | 2022-03-31T22:06:14.000Z | third_party/tests/YosysTestSuite/rpc/frontend.py | parzival3/Surelog | cf126533ebfb2af7df321057af9e3535feb30487 | [
"Apache-2.0"
] | 30 | 2019-11-18T16:31:40.000Z | 2021-12-26T01:22:51.000Z | def modules():
return ["python_inv"]
def derive(module, parameters):
assert module == r"python_inv"
if parameters.keys() != {r"\width"}:
raise ValueError("Invalid parameters")
return "ilang", r"""
module \impl
wire width {width:d} input 1 \i
wire width {width:d} output 2 \o
cell $neg $0
parameter \A_SIGNED 1'0
parameter \A_WIDTH 32'{width:b}
parameter \Y_WIDTH 32'{width:b}
connect \A \i
connect \Y \o
end
end
module \python_inv
wire width {width:d} input 1 \i
wire width {width:d} output 2 \o
cell \impl $0
connect \i \i
connect \o \o
end
end
""".format(width=parameters[r"\width"])
# ----------------------------------------------------------------------------
import json
import argparse
import sys, socket, os
try:
import msvcrt, win32pipe, win32file
except ImportError:
msvcrt = win32pipe = win32file = None
def map_parameter(parameter):
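	# Decode a parameter from the Yosys RPC JSON encoding (binary strings for numbers) into a native Python value.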
if parameter["type"] == "unsigned":
return int(parameter["value"], 2)
if parameter["type"] == "signed":
width = len(parameter["value"])
value = int(parameter["value"], 2)
if value & (1 << (width - 1)):
value = -((1 << width) - value)
return value
if parameter["type"] == "string":
return parameter["value"]
if parameter["type"] == "real":
return float(parameter["value"])
def call(input_json):
input = json.loads(input_json)
if input["method"] == "modules":
return json.dumps({"modules": modules()})
if input["method"] == "derive":
try:
frontend, source = derive(input["module"],
{name: map_parameter(value) for name, value in input["parameters"].items()})
return json.dumps({"frontend": frontend, "source": source})
except ValueError as e:
return json.dumps({"error": str(e)})
def main():
parser = argparse.ArgumentParser()
modes = parser.add_subparsers(dest="mode")
mode_stdio = modes.add_parser("stdio")
if os.name == "posix":
mode_path = modes.add_parser("unix-socket")
if os.name == "nt":
mode_path = modes.add_parser("named-pipe")
mode_path.add_argument("path")
args = parser.parse_args()
if args.mode == "stdio":
while True:
input = sys.stdin.readline()
if not input: break
sys.stdout.write(call(input) + "\n")
sys.stdout.flush()
if args.mode == "unix-socket":
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(args.path)
try:
sock.listen(1)
conn, addr = sock.accept()
file = conn.makefile("rw")
while True:
input = file.readline()
if not input: break
file.write(call(input) + "\n")
file.flush()
finally:
sock.close()
os.unlink(args.path)
if args.mode == "named-pipe":
pipe = win32pipe.CreateNamedPipe(args.path, win32pipe.PIPE_ACCESS_DUPLEX,
win32pipe.PIPE_TYPE_BYTE|win32pipe.PIPE_READMODE_BYTE|win32pipe.PIPE_WAIT,
1, 4096, 4096, 0, None)
win32pipe.ConnectNamedPipe(pipe, None)
try:
while True:
input = b""
while not input.endswith(b"\n"):
result, data = win32file.ReadFile(pipe, 4096)
assert result == 0
input += data
assert not b"\n" in input or input.endswith(b"\n")
output = (call(input.decode("utf-8")) + "\n").encode("utf-8")
length = len(output)
while length > 0:
result, done = win32file.WriteFile(pipe, output)
assert result == 0
length -= done
except win32file.error as e:
if e.args[0] == 109: # ERROR_BROKEN_PIPE
pass
else:
raise
if __name__ == "__main__":
main()
| 26.716535 | 80 | 0.644562 |
f548f72c851943202f587791ccf51c2ca1f7b02b | 545 | py | Python | server/managers/BulleManager.py | b3ckerdev/Transformice-Server | d87ef61618fbed2736b72347ccf645765ad22b66 | [
"MIT"
] | 2 | 2021-03-15T14:46:57.000Z | 2022-01-27T10:50:49.000Z | server/managers/BulleManager.py | b3ckerdev/Transformice-Server | d87ef61618fbed2736b72347ccf645765ad22b66 | [
"MIT"
] | null | null | null | server/managers/BulleManager.py | b3ckerdev/Transformice-Server | d87ef61618fbed2736b72347ccf645765ad22b66 | [
"MIT"
] | null | null | null | import random
class BulleManager:
__bulles__ = []
@staticmethod
def get():
return BulleManager.__bulles__
@staticmethod
def count():
return len(BulleManager.__bulles__)
@staticmethod
def add(bulle_ip):
BulleManager.__bulles__.append(bulle_ip)
@staticmethod
def remove(bulle_ip):
BulleManager.__bulles__.remove(bulle_ip)
@staticmethod
def get_bulle(room):
for bulle in BulleManager.__bulles__:
if room in bulle.bulle_rooms:
return bulle
return random.choice(BulleManager.__bulles__) | 20.185185 | 47 | 0.730275 |
55244a0d81363097896cadd79c5cf307e1cfad22 | 22,465 | py | Python | venv/lib/python2.7/site-packages/sslyze/plugins/robot_plugin.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | 3 | 2019-04-09T22:59:33.000Z | 2019-06-14T09:23:24.000Z | venv/lib/python2.7/site-packages/sslyze/plugins/robot_plugin.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/sslyze/plugins/robot_plugin.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import socket
import types
from enum import Enum
from typing import Optional, Tuple, Text, List, Dict, Type
from xml.etree.ElementTree import Element
import binascii
import math
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import load_pem_x509_certificate
from nassl._nassl import WantReadError
from nassl.ssl_client import ClientCertificateRequested, OpenSslVersionEnum
from tls_parser.change_cipher_spec_protocol import TlsChangeCipherSpecRecord
from sslyze.plugins import plugin_base
from sslyze.plugins.plugin_base import PluginScanResult, PluginScanCommand
from sslyze.server_connectivity_info import ServerConnectivityInfo
from tls_parser.alert_protocol import TlsAlertRecord
from tls_parser.record_protocol import TlsRecordTlsVersionBytes
from tls_parser.exceptions import NotEnoughData
from tls_parser.handshake_protocol import TlsHandshakeRecord, TlsHandshakeTypeByte, TlsRsaClientKeyExchangeRecord
from tls_parser.parser import TlsRecordParser
from tls_parser.tls_version import TlsVersionEnum
from sslyze.utils.ssl_connection import SSLHandshakeRejected
from sslyze.utils.thread_pool import ThreadPool
class RobotScanCommand(PluginScanCommand):
"""Test the server(s) for the Return Of Bleichenbacher's Oracle Threat vulnerability.
"""
@classmethod
def get_cli_argument(cls):
# type: () -> Text
return 'robot'
@classmethod
def get_title(cls):
# type: () -> Text
return 'ROBOT Attack'
@classmethod
def is_aggressive(cls):
# type: () -> bool
# Each scan spawns 10 threads
return True
class RobotPmsPaddingPayloadEnum(Enum):
VALID = 0
WRONG_FIRST_TWO_BYTES = 1
WRONG_POSITION_00 = 2
NO_00_IN_THE_MIDDLE = 3
WRONG_VERSION_NUMBER = 4
class RobotTlsRecordPayloads(object):
# From https://github.com/robotattackorg/robot-detect and testssl.sh
# The high level idea of an oracle attack is to send several payloads that are slightly wrong, in different ways,
# hoping that the server is going to give a different response (a TLS alert, a connection reset, no data, etc.) for
# each payload
_CKE_PAYLOADS_HEX = {
RobotPmsPaddingPayloadEnum.VALID: "0002{pms_padding}00{tls_version}{pms}", # noqa: E241
RobotPmsPaddingPayloadEnum.WRONG_FIRST_TWO_BYTES: "4117{pms_padding}00{tls_version}{pms}", # noqa: E241
RobotPmsPaddingPayloadEnum.WRONG_POSITION_00: "0002{pms_padding}11{pms}0011", # noqa: E241
RobotPmsPaddingPayloadEnum.NO_00_IN_THE_MIDDLE: "0002{pms_padding}111111{pms}", # noqa: E241
RobotPmsPaddingPayloadEnum.WRONG_VERSION_NUMBER: "0002{pms_padding}000202{pms}", # noqa: E241
}
_PMS_HEX = "aa112233445566778899112233445566778899112233445566778899112233445566778899112233445566778899"
@classmethod
def get_client_key_exchange_record(cls, robot_payload_enum, tls_version, modulus, exponent):
# type: (RobotPmsPaddingPayloadEnum, TlsVersionEnum, int, int) -> TlsRsaClientKeyExchangeRecord
"""A client key exchange record with a hardcoded pre_master_secret, and a valid or invalid padding.
"""
pms_padding = cls._compute_pms_padding(modulus)
tls_version_hex = binascii.b2a_hex(TlsRecordTlsVersionBytes[tls_version.name].value).decode('ascii')
pms_with_padding_payload = cls._CKE_PAYLOADS_HEX[robot_payload_enum]
final_pms = pms_with_padding_payload.format(pms_padding=pms_padding, tls_version=tls_version_hex,
pms=cls._PMS_HEX)
cke_robot_record = TlsRsaClientKeyExchangeRecord.from_parameters(
tls_version, exponent, modulus, int(final_pms, 16)
)
return cke_robot_record
@staticmethod
def _compute_pms_padding(modulus):
# type: (int) -> Text
# Generate the padding for the pre_master_scecret
modulus_bit_size = int(math.ceil(math.log(modulus, 2)))
modulus_byte_size = (modulus_bit_size + 7) // 8
# pad_len is length in hex chars, so bytelen * 2
pad_len = (modulus_byte_size - 48 - 3) * 2
pms_padding_hex = ("abcd" * (pad_len // 2 + 1))[:pad_len]
return pms_padding_hex
# Encrypted Finished record corresponding to the PMS below and the ch_def client hello in the ROBOT poc script
_FINISHED_RECORD = bytearray.fromhex(
'005091a3b6aaa2b64d126e5583b04c113259c4efa48e40a19b8e5f2542c3b1d30f8d80b7582b72f08b21dfcbff09d4b281676a0fb40'
'd48c20c4f388617ff5c00808a96fbfe9bb6cc631101a6ba6b6bc696f0'
)
@classmethod
def get_finished_record_bytes(cls, tls_version):
# type: (TlsVersionEnum) -> bytes
"""The Finished TLS record corresponding to the hardcoded PMS used in the Client Key Exchange record.
"""
# TODO(AD): The ROBOT poc script uses the same Finished record for all possible client hello (default, GCM,
# etc.); as the Finished record contains a hashes of all previous records, it will be wrong and will cause
# servers to send a TLS Alert 20
# Here just like in the poc script, the Finished message does not match the Client Hello we sent
return b'\x16' + TlsRecordTlsVersionBytes[tls_version.name].value + cls._FINISHED_RECORD
class RobotScanResultEnum(Enum):
"""An enum to provide the result of running a RobotScanCommand.
"""
VULNERABLE_WEAK_ORACLE = 1 #: The server is vulnerable but the attack would take too long
VULNERABLE_STRONG_ORACLE = 2 #: The server is vulnerable and real attacks are feasible
NOT_VULNERABLE_NO_ORACLE = 3 #: The server supports RSA cipher suites but does not act as an oracle
NOT_VULNERABLE_RSA_NOT_SUPPORTED = 4 #: The server does not supports RSA cipher suites
UNKNOWN_INCONSISTENT_RESULTS = 5 #: Could not determine whether the server is vulnerable or not
class RobotServerResponsesAnalyzer(object):
def __init__(self, payload_responses):
# type: (Dict[RobotPmsPaddingPayloadEnum, List[Text]]) -> None
# A mapping of a ROBOT payload enum -> a list of two server responses as text
self._payload_responses = payload_responses
def compute_result_enum(self):
# type: () -> RobotScanResultEnum
"""Look at the server's response to each ROBOT payload and return the conclusion of the analysis.
"""
# Ensure the results were consistent
for payload_enum, server_responses in self._payload_responses.items():
# We ran the check twice per payload and the two responses should be the same
if server_responses[0] != server_responses[1]:
return RobotScanResultEnum.UNKNOWN_INCONSISTENT_RESULTS
# Check if the server acts as an oracle by checking if the server replied differently to the payloads
if len(set([server_responses[0] for server_responses in self._payload_responses.values()])) == 1:
# All server responses were identical - no oracle
return RobotScanResultEnum.NOT_VULNERABLE_NO_ORACLE
# All server responses were NOT identical, server is vulnerable
# Check to see if it is a weak oracle
response_1 = self._payload_responses[RobotPmsPaddingPayloadEnum.WRONG_FIRST_TWO_BYTES][0]
response_2 = self._payload_responses[RobotPmsPaddingPayloadEnum.WRONG_POSITION_00][0]
response_3 = self._payload_responses[RobotPmsPaddingPayloadEnum.NO_00_IN_THE_MIDDLE][0]
# From the original script:
# If the response to the invalid PKCS#1 request (oracle_bad1) is equal to both
# requests starting with 0002, we have a weak oracle. This is because the only
# case where we can distinguish valid from invalid requests is when we send
# correctly formatted PKCS#1 message with 0x00 on a correct position. This
# makes our oracle weak
if response_1 == response_2 == response_3:
return RobotScanResultEnum.VULNERABLE_WEAK_ORACLE
else:
return RobotScanResultEnum.VULNERABLE_STRONG_ORACLE
class RobotPlugin(plugin_base.Plugin):
"""Test the server(s) for the Return Of Bleichenbacher's Oracle Threat vulnerability.
"""
@classmethod
def get_available_commands(cls):
# type: () -> List[Type[PluginScanCommand]]
return [RobotScanCommand]
def process_task(self, server_info, scan_command):
# type: (ServerConnectivityInfo, PluginScanCommand) -> RobotScanResult
if not isinstance(scan_command, RobotScanCommand):
raise ValueError('Unexpected scan command')
rsa_params = None
# With TLS 1.2 some servers are only vulnerable when using the GCM cipher suites - try them first
if server_info.highest_ssl_version_supported == OpenSslVersionEnum.TLSV1_2:
cipher_string = 'AES128-GCM-SHA256:AES256-GCM-SHA384'
rsa_params = self._get_rsa_parameters(server_info, cipher_string)
if rsa_params is None:
# The attempts with GCM TLS 1.2 RSA cipher suites failed - try the normal RSA cipher suites
cipher_string = 'RSA'
rsa_params = self._get_rsa_parameters(server_info, cipher_string)
if rsa_params is None:
# Could not connect to the server using RSA - not vulnerable
return RobotScanResult(server_info, scan_command, RobotScanResultEnum.NOT_VULNERABLE_RSA_NOT_SUPPORTED)
rsa_modulus, rsa_exponent = rsa_params
# On the first attempt, finish the TLS handshake after sending the Robot payload
robot_should_complete_handshake = True
robot_result_enum = self._run_oracle_over_threads(server_info, cipher_string, rsa_modulus, rsa_exponent,
robot_should_complete_handshake)
if robot_result_enum == RobotScanResultEnum.NOT_VULNERABLE_NO_ORACLE:
# Try again but this time do not finish the TLS handshake - for some servers it will reveal an oracle
robot_should_complete_handshake = False
robot_result_enum = self._run_oracle_over_threads(server_info, cipher_string, rsa_modulus, rsa_exponent,
robot_should_complete_handshake)
return RobotScanResult(server_info, scan_command, robot_result_enum)
@classmethod
def _run_oracle_over_threads(cls, server_info, cipher_string, rsa_modulus, rsa_exponent, should_complete_handshake):
# type: (ServerConnectivityInfo, Text, int, int, bool) -> RobotScanResultEnum
# Use threads to speed things up
thread_pool = ThreadPool()
for payload_enum in RobotPmsPaddingPayloadEnum:
# Run each payload twice to ensure the results are consistent
thread_pool.add_job((cls._send_robot_payload, [server_info, cipher_string, payload_enum,
should_complete_handshake, rsa_modulus, rsa_exponent]))
thread_pool.add_job((cls._send_robot_payload, [server_info, cipher_string, payload_enum,
should_complete_handshake, rsa_modulus, rsa_exponent]))
# Use one thread per check
thread_pool.start(nb_threads=len(RobotPmsPaddingPayloadEnum) * 2)
# Store the results - two attempts per ROBOT payload
payload_responses = {
RobotPmsPaddingPayloadEnum.VALID: [],
RobotPmsPaddingPayloadEnum.WRONG_FIRST_TWO_BYTES: [],
RobotPmsPaddingPayloadEnum.WRONG_POSITION_00: [],
RobotPmsPaddingPayloadEnum.NO_00_IN_THE_MIDDLE: [],
RobotPmsPaddingPayloadEnum.WRONG_VERSION_NUMBER: [],
} # type: Dict[RobotPmsPaddingPayloadEnum, List[Text]]
for completed_job in thread_pool.get_result():
(job, (payload_enum, server_response)) = completed_job
payload_responses[payload_enum].append(server_response)
for failed_job in thread_pool.get_error():
# Should never happen when running the Robot check as we catch all exceptions in the handshake
(_, exception) = failed_job
raise exception
thread_pool.join()
return RobotServerResponsesAnalyzer(payload_responses).compute_result_enum()
@staticmethod
def _get_rsa_parameters(server_info, openssl_cipher_string):
# type: (ServerConnectivityInfo, Text) -> Optional[Tuple[int, int]]
ssl_connection = server_info.get_preconfigured_ssl_connection()
ssl_connection.ssl_client.set_cipher_list(openssl_cipher_string)
parsed_cert = None
try:
# Perform the SSL handshake
ssl_connection.connect()
certificate = ssl_connection.ssl_client.get_peer_certificate()
parsed_cert = load_pem_x509_certificate(certificate.as_pem().encode('ascii'), backend=default_backend())
except SSLHandshakeRejected:
# Server does not support RSA cipher suites?
pass
except ClientCertificateRequested:
# AD: The server asked for a client cert. We could still retrieve the server certificate, but it is unclear
# to me if the ROBOT check is supposed to work even if we do not provide a client cert. My guess is that
# it should not work since it requires completing a full handshake, which we can't without a client cert.
# Hence, propagate the error to make the check fail.
raise
finally:
ssl_connection.close()
if parsed_cert:
return parsed_cert.public_key().public_numbers().n, parsed_cert.public_key().public_numbers().e
else:
return None
@staticmethod
def _send_robot_payload(
server_info, # type: ServerConnectivityInfo
rsa_cipher_string, # type: Text
robot_payload_enum, # type: RobotPmsPaddingPayloadEnum
robot_should_finish_handshake, # type: bool
rsa_modulus, # type: int
rsa_exponent # type: int
):
# type: (...) -> Tuple[RobotPmsPaddingPayloadEnum, Text]
# Do a handshake which each record and keep track of what the server returned
ssl_connection = server_info.get_preconfigured_ssl_connection()
# Replace nassl.sslClient.do_handshake() with a ROBOT checking SSL handshake so that all the SSLyze
# options (startTLS, proxy, etc.) still work
ssl_connection.ssl_client.do_handshake = types.MethodType(do_handshake_with_robot,
ssl_connection.ssl_client)
ssl_connection.ssl_client.set_cipher_list(rsa_cipher_string)
# Compute the payload
cke_payload = RobotTlsRecordPayloads.get_client_key_exchange_record(
robot_payload_enum, server_info.highest_ssl_version_supported, rsa_modulus, rsa_exponent
)
# H4ck: we need to pass some arguments to the handshake but there is no simple way to do it; we use an attribute
ssl_connection.ssl_client._robot_cke_record = cke_payload
ssl_connection.ssl_client._robot_should_finish_handshake = robot_should_finish_handshake
server_response = ''
try:
# Start the SSL handshake
ssl_connection.connect()
except ServerResponseToRobot as e:
# Should always be thrown
server_response = e.server_response
finally:
ssl_connection.close()
return robot_payload_enum, server_response
class ServerResponseToRobot(Exception):
def __init__(self, server_response):
# type: (Text) -> None
# Could be a TLS alert or some data, always as text so we can easily detect different responses
self.server_response = server_response
def do_handshake_with_robot(self): # type: ignore
"""Modified do_handshake() to send a ROBOT payload and return the result.
"""
try:
# Start the handshake using nassl - will throw WantReadError right away
self._ssl.do_handshake()
except WantReadError:
# Send the Client Hello
len_to_read = self._network_bio.pending()
while len_to_read:
# Get the data from the SSL engine
handshake_data_out = self._network_bio.read(len_to_read)
# Send it to the peer
self._sock.send(handshake_data_out)
len_to_read = self._network_bio.pending()
# Retrieve the server's response - directly read the underlying network socket
# Retrieve data until we get to the ServerHelloDone
# The server may send back a ServerHello, an Alert or a CertificateRequest first
did_receive_hello_done = False
remaining_bytes = b''
while not did_receive_hello_done:
try:
tls_record, len_consumed = TlsRecordParser.parse_bytes(remaining_bytes)
remaining_bytes = remaining_bytes[len_consumed::]
except NotEnoughData:
# Try to get more data
raw_ssl_bytes = self._sock.recv(16381)
if not raw_ssl_bytes:
# No data?
break
remaining_bytes = remaining_bytes + raw_ssl_bytes
continue
if isinstance(tls_record, TlsHandshakeRecord):
# Does the record contain a ServerDone message?
for handshake_message in tls_record.subprotocol_messages:
if handshake_message.handshake_type == TlsHandshakeTypeByte.SERVER_DONE:
did_receive_hello_done = True
break
# If not, it could be a ServerHello, Certificate or a CertificateRequest if the server requires client auth
elif isinstance(tls_record, TlsAlertRecord):
# Server returned a TLS alert
break
else:
raise ValueError('Unknown record? Type {}'.format(tls_record.header.type))
if did_receive_hello_done:
# Send a special Client Key Exchange Record as the payload
self._sock.send(self._robot_cke_record.to_bytes())
if self._robot_should_finish_handshake:
# Then send a CCS record
ccs_record = TlsChangeCipherSpecRecord.from_parameters(
tls_version=TlsVersionEnum[self._ssl_version.name]
)
self._sock.send(ccs_record.to_bytes())
# Lastly send a Finished record
finished_record_bytes = RobotTlsRecordPayloads.get_finished_record_bytes(self._ssl_version)
self._sock.send(finished_record_bytes)
# Return whatever the server sent back by raising an exception
# The goal is to detect similar/different responses
while True:
try:
tls_record, len_consumed = TlsRecordParser.parse_bytes(remaining_bytes)
remaining_bytes = remaining_bytes[len_consumed::]
except NotEnoughData:
# Try to get more data
try:
raw_ssl_bytes = self._sock.recv(16381)
if not raw_ssl_bytes:
# No data?
raise ServerResponseToRobot('No data')
except socket.error as e:
# Server closed the connection after receiving the CCS payload
raise ServerResponseToRobot('socket.error {}'.format(str(e)))
remaining_bytes = remaining_bytes + raw_ssl_bytes
continue
if isinstance(tls_record, TlsAlertRecord):
raise ServerResponseToRobot('TLS Alert {} {}'.format(tls_record.alert_description,
tls_record.alert_severity))
else:
break
raise ServerResponseToRobot('Ok')
class RobotScanResult(PluginScanResult):
"""The result of running a RobotScanCommand on a specific server.
Attributes:
robot_result_enum (RobotScanResultEnum): An Enum providing the result of the Robot scan.
"""
def __init__(self, server_info, scan_command, robot_result_enum):
# type: (ServerConnectivityInfo, RobotScanCommand, RobotScanResultEnum) -> None
super(RobotScanResult, self).__init__(server_info, scan_command)
self.robot_result_enum = robot_result_enum
def as_text(self):
# type: () -> List[Text]
if self.robot_result_enum == RobotScanResultEnum.VULNERABLE_STRONG_ORACLE:
robot_txt = 'VULNERABLE - Strong oracle, a real attack is possible'
elif self.robot_result_enum == RobotScanResultEnum.VULNERABLE_WEAK_ORACLE:
robot_txt = 'VULNERABLE - Weak oracle, the attack would take too long'
elif self.robot_result_enum == RobotScanResultEnum.NOT_VULNERABLE_NO_ORACLE:
robot_txt = 'OK - Not vulnerable'
elif self.robot_result_enum == RobotScanResultEnum.NOT_VULNERABLE_RSA_NOT_SUPPORTED:
robot_txt = 'OK - Not vulnerable, RSA cipher suites not supported'
elif self.robot_result_enum == RobotScanResultEnum.UNKNOWN_INCONSISTENT_RESULTS:
robot_txt = 'UNKNOWN - Received inconsistent results'
else:
raise ValueError('Should never happen')
return [self._format_title(self.scan_command.get_title()), self._format_field('', robot_txt)]
def as_xml(self):
# type: () -> Element
xml_output = Element(self.scan_command.get_cli_argument(), title=self.scan_command.get_title())
xml_output.append(Element('robotAttack', resultEnum=self.robot_result_enum.name))
return xml_output
| 49.373626 | 121 | 0.668329 |
195fdfcbf0ab13e99ed72417886818cb28db21e5 | 3,318 | py | Python | tensorflow_datasets/testing/fake_data_generation/flic.py | shashwat9kumar/datasets | 99b055408025f8e934fcbb0fc054488aa087ebfb | [
"Apache-2.0"
] | 1 | 2021-05-10T10:41:27.000Z | 2021-05-10T10:41:27.000Z | tensorflow_datasets/testing/fake_data_generation/flic.py | shashwat9kumar/datasets | 99b055408025f8e934fcbb0fc054488aa087ebfb | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/testing/fake_data_generation/flic.py | shashwat9kumar/datasets | 99b055408025f8e934fcbb0fc054488aa087ebfb | [
"Apache-2.0"
] | 1 | 2021-08-02T22:12:40.000Z | 2021-08-02T22:12:40.000Z | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates FLIC like files with random data for testing."""
import os
from absl import app
from absl import flags
import numpy as np
import scipy.io
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core.utils import py_utils
from tensorflow_datasets.testing import fake_data_utils
flags.DEFINE_string("tfds_dir", py_utils.tfds_dir(),
"Path to tensorflow_datasets directory")
FLAGS = flags.FLAGS
def _output_dir(data):
"""Returns output directory."""
dname = "FLIC" if data == "small" else "FLIC-full"
return os.path.join(FLAGS.tfds_dir, "testing", "test_data", "fake_examples",
"flic", dname)
def _generate_image(data, fdir, fname):
dirname = os.path.join(_output_dir(data), fdir)
if not os.path.exists(dirname):
os.makedirs(dirname)
tf.io.gfile.copy(
fake_data_utils.get_random_jpeg(480, 720),
os.path.join(dirname, fname),
overwrite=True)
def _generate_mat(data, train_fname, test_fname):
"""Generate MAT file for given data type (small or full)."""
dirname = os.path.join(_output_dir(data), "examples.mat")
data = {
"examples":
np.array([
np.array([
np.array([1, 2, 3], dtype=np.uint16),
"example_movie",
np.array(
[np.array([1.0, 2.0, 3.0]),
np.array([1.0, 2.0, 3.0])]),
train_fname,
np.array([1.0, 2.0, 3.0]),
1.0,
np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
True,
False,
]),
np.array([
np.array([1, 2, 3], dtype=np.uint16),
"example_movie",
np.array(
[np.array([1.0, 2.0, 3.0]),
np.array([1.0, 2.0, 3.0])]),
test_fname,
np.array([1.0, 2.0, 3.0]),
1.0,
np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
False,
True,
]),
]),
}
scipy.io.savemat(dirname, data)
def main(unused_argv):
_generate_image("small", "images", "example_movie00000001.jpg")
_generate_image("small", "images", "example_movie00000002.jpg")
_generate_mat("small", "example_movie00000001.jpg",
"example_movie00000002.jpg")
_generate_image("full", "images", "example_movie00000003.jpg")
_generate_image("full", "images", "example_movie00000004.jpg")
_generate_mat("full", "example_movie00000003.jpg",
"example_movie00000004.jpg")
if __name__ == "__main__":
app.run(main)
| 31.6 | 78 | 0.589512 |
69ba4ed8e89c06f186b94b0a9345b10c1f7f72e9 | 8,727 | py | Python | db_rest_api/test_api.py | min-yin-sri/indra | 93d4cb8b23764a2775f9dbdf5eb73b6053006d73 | [
"BSD-2-Clause"
] | 2 | 2020-01-14T08:59:10.000Z | 2020-12-18T16:21:38.000Z | db_rest_api/test_api.py | min-yin-sri/indra | 93d4cb8b23764a2775f9dbdf5eb73b6053006d73 | [
"BSD-2-Clause"
] | null | null | null | db_rest_api/test_api.py | min-yin-sri/indra | 93d4cb8b23764a2775f9dbdf5eb73b6053006d73 | [
"BSD-2-Clause"
] | null | null | null | import unittest
import json
import sys
from itertools import combinations
from datetime import datetime
from db_rest_api.api import MAX_STATEMENTS
from indra.statements import stmts_from_json
from db_rest_api import api
TIMELIMIT = 1
SIZELIMIT = 4e7
class DbApiTestCase(unittest.TestCase):
def setUp(self):
api.app.testing = True
self.app = api.app.test_client()
def tearDown(self):
pass
def __time_get_query(self, end_point, query_str):
start_time = datetime.now()
resp = self.app.get('/%s/?%s' % (end_point, query_str))
t_delta = datetime.now() - start_time
dt = t_delta.seconds + t_delta.microseconds/1e6
print(dt)
size = int(resp.headers['Content-Length'])
raw_size = sys.getsizeof(resp.data)
print("Raw size: {raw:f}/{lim:f}, Compressed size: {comp:f}/{lim:f}."
.format(raw=raw_size/1e6, lim=SIZELIMIT/1e6, comp=size/1e6))
return resp, dt, size
def __check_good_statement_query(self, *args, **kwargs):
check_stmts = kwargs.pop('check_stmts', True)
time_limit = kwargs.pop('time_limit', TIMELIMIT)
query_str = '&'.join(['%s=%s' % (k, v) for k, v in kwargs.items()]
+ list(args))
resp, dt, size = self.__time_get_query('statements', query_str)
assert resp.status_code == 200, \
('Got error code %d: \"%s\".'
% (resp.status_code, resp.data.decode()))
resp_dict = json.loads(resp.data.decode('utf-8'))
assert not resp_dict['limited']
json_stmts = resp_dict['statements']
        assert len(json_stmts) != 0, \
'Did not get any statements.'
assert size <= SIZELIMIT, \
("Query took up %f MB. Must be less than %f MB."
% (size/1e6, SIZELIMIT/1e6))
stmts = stmts_from_json(json_stmts)
assert all([s.evidence for s in stmts]), \
"Some statements lack evidence."
# To allow for faster response-times, we currently do not include
# support links in the response.
# assert any([s.supports + s.supported_by for s in stmts]),\
# ("Some statements lack support: %s."
# % str([str(s) for s in stmts if not s.supports+s.supported_by]))
# if check_stmts:
# assert all([not s1.matches(s2)
# for s1, s2 in combinations(stmts, 2)]),\
# ("Some statements match: %s."
# % str([(s1, s2) for s1, s2 in combinations(stmts, 2)
# if s1.matches(s2)]))
assert dt <= time_limit, \
("Query took %f seconds. Must be less than %f seconds."
% (dt, time_limit))
return resp
def test_blank_response(self):
"""Test the response to an empty request."""
resp, dt, size = self.__time_get_query('statements', '')
assert resp.status_code == 400, \
('Got unexpected response with code %d: %s.'
% (resp.status_code, resp.data.decode()))
assert dt <= TIMELIMIT, \
("Query took %f seconds. Must be less than %f seconds."
% (dt, TIMELIMIT))
assert size <= SIZELIMIT, \
"Query took up %f MB. Must be less than %f MB." % (size/1e6,
SIZELIMIT/1e6)
def test_specific_query(self):
"""Test whether we can get a "fully" specified statement."""
self.__check_good_statement_query(object='MAP2K1', subject='MAPK1',
type='Phosphorylation')
def test_object_only_query(self):
"""Test whether we can get an object only statement."""
self.__check_good_statement_query(object='GLUL',
type='IncreaseAmount')
def test_query_with_two_agents(self):
"""Test a query were the roles of the agents are not given."""
self.__check_good_statement_query('agent=MAP2K1', 'agent=MAPK1',
type='Phosphorylation')
def test_query_with_other(self):
"""Test that we can get an ActiveForm."""
self.__check_good_statement_query(agent='MAPK1', type='ActiveForm')
def test_bad_camel(self):
"""Test that a type can be poorly formatted and resolve correctly."""
self.__check_good_statement_query(agent='MAPK1', type='acTivefOrm')
def test_big_query(self):
"""Load-test with several big queries."""
self.__check_good_statement_query(agent='AKT1', check_stmts=False,
time_limit=5)
self.__check_good_statement_query(agent='MAPK1', check_stmts=False,
time_limit=10)
def test_query_with_too_many_stmts(self):
"""Test our check of statement length and the response."""
resp, dt, size = self.__time_get_query('statements',
'agent=TP53&on_limit=error')
assert resp.status_code == 413, "Unexpected status code: %s" % str(resp)
assert dt < 30, "Query took too long: %d" % dt
assert 'Acetylation' in json.loads(resp.data.decode('utf-8'))['statements']
resp, dt, size = self.__time_get_query('statements',
'agent=TP53&on_limit=sample')
assert resp.status_code == 200, str(resp)
assert dt < 30, dt
resp_dict = json.loads(resp.data.decode('utf-8'))
assert len(resp_dict['statements']) == MAX_STATEMENTS
resp, dt, size = self.__time_get_query('statements',
'agent=TP53&on_limit=truncate')
def test_query_with_hgnc_ns(self):
"""Test specifying HGNC as a namespace."""
self.__check_good_statement_query(subject='6871@HGNC', object='MAP2K1',
type='Phosphorylation')
def test_query_with_text_ns(self):
"""Test specifying TEXT as a namespace."""
self.__check_good_statement_query(subject='ERK@TEXT', type='Phosphorylation')
def test_query_with_hgnc_symbol_ns(self):
"""Test specifying HGNC-SYMBOL as a namespace."""
self.__check_good_statement_query(subject='MAPK1@HGNC-SYMBOL',
type='Phosphorylation')
def test_query_with_chebi_ns(self):
"""Test specifying CHEBI as a namespace."""
self.__check_good_statement_query(subject='CHEBI:6801@CHEBI')
def test_query_with_bad_hgnc(self):
resp, dt, size = self.__time_get_query('statements',
('subject=MEK&object=ERK'
'&type=Phosphorylation'))
assert resp.status_code != 200, "Got good status code."
assert dt <= TIMELIMIT, dt
assert size <= SIZELIMIT, size
def test_famplex_query(self):
resp, dt, size = self.__time_get_query('statements',
('subject=PDGF@FPLX'
'&object=FOS'
'&type=Phosphorylation'))
resp_dict = json.loads(resp.data.decode('utf-8'))
stmts = stmts_from_json(resp_dict['statements'])
assert len(stmts)
assert all([s.agent_list()[0].db_refs.get('FPLX') == 'PDGF'
for s in stmts]),\
'Not all subjects match.'
assert dt <= TIMELIMIT, dt
assert size <= SIZELIMIT, size
def __test_basic_paper_query(self, id_val, id_type, min_num_results=1):
query_str = 'id=%s&type=%s' % (id_val, id_type)
resp, dt, size = self.__time_get_query('papers', query_str)
assert dt <= TIMELIMIT, dt
assert size <= SIZELIMIT, size
assert resp.status_code == 200, str(resp)
json_str = resp.data.decode('utf-8')
json_list = json.loads(json_str)['statements']
assert len(json_list) >= min_num_results, (min_num_results,
len(json_list))
return
def test_pmid_paper_query(self):
self.__test_basic_paper_query('8436299', 'pmid')
# Now check without pmid specified (should be assumed.)
resp, _, _ = self.__time_get_query('papers', 'id=8436299')
assert resp.status_code == 200, str(resp)
def test_pmcid_paper_query(self):
self.__test_basic_paper_query('PMC5770457', 'pmcid')
def test_trid_paper_query(self):
self.__test_basic_paper_query('28145129', 'trid')
if __name__ == '__main__':
unittest.main()
| 42.570732 | 85 | 0.576143 |
21ee399f035f3042134c162511659e50c2a90e93 | 5,230 | py | Python | crabageprediction/venv/Lib/site-packages/numpy/f2py/diagnose.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 20,453 | 2015-01-02T09:00:47.000Z | 2022-03-31T23:35:56.000Z | crabageprediction/venv/Lib/site-packages/numpy/f2py/diagnose.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 14,862 | 2015-01-01T01:28:34.000Z | 2022-03-31T23:48:52.000Z | bot/lib/python3.7/site-packages/numpy/f2py/diagnose.py | carlosrh18/DavinciBot | d73a6b7f68d7bab25d134d3f85c6b63a86c206c5 | [
"MIT"
] | 9,362 | 2015-01-01T15:49:43.000Z | 2022-03-31T21:26:51.000Z | #!/usr/bin/env python3
import os
import sys
import tempfile
def run_command(cmd):
print('Running %r:' % (cmd))
os.system(cmd)
print('------')
def run():
_path = os.getcwd()
os.chdir(tempfile.gettempdir())
print('------')
print('os.name=%r' % (os.name))
print('------')
print('sys.platform=%r' % (sys.platform))
print('------')
print('sys.version:')
print(sys.version)
print('------')
print('sys.prefix:')
print(sys.prefix)
print('------')
print('sys.path=%r' % (':'.join(sys.path)))
print('------')
try:
import numpy
has_newnumpy = 1
except ImportError:
print('Failed to import new numpy:', sys.exc_info()[1])
has_newnumpy = 0
try:
from numpy.f2py import f2py2e
has_f2py2e = 1
except ImportError:
print('Failed to import f2py2e:', sys.exc_info()[1])
has_f2py2e = 0
try:
import numpy.distutils
has_numpy_distutils = 2
except ImportError:
try:
import numpy_distutils
has_numpy_distutils = 1
except ImportError:
print('Failed to import numpy_distutils:', sys.exc_info()[1])
has_numpy_distutils = 0
if has_newnumpy:
try:
print('Found new numpy version %r in %s' %
(numpy.__version__, numpy.__file__))
except Exception as msg:
print('error:', msg)
print('------')
if has_f2py2e:
try:
print('Found f2py2e version %r in %s' %
(f2py2e.__version__.version, f2py2e.__file__))
except Exception as msg:
print('error:', msg)
print('------')
if has_numpy_distutils:
try:
if has_numpy_distutils == 2:
print('Found numpy.distutils version %r in %r' % (
numpy.distutils.__version__,
numpy.distutils.__file__))
else:
print('Found numpy_distutils version %r in %r' % (
numpy_distutils.numpy_distutils_version.numpy_distutils_version,
numpy_distutils.__file__))
print('------')
except Exception as msg:
print('error:', msg)
print('------')
try:
if has_numpy_distutils == 1:
print(
'Importing numpy_distutils.command.build_flib ...', end=' ')
import numpy_distutils.command.build_flib as build_flib
print('ok')
print('------')
try:
print(
'Checking availability of supported Fortran compilers:')
for compiler_class in build_flib.all_compilers:
compiler_class(verbose=1).is_available()
print('------')
except Exception as msg:
print('error:', msg)
print('------')
except Exception as msg:
print(
                'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)')
print('------')
try:
if has_numpy_distutils == 2:
print('Importing numpy.distutils.fcompiler ...', end=' ')
import numpy.distutils.fcompiler as fcompiler
else:
print('Importing numpy_distutils.fcompiler ...', end=' ')
import numpy_distutils.fcompiler as fcompiler
print('ok')
print('------')
try:
print('Checking availability of supported Fortran compilers:')
fcompiler.show_fcompilers()
print('------')
except Exception as msg:
print('error:', msg)
print('------')
except Exception as msg:
print('error:', msg)
print('------')
try:
if has_numpy_distutils == 2:
print('Importing numpy.distutils.cpuinfo ...', end=' ')
from numpy.distutils.cpuinfo import cpuinfo
print('ok')
print('------')
else:
try:
print(
'Importing numpy_distutils.command.cpuinfo ...', end=' ')
from numpy_distutils.command.cpuinfo import cpuinfo
print('ok')
print('------')
except Exception as msg:
print('error:', msg, '(ignore it)')
print('Importing numpy_distutils.cpuinfo ...', end=' ')
from numpy_distutils.cpuinfo import cpuinfo
print('ok')
print('------')
cpu = cpuinfo()
print('CPU information:', end=' ')
for name in dir(cpuinfo):
if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])():
print(name[1:], end=' ')
print('------')
except Exception as msg:
print('error:', msg)
print('------')
os.chdir(_path)
if __name__ == "__main__":
run()
| 33.741935 | 102 | 0.478967 |
449881578f011e35bf3228d4b17eb15e702469a8 | 18,506 | gyp | Python | Source/devtools/devtools.gyp | primiano/blink-gitcs | 0b5424070e3006102e0036deea1e2e263b871eaa | ["BSD-3-Clause"] | 1 | 2017-08-25T05:15:52.000Z | 2017-08-25T05:15:52.000Z | Source/devtools/devtools.gyp | primiano/blink-gitcs | 0b5424070e3006102e0036deea1e2e263b871eaa | ["BSD-3-Clause"] | null | null | null | Source/devtools/devtools.gyp | primiano/blink-gitcs | 0b5424070e3006102e0036deea1e2e263b871eaa | ["BSD-3-Clause"] | null | null | null |
#
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
{
'includes': [
'devtools.gypi',
],
'targets': [
{
'target_name': 'devtools_frontend_resources',
'type': 'none',
'dependencies': [
'supported_css_properties',
'frontend_protocol_sources',
'build_applications',
],
'conditions': [
['debug_devtools==0', {
'dependencies': [
'concatenated_devtools_css',
'concatenated_inspector_css',
'concatenated_toolbox_css',
],
}],
],
'copies': [
{
'destination': '<(PRODUCT_DIR)/resources/inspector/Images',
'files': [
'<@(devtools_image_files)',
],
},
],
},
{
'target_name': 'devtools_extension_api',
'type': 'none',
'actions': [{
'action_name': 'devtools_extension_api',
'script_name': 'scripts/generate_devtools_extension_api.py',
'inputs': [
'<@(_script_name)',
'<@(devtools_extension_api_files)',
],
'outputs': ['<(PRODUCT_DIR)/resources/inspector/devtools_extension_api.js'],
'action': ['python', '<@(_script_name)', '<@(_outputs)', '<@(devtools_extension_api_files)'],
}],
},
{
'target_name': 'generate_devtools_grd',
'type': 'none',
'dependencies': [
'devtools_extension_api',
'devtools_frontend_resources',
],
'conditions': [
['debug_devtools==0', {
'actions': [{
'action_name': 'generate_devtools_grd',
'script_name': 'scripts/generate_devtools_grd.py',
'relative_path_dirs': [
'<(PRODUCT_DIR)/resources/inspector',
'front_end'
],
'static_files': [
# Intentionally empty. Should get rebuilt when switching from debug_devtools==1.
],
'devtools_static_files_list': '<|(devtools_static_grd_files.tmp <@(_static_files))',
'generated_files': [
'<(PRODUCT_DIR)/resources/inspector/devtools.css',
'<(PRODUCT_DIR)/resources/inspector/devtools.html',
'<(PRODUCT_DIR)/resources/inspector/devtools.js',
'<(PRODUCT_DIR)/resources/inspector/inspector.css',
'<(PRODUCT_DIR)/resources/inspector/inspector.html',
'<(PRODUCT_DIR)/resources/inspector/inspector.js',
'<(PRODUCT_DIR)/resources/inspector/toolbox.css',
'<(PRODUCT_DIR)/resources/inspector/toolbox.html',
'<(PRODUCT_DIR)/resources/inspector/toolbox.js',
'<(PRODUCT_DIR)/resources/inspector/audits_module.js',
'<(PRODUCT_DIR)/resources/inspector/console_module.js',
'<(PRODUCT_DIR)/resources/inspector/devices_module.js',
'<(PRODUCT_DIR)/resources/inspector/elements_module.js',
'<(PRODUCT_DIR)/resources/inspector/heap_snapshot_worker_module.js',
'<(PRODUCT_DIR)/resources/inspector/layers_module.js',
'<(PRODUCT_DIR)/resources/inspector/network_module.js',
'<(PRODUCT_DIR)/resources/inspector/profiler_module.js',
'<(PRODUCT_DIR)/resources/inspector/promises_module.js',
'<(PRODUCT_DIR)/resources/inspector/resources_module.js',
'<(PRODUCT_DIR)/resources/inspector/script_formatter_worker_module.js',
'<(PRODUCT_DIR)/resources/inspector/settings_module.js',
'<(PRODUCT_DIR)/resources/inspector/source_frame_module.js',
'<(PRODUCT_DIR)/resources/inspector/sources_module.js',
'<(PRODUCT_DIR)/resources/inspector/temp_storage_shared_worker_module.js',
'<(PRODUCT_DIR)/resources/inspector/timeline_module.js',
'<(PRODUCT_DIR)/resources/inspector/devtools_extension_api.js',
],
'inputs': [
'<@(_script_name)',
'<@(_static_files)',
'<@(_generated_files)',
'<@(devtools_image_files)',
'<(_devtools_static_files_list)',
],
'images_path': [
'front_end/Images',
],
'outputs': ['<(SHARED_INTERMEDIATE_DIR)/devtools/devtools_resources.grd'],
'action': ['python', '<@(_script_name)', '<@(_generated_files)', '--static_files_list', '<(_devtools_static_files_list)', '--relative_path_dirs', '<@(_relative_path_dirs)', '--images', '<@(_images_path)', '--output', '<@(_outputs)'],
}],
},
{
# If we're not concatenating devtools files, we want to
# run after the original files have been copied to
# <(PRODUCT_DIR)/resources/inspector.
'dependencies': ['devtools_frontend_resources'],
'actions': [{
'action_name': 'generate_devtools_grd',
'script_name': 'scripts/generate_devtools_grd.py',
'relative_path_dirs': [
'front_end',
'<(PRODUCT_DIR)/resources/inspector',
],
'static_files': [
'<@(all_devtools_files)',
'front_end/Runtime.js',
],
'devtools_static_files_list': '<|(devtools_static_grd_files.tmp <@(_static_files))',
'generated_files': [
'<(PRODUCT_DIR)/resources/inspector/InspectorBackendCommands.js',
'<(PRODUCT_DIR)/resources/inspector/SupportedCSSProperties.js',
'<(PRODUCT_DIR)/resources/inspector/devtools.html',
'<(PRODUCT_DIR)/resources/inspector/inspector.html',
'<(PRODUCT_DIR)/resources/inspector/toolbox.html',
],
'inputs': [
'<@(_script_name)',
'<@(_static_files)',
'<@(_generated_files)',
'<@(devtools_image_files)',
'<(_devtools_static_files_list)',
],
'images_path': [
'front_end/Images',
],
# Note that other files are put under /devtools directory, together with declared devtools_resources.grd
'outputs': ['<(SHARED_INTERMEDIATE_DIR)/devtools/devtools_resources.grd'],
'action': ['python', '<@(_script_name)', '<@(_generated_files)', '--static_files_list', '<(_devtools_static_files_list)', '--relative_path_dirs', '<@(_relative_path_dirs)', '--images', '<@(_images_path)', '--output', '<@(_outputs)'],
}],
}],
],
},
{
'target_name': 'frontend_protocol_sources',
'type': 'none',
'actions': [
{
'action_name': 'generateInspectorProtocolFrontendSources',
'inputs': [
# The python script in action below.
'scripts/CodeGeneratorFrontend.py',
# Input file for the script.
'protocol.json',
],
'outputs': [
'<(PRODUCT_DIR)/resources/inspector/InspectorBackendCommands.js',
],
'action': [
'python',
'scripts/CodeGeneratorFrontend.py',
'protocol.json',
'--output_js_dir', '<(PRODUCT_DIR)/resources/inspector/',
],
'message': 'Generating Inspector protocol frontend sources from protocol.json',
},
]
},
{
'target_name': 'supported_css_properties',
'type': 'none',
'actions': [
{
'action_name': 'generateSupportedCSSProperties',
'inputs': [
# The python script in action below.
'scripts/generate_supported_css.py',
# Input files for the script.
'../core/css/CSSProperties.in',
],
'outputs': [
'<(PRODUCT_DIR)/resources/inspector/SupportedCSSProperties.js',
],
'action': [
'python',
'<@(_inputs)',
'<@(_outputs)',
],
'message': 'Generating supported CSS properties for front end',
},
]
},
# Frontend applications and modules.
{
'target_name': 'build_applications',
'type': 'none',
'dependencies': [
'supported_css_properties',
'frontend_protocol_sources',
],
'output_path': '<(PRODUCT_DIR)/resources/inspector/',
'actions': [{
'action_name': 'build_applications',
'script_name': 'scripts/build_applications.py',
'helper_scripts': [
'scripts/modular_build.py',
'scripts/concatenate_application_code.py',
],
'inputs': [
'<@(_script_name)',
'<@(_helper_scripts)',
'<@(all_devtools_files)',
'front_end/devtools.html',
'front_end/inspector.html',
'front_end/toolbox.html',
'<(_output_path)/InspectorBackendCommands.js',
'<(_output_path)/SupportedCSSProperties.js',
],
'action': ['python', '<@(_script_name)', 'devtools', 'inspector', 'toolbox', '--input_path', 'front_end', '--output_path', '<@(_output_path)', '--debug', '<@(debug_devtools)'],
'conditions': [
['debug_devtools==0', { # Release
'outputs': [
'<(_output_path)/devtools.html',
'<(_output_path)/devtools.js',
'<(_output_path)/inspector.html',
'<(_output_path)/inspector.js',
'<(_output_path)/toolbox.html',
'<(_output_path)/toolbox.js',
'<(_output_path)/audits_module.js',
'<(_output_path)/console_module.js',
'<(_output_path)/devices_module.js',
'<(_output_path)/elements_module.js',
'<(_output_path)/heap_snapshot_worker_module.js',
'<(_output_path)/layers_module.js',
'<(_output_path)/network_module.js',
'<(_output_path)/profiler_module.js',
'<(_output_path)/promises_module.js',
'<(_output_path)/resources_module.js',
'<(_output_path)/script_formatter_worker_module.js',
'<(_output_path)/settings_module.js',
'<(_output_path)/source_frame_module.js',
'<(_output_path)/sources_module.js',
'<(_output_path)/temp_storage_shared_worker_module.js',
'<(_output_path)/timeline_module.js',
],
},
{ # Debug
'outputs': [
'<(_output_path)/devtools.html',
'<(_output_path)/inspector.html',
'<(_output_path)/toolbox.html',
]
}]
]
}],
'conditions': [
['debug_devtools==0', { # Release
},
{ # Debug
# Copy runtime core and non-module directories here.
'copies': [
{
'destination': '<(_output_path)',
'files': [
'<@(devtools_core_base_files)',
'<@(devtools_core_css_files)',
],
},
{
'destination': '<(_output_path)/UglifyJS',
'files': [
'<@(devtools_uglify_files)',
],
},
{
'destination': '<(_output_path)/cm',
'files': [
'<@(devtools_cm_js_files)',
'<@(devtools_cm_css_files)',
],
},
]
}]
]
},
], # targets
'conditions': [
['debug_devtools==0', {
'targets': [
{
'target_name': 'concatenated_devtools_css',
'type': 'none',
'actions': [{
'action_name': 'concatenate_devtools_css',
'script_name': 'scripts/concatenate_css_files.py',
'input_stylesheet': 'front_end/devtools.css',
'inputs': [
'<@(_script_name)',
'<@(_input_stylesheet)',
'<@(devtools_core_css_files)',
],
'search_path': [ 'front_end' ],
'outputs': ['<(PRODUCT_DIR)/resources/inspector/devtools.css'],
'action': ['python', '<@(_script_name)', '<@(_input_stylesheet)', '<@(_outputs)'],
}],
},
{
'target_name': 'concatenated_inspector_css',
'type': 'none',
'actions': [{
'action_name': 'concatenate_inspector_css',
'script_name': 'scripts/concatenate_css_files.py',
'input_stylesheet': 'front_end/inspector.css',
'inputs': [
'<@(_script_name)',
'<@(_input_stylesheet)',
'<@(devtools_core_css_files)',
],
'search_path': [ 'front_end' ],
'outputs': ['<(PRODUCT_DIR)/resources/inspector/inspector.css'],
'action': ['python', '<@(_script_name)', '<@(_input_stylesheet)', '<@(_outputs)'],
}],
},
{
'target_name': 'concatenated_toolbox_css',
'type': 'none',
'actions': [{
'action_name': 'concatenate_toolbox_css',
'script_name': 'scripts/concatenate_css_files.py',
'input_stylesheet': 'front_end/toolbox.css',
'inputs': [
'<@(_script_name)',
'<@(_input_stylesheet)',
'<@(devtools_core_css_files)',
],
'search_path': [ 'front_end' ],
'outputs': ['<(PRODUCT_DIR)/resources/inspector/toolbox.css'],
'action': ['python', '<@(_script_name)', '<@(_input_stylesheet)', '<@(_outputs)'],
}],
},
],
}],
], # conditions
}
| 48.572178 | 257 | 0.45034 |
2c0c94ab6e1a54403ba3bf2e6cd87543c698cde5 | 1,331 | py | Python | test/test_cluster_nodes_onefs_version.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | ["MIT"] | null | null | null | test/test_cluster_nodes_onefs_version.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | ["MIT"] | null | null | null | test/test_cluster_nodes_onefs_version.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | ["MIT"] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.cluster_nodes_onefs_version import ClusterNodesOnefsVersion
class TestClusterNodesOnefsVersion(unittest.TestCase):
""" ClusterNodesOnefsVersion unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testClusterNodesOnefsVersion(self):
"""
Test ClusterNodesOnefsVersion
"""
model = swagger_client.models.cluster_nodes_onefs_version.ClusterNodesOnefsVersion()
if __name__ == '__main__':
unittest.main() | 27.163265 | 92 | 0.746807 |
5565f14d40cd6255a96f070259d3b3e1e73778c2 | 13,436 | py | Python | espnet/nets/e2e_asr_common.py | LuoTianqi/espnet | b7ff2546b37c3f12b2bd45d879f2f0f88767b639 | ["Apache-2.0"] | 1 | 2020-02-06T15:59:22.000Z | 2020-02-06T15:59:22.000Z | espnet/nets/e2e_asr_common.py | LuoTianqi/espnet | b7ff2546b37c3f12b2bd45d879f2f0f88767b639 | ["Apache-2.0"] | null | null | null | espnet/nets/e2e_asr_common.py | LuoTianqi/espnet | b7ff2546b37c3f12b2bd45d879f2f0f88767b639 | ["Apache-2.0"] | 1 | 2021-02-28T05:57:51.000Z | 2021-02-28T05:57:51.000Z |
#!/usr/bin/env python3
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Common functions for ASR."""
import argparse
import editdistance
import json
import logging
import numpy as np
import six
import sys
from itertools import groupby
def end_detect(ended_hyps, i, M=3, D_end=np.log(1 * np.exp(-10))):
"""End detection.
    described in Eq. (50) of S. Watanabe et al.
"Hybrid CTC/Attention Architecture for End-to-End Speech Recognition"
:param ended_hyps:
:param i:
:param M:
:param D_end:
:return:
"""
if len(ended_hyps) == 0:
return False
count = 0
best_hyp = sorted(ended_hyps, key=lambda x: x['score'], reverse=True)[0]
for m in six.moves.range(M):
# get ended_hyps with their length is i - m
hyp_length = i - m
hyps_same_length = [x for x in ended_hyps if len(x['yseq']) == hyp_length]
if len(hyps_same_length) > 0:
best_hyp_same_length = sorted(hyps_same_length, key=lambda x: x['score'], reverse=True)[0]
if best_hyp_same_length['score'] - best_hyp['score'] < D_end:
count += 1
if count == M:
return True
else:
return False
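# Illustrative usage sketch (not part of the original module): end_detect() is
# intended to be called once per output step of a beam search, with hypotheses
# that have already emitted <eos> collected in ``ended_hyps``; the loop below
# uses placeholder names.
#
#     for i in range(max_output_length):
#         # ...extend active hypotheses, move finished ones into ended_hyps...
#         if end_detect(ended_hyps, i):
#             break
#
# Each hypothesis is assumed to be a dict with at least 'yseq' and 'score',
# matching the keys accessed above.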
# TODO(takaaki-hori): add different smoothing methods
def label_smoothing_dist(odim, lsm_type, transcript=None, blank=0):
"""Obtain label distribution for loss smoothing.
:param odim:
:param lsm_type:
:param blank:
:param transcript:
:return:
"""
if transcript is not None:
with open(transcript, 'rb') as f:
trans_json = json.load(f)['utts']
if lsm_type == 'unigram':
assert transcript is not None, 'transcript is required for %s label smoothing' % lsm_type
labelcount = np.zeros(odim)
for k, v in trans_json.items():
ids = np.array([int(n) for n in v['output'][0]['tokenid'].split()])
            # to avoid an error when there is no text in an utterance
if len(ids) > 0:
labelcount[ids] += 1
labelcount[odim - 1] = len(transcript) # count <eos>
labelcount[labelcount == 0] = 1 # flooring
labelcount[blank] = 0 # remove counts for blank
labeldist = labelcount.astype(np.float32) / np.sum(labelcount)
else:
logging.error(
"Error: unexpected label smoothing type: %s" % lsm_type)
sys.exit()
return labeldist
def get_vgg2l_odim(idim, in_channel=3, out_channel=128):
"""Return the output size of the VGG frontend.
    :param idim: input feature dimension
    :param in_channel: input channel size
:param out_channel: output channel size
:return: output size
:rtype int
"""
idim = idim / in_channel
idim = np.ceil(np.array(idim, dtype=np.float32) / 2) # 1st max pooling
idim = np.ceil(np.array(idim, dtype=np.float32) / 2) # 2nd max pooling
    return int(idim) * out_channel  # pooled frequency dim times number of channels
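# Worked example (illustrative, not part of the original module): with
# idim=240 input features split over in_channel=3 channels, the per-channel
# width is 80; two rounds of stride-2 max pooling give ceil(80 / 2) = 40 and
# ceil(40 / 2) = 20, so the returned size is 20 * 128 = 2560 for the default
# out_channel=128.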
class ErrorCalculator(object):
"""Calculate CER and WER for E2E_ASR and CTC models during training.
:param y_hats: numpy array with predicted text
:param y_pads: numpy array with true (target) text
:param char_list:
:param sym_space:
:param sym_blank:
:return:
"""
def __init__(self, char_list, sym_space, sym_blank, report_cer=False, report_wer=False):
"""Construct an ErrorCalculator object."""
super(ErrorCalculator, self).__init__()
self.report_cer = report_cer
self.report_wer = report_wer
self.char_list = char_list
self.space = sym_space
self.blank = sym_blank
self.idx_blank = self.char_list.index(self.blank)
if self.space in self.char_list:
self.idx_space = self.char_list.index(self.space)
else:
self.idx_space = None
def __call__(self, ys_hat, ys_pad, is_ctc=False):
"""Calculate sentence-level WER/CER score.
:param torch.Tensor ys_hat: prediction (batch, seqlen)
:param torch.Tensor ys_pad: reference (batch, seqlen)
:param bool is_ctc: calculate CER score for CTC
:return: sentence-level WER score
:rtype float
:return: sentence-level CER score
:rtype float
"""
cer, wer = None, None
if is_ctc:
return self.calculate_cer_ctc(ys_hat, ys_pad)
elif not self.report_cer and not self.report_wer:
return cer, wer
seqs_hat, seqs_true = self.convert_to_char(ys_hat, ys_pad)
if self.report_cer:
cer = self.calculate_cer(seqs_hat, seqs_true)
if self.report_wer:
wer = self.calculate_wer(seqs_hat, seqs_true)
return cer, wer
def calculate_cer_ctc(self, ys_hat, ys_pad):
"""Calculate sentence-level CER score for CTC.
:param torch.Tensor ys_hat: prediction (batch, seqlen)
:param torch.Tensor ys_pad: reference (batch, seqlen)
:return: average sentence-level CER score
:rtype float
"""
cers, char_ref_lens = [], []
for i, y in enumerate(ys_hat):
y_hat = [x[0] for x in groupby(y)]
y_true = ys_pad[i]
seq_hat, seq_true = [], []
for idx in y_hat:
idx = int(idx)
if idx != -1 and idx != self.idx_blank and idx != self.idx_space:
seq_hat.append(self.char_list[int(idx)])
for idx in y_true:
idx = int(idx)
if idx != -1 and idx != self.idx_blank and idx != self.idx_space:
seq_true.append(self.char_list[int(idx)])
hyp_chars = "".join(seq_hat)
ref_chars = "".join(seq_true)
if len(ref_chars) > 0:
cers.append(editdistance.eval(hyp_chars, ref_chars))
char_ref_lens.append(len(ref_chars))
cer_ctc = float(sum(cers)) / sum(char_ref_lens) if cers else None
return cer_ctc
def convert_to_char(self, ys_hat, ys_pad):
"""Convert index to character.
:param torch.Tensor seqs_hat: prediction (batch, seqlen)
:param torch.Tensor seqs_true: reference (batch, seqlen)
:return: token list of prediction
:rtype list
:return: token list of reference
:rtype list
"""
seqs_hat, seqs_true = [], []
for i, y_hat in enumerate(ys_hat):
y_true = ys_pad[i]
eos_true = np.where(y_true == -1)[0]
eos_true = eos_true[0] if len(eos_true) > 0 else len(y_true)
            # To avoid a spuriously high WER relative to the one obtained from
            # decoding, the eos position from y_true is used to mark the eos in
            # y_hat, since y_hat is not padded with -1.
seq_hat = [self.char_list[int(idx)] for idx in y_hat[:eos_true]]
seq_true = [self.char_list[int(idx)] for idx in y_true if int(idx) != -1]
seq_hat_text = "".join(seq_hat).replace(self.space, ' ')
seq_hat_text = seq_hat_text.replace(self.blank, '')
seq_true_text = "".join(seq_true).replace(self.space, ' ')
seqs_hat.append(seq_hat_text)
seqs_true.append(seq_true_text)
return seqs_hat, seqs_true
def calculate_cer(self, seqs_hat, seqs_true):
"""Calculate sentence-level CER score.
:param list seqs_hat: prediction
:param list seqs_true: reference
:return: average sentence-level CER score
:rtype float
"""
char_eds, char_ref_lens = [], []
for i, seq_hat_text in enumerate(seqs_hat):
seq_true_text = seqs_true[i]
hyp_chars = seq_hat_text.replace(' ', '')
ref_chars = seq_true_text.replace(' ', '')
char_eds.append(editdistance.eval(hyp_chars, ref_chars))
char_ref_lens.append(len(ref_chars))
return float(sum(char_eds)) / sum(char_ref_lens)
def calculate_wer(self, seqs_hat, seqs_true):
"""Calculate sentence-level WER score.
:param list seqs_hat: prediction
:param list seqs_true: reference
:return: average sentence-level WER score
:rtype float
"""
word_eds, word_ref_lens = [], []
for i, seq_hat_text in enumerate(seqs_hat):
seq_true_text = seqs_true[i]
hyp_words = seq_hat_text.split()
ref_words = seq_true_text.split()
word_eds.append(editdistance.eval(hyp_words, ref_words))
word_ref_lens.append(len(ref_words))
return float(sum(word_eds)) / sum(word_ref_lens)
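# Illustrative usage sketch (not part of the original module); the symbol
# strings below are placeholders and depend on the training config.
#
#     calc = ErrorCalculator(char_list, sym_space='<space>',
#                            sym_blank='<blank>', report_cer=True,
#                            report_wer=True)
#     cer, wer = calc(ys_hat, ys_pad)                  # decoder outputs
#     cer_ctc = calc(ys_hat_ctc, ys_pad, is_ctc=True)  # CTC outputs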
class ErrorCalculatorTrans(object):
"""Calculate CER and WER for transducer models.
Args:
decoder (nn.Module): decoder module
args (Namespace): argument Namespace containing options
report_cer (boolean): compute CER option
report_wer (boolean): compute WER option
"""
def __init__(self, decoder, args, report_cer=False, report_wer=False):
"""Construct an ErrorCalculator object for transducer model."""
super(ErrorCalculatorTrans, self).__init__()
self.dec = decoder
recog_args = {'beam_size': args.beam_size,
'nbest': args.nbest,
'space': args.sym_space,
'score_norm_transducer': args.score_norm_transducer}
self.recog_args = argparse.Namespace(**recog_args)
self.char_list = args.char_list
self.space = args.sym_space
self.blank = args.sym_blank
self.report_cer = args.report_cer
self.report_wer = args.report_wer
def __call__(self, hs_pad, ys_pad):
"""Calculate sentence-level WER/CER score for transducer models.
Args:
hs_pad (torch.Tensor): batch of padded input sequence (batch, T, D)
ys_pad (torch.Tensor): reference (batch, seqlen)
Returns:
(float): sentence-level CER score
(float): sentence-level WER score
"""
cer, wer = None, None
if not self.report_cer and not self.report_wer:
return cer, wer
batchsize = int(hs_pad.size(0))
batch_nbest = []
for b in six.moves.range(batchsize):
if self.recog_args.beam_size == 1:
nbest_hyps = self.dec.recognize(hs_pad[b], self.recog_args)
else:
nbest_hyps = self.dec.recognize_beam(hs_pad[b], self.recog_args)
batch_nbest.append(nbest_hyps)
ys_hat = [nbest_hyp[0]['yseq'][1:] for nbest_hyp in batch_nbest]
seqs_hat, seqs_true = self.convert_to_char(ys_hat, ys_pad.cpu())
if self.report_cer:
cer = self.calculate_cer(seqs_hat, seqs_true)
if self.report_wer:
wer = self.calculate_wer(seqs_hat, seqs_true)
return cer, wer
def convert_to_char(self, ys_hat, ys_pad):
"""Convert index to character.
Args:
ys_hat (torch.Tensor): prediction (batch, seqlen)
ys_pad (torch.Tensor): reference (batch, seqlen)
Returns:
(list): token list of prediction
(list): token list of reference
"""
seqs_hat, seqs_true = [], []
for i, y_hat in enumerate(ys_hat):
y_true = ys_pad[i]
eos_true = np.where(y_true == -1)[0]
eos_true = eos_true[0] if len(eos_true) > 0 else len(y_true)
seq_hat = [self.char_list[int(idx)] for idx in y_hat[:eos_true]]
seq_true = [self.char_list[int(idx)] for idx in y_true if int(idx) != -1]
seq_hat_text = "".join(seq_hat).replace(self.space, ' ')
seq_hat_text = seq_hat_text.replace(self.blank, '')
seq_true_text = "".join(seq_true).replace(self.space, ' ')
seqs_hat.append(seq_hat_text)
seqs_true.append(seq_true_text)
return seqs_hat, seqs_true
def calculate_cer(self, seqs_hat, seqs_true):
"""Calculate sentence-level CER score for transducer model.
Args:
seqs_hat (torch.Tensor): prediction (batch, seqlen)
seqs_true (torch.Tensor): reference (batch, seqlen)
Returns:
(float): average sentence-level CER score
"""
char_eds, char_ref_lens = [], []
for i, seq_hat_text in enumerate(seqs_hat):
seq_true_text = seqs_true[i]
hyp_chars = seq_hat_text.replace(' ', '')
ref_chars = seq_true_text.replace(' ', '')
char_eds.append(editdistance.eval(hyp_chars, ref_chars))
char_ref_lens.append(len(ref_chars))
return float(sum(char_eds)) / sum(char_ref_lens)
def calculate_wer(self, seqs_hat, seqs_true):
"""Calculate sentence-level WER score for transducer model.
Args:
seqs_hat (torch.Tensor): prediction (batch, seqlen)
seqs_true (torch.Tensor): reference (batch, seqlen)
Returns:
(float): average sentence-level WER score
"""
word_eds, word_ref_lens = [], []
for i, seq_hat_text in enumerate(seqs_hat):
seq_true_text = seqs_true[i]
hyp_words = seq_hat_text.split()
ref_words = seq_true_text.split()
word_eds.append(editdistance.eval(hyp_words, ref_words))
word_ref_lens.append(len(ref_words))
return float(sum(word_eds)) / sum(word_ref_lens)
| 34.363171 | 102 | 0.607175 |
284cc42ad58fa9d7f464b554d862caf47c34fb71 | 59,546 | py | Python | gewittergefahr/gg_utils/gridded_forecasts.py | dopplerchase/GewitterGefahr | 4415b08dd64f37eba5b1b9e8cc5aa9af24f96593 | ["MIT"] | 26 | 2018-10-04T01:07:35.000Z | 2022-01-29T08:49:32.000Z | gewittergefahr/gg_utils/gridded_forecasts.py | liuximarcus/GewitterGefahr | d819874d616f98a25187bfd3091073a2e6d5279e | ["MIT"] | 4 | 2017-12-25T02:01:08.000Z | 2018-12-19T01:54:21.000Z | gewittergefahr/gg_utils/gridded_forecasts.py | liuximarcus/GewitterGefahr | d819874d616f98a25187bfd3091073a2e6d5279e | ["MIT"] | 11 | 2017-12-10T23:05:29.000Z | 2022-01-29T08:49:33.000Z |
"""Methods to create gridded spatial forecasts from storm-cell-based ones."""
import copy
import numpy
import pandas
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
from gewittergefahr.gg_utils import projections
from gewittergefahr.gg_utils import polygons
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import interp
from gewittergefahr.gg_utils import nwp_model_utils
from gewittergefahr.gg_utils import grid_smoothing_2d
from gewittergefahr.gg_utils import geodetic_utils
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import number_rounding as rounder
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import prediction_io
# TODO(thunderhoser): This file needs to be cleaned up a bit. I used to allow
# each init time to have its own grid, and some of the code for this is still
# hanging around.
MINOR_SEPARATOR_STRING = '\n\n' + '-' * 50 + '\n\n'
MAX_STORM_SPEED_M_S01 = 60.
LOG_MESSAGE_TIME_FORMAT = '%Y-%m-%d-%H%M%S'
GAUSSIAN_SMOOTHING_METHOD = 'gaussian'
CRESSMAN_SMOOTHING_METHOD = 'cressman'
VALID_SMOOTHING_METHODS = [GAUSSIAN_SMOOTHING_METHOD, CRESSMAN_SMOOTHING_METHOD]
DEFAULT_PROJECTION_OBJECT = nwp_model_utils.init_projection(
nwp_model_utils.RAP_MODEL_NAME)
DEFAULT_LEAD_TIME_RES_SECONDS = 60
DEFAULT_GRID_SPACING_METRES = 1000.
DEFAULT_GRID_SPACING_DEG = 0.01
DEFAULT_PROB_RADIUS_FOR_GRID_METRES = 1e4
DEFAULT_SMOOTHING_E_FOLDING_RADIUS_METRES = 5000.
DEFAULT_SMOOTHING_CUTOFF_RADIUS_METRES = 15000.
LATLNG_POLYGON_COLUMN_PREFIX = tracking_utils.BUFFER_COLUMN_PREFIX
XY_POLYGON_COLUMN_PREFIX = 'polygon_object_xy_buffer'
FORECAST_COLUMN_PREFIX = 'forecast_probability_buffer'
GRID_ROWS_IN_POLYGON_COLUMN_PREFIX = 'grid_rows_in_buffer'
GRID_COLUMNS_IN_POLYGON_COLUMN_PREFIX = 'grid_columns_in_buffer'
COLUMN_PREFIXES = [
LATLNG_POLYGON_COLUMN_PREFIX, XY_POLYGON_COLUMN_PREFIX,
FORECAST_COLUMN_PREFIX, GRID_ROWS_IN_POLYGON_COLUMN_PREFIX,
GRID_COLUMNS_IN_POLYGON_COLUMN_PREFIX
]
LATLNG_POLYGON_COLUMN_TYPE = 'latlng'
XY_POLYGON_COLUMN_TYPE = 'xy'
FORECAST_COLUMN_TYPE = 'forecast'
GRID_ROWS_IN_POLYGON_COLUMN_TYPE = 'grid_rows_in_polygon'
GRID_COLUMNS_IN_POLYGON_COLUMN_TYPE = 'grid_columns_in_polygon'
COLUMN_TYPES = [
LATLNG_POLYGON_COLUMN_TYPE, XY_POLYGON_COLUMN_TYPE, FORECAST_COLUMN_TYPE,
GRID_ROWS_IN_POLYGON_COLUMN_TYPE, GRID_COLUMNS_IN_POLYGON_COLUMN_TYPE
]
SPEED_COLUMN = 'speed_m_s01'
GEOGRAPHIC_BEARING_COLUMN = 'geographic_bearing_deg'
def _check_smoothing_method(smoothing_method):
"""Ensures that smoothing method is valid.
:param smoothing_method: String name for smoothing method.
:raises: ValueError: if `smoothing_method not in VALID_SMOOTHING_METHODS`.
"""
error_checking.assert_is_string(smoothing_method)
if smoothing_method not in VALID_SMOOTHING_METHODS:
error_string = (
'\n{0:s}\nValid smoothing methods (listed above) do not include '
'"{1:s}".'
).format(str(VALID_SMOOTHING_METHODS), smoothing_method)
raise ValueError(error_string)
def _column_name_to_buffer(column_name):
"""Parses distance buffer from column name.
If distance buffer cannot be found in column name, returns None for all
outputs.
:param column_name: Name of column. This column may contain x-y polygons,
lat-long polygons, or forecast probabilities.
:return: min_buffer_dist_metres: Minimum buffer distance.
:return: max_buffer_dist_metres: Maximum buffer distance.
"""
this_column_name = copy.deepcopy(column_name)
for this_prefix in COLUMN_PREFIXES:
this_column_name = this_column_name.replace(
this_prefix, LATLNG_POLYGON_COLUMN_PREFIX)
return tracking_utils.column_name_to_buffer(this_column_name)
def _buffer_to_column_name(
min_buffer_dist_metres, max_buffer_dist_metres, column_type):
"""Generates column name for distance buffer.
:param min_buffer_dist_metres: Minimum buffer distance.
:param max_buffer_dist_metres: Max buffer distance.
:param column_type: Column type (may be any string in `COLUMN_TYPES`).
:return: column_name: Name of column.
"""
column_name = tracking_utils.buffer_to_column_name(
min_distance_metres=min_buffer_dist_metres,
max_distance_metres=max_buffer_dist_metres)
if column_type == LATLNG_POLYGON_COLUMN_TYPE:
return column_name
if column_type == XY_POLYGON_COLUMN_TYPE:
return column_name.replace(
LATLNG_POLYGON_COLUMN_PREFIX, XY_POLYGON_COLUMN_PREFIX)
if column_type == FORECAST_COLUMN_TYPE:
return column_name.replace(
LATLNG_POLYGON_COLUMN_PREFIX, FORECAST_COLUMN_PREFIX)
if column_type == GRID_ROWS_IN_POLYGON_COLUMN_TYPE:
return column_name.replace(
LATLNG_POLYGON_COLUMN_PREFIX, GRID_ROWS_IN_POLYGON_COLUMN_PREFIX)
if column_type == GRID_COLUMNS_IN_POLYGON_COLUMN_TYPE:
return column_name.replace(
LATLNG_POLYGON_COLUMN_PREFIX, GRID_COLUMNS_IN_POLYGON_COLUMN_PREFIX)
return None
def _get_distance_buffer_columns(storm_object_table, column_type):
"""Returns all column names corresponding to distance buffers.
:param storm_object_table: pandas DataFrame.
:param column_type: Column type (may be any string in `COLUMN_TYPES`).
:return: dist_buffer_column_names: 1-D list of column names corresponding to
distance buffers. If there are no columns with distance buffers,
returns None.
"""
column_names = list(storm_object_table)
dist_buffer_column_names = None
for this_column_name in column_names:
if (column_type == XY_POLYGON_COLUMN_TYPE
and XY_POLYGON_COLUMN_PREFIX not in this_column_name):
continue
if (column_type == LATLNG_POLYGON_COLUMN_TYPE
and LATLNG_POLYGON_COLUMN_PREFIX not in this_column_name):
continue
if (column_type == FORECAST_COLUMN_TYPE
and FORECAST_COLUMN_PREFIX not in this_column_name):
continue
if (column_type == GRID_ROWS_IN_POLYGON_COLUMN_TYPE
and GRID_ROWS_IN_POLYGON_COLUMN_PREFIX not in this_column_name):
continue
if (column_type == GRID_COLUMNS_IN_POLYGON_COLUMN_TYPE
and GRID_COLUMNS_IN_POLYGON_COLUMN_PREFIX not in
this_column_name):
continue
_, this_max_distance_metres = _column_name_to_buffer(
this_column_name)
if this_max_distance_metres is None:
continue
if dist_buffer_column_names is None:
dist_buffer_column_names = [this_column_name]
else:
dist_buffer_column_names.append(this_column_name)
return dist_buffer_column_names
def _check_distance_buffers(min_distances_metres, max_distances_metres):
"""Ensures that distance buffers are unique and abutting.
B = number of distance buffers
:param min_distances_metres: length-B numpy array of minimum buffer
distances.
:param max_distances_metres: length-B numpy array of max buffer distances.
:raises: ValueError: if distance buffers are non-unique or non-abutting.
"""
error_checking.assert_is_numpy_array(
min_distances_metres, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(
min_distances_metres, 0., allow_nan=True)
num_buffers = len(min_distances_metres)
these_expected_dim = numpy.array([num_buffers], dtype=int)
error_checking.assert_is_numpy_array(
max_distances_metres, exact_dimensions=these_expected_dim)
sort_indices = numpy.argsort(max_distances_metres)
sorted_min_distances_metres = numpy.round(
min_distances_metres[sort_indices]
)
sorted_max_distances_metres = numpy.round(
max_distances_metres[sort_indices]
)
for j in range(num_buffers):
if numpy.isnan(sorted_min_distances_metres[j]):
error_checking.assert_is_geq(sorted_max_distances_metres[j], 0.)
else:
error_checking.assert_is_greater(
sorted_max_distances_metres[j], sorted_min_distances_metres[j]
)
if (j != 0 and sorted_min_distances_metres[j]
!= sorted_max_distances_metres[j - 1]):
error_string = (
'Minimum distance for {0:d}th buffer ({1:d} m) does not equal '
'max distance for {2:d}th buffer ({3:d} m). This means the two'
' distance buffers are not abutting.'
).format(
j + 1, int(sorted_min_distances_metres[j]), j,
int(sorted_max_distances_metres[j - 1])
)
raise ValueError(error_string)
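# Illustrative example (not part of the original module): the following buffers
# are abutting and pass the check --
#     min_distances_metres = numpy.array([numpy.nan, 0., 5000.])
#     max_distances_metres = numpy.array([0., 5000., 10000.])
# Changing 5000. to 6000. in only one of the arrays would leave a gap between
# buffers and raise a ValueError.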
def _polygons_from_latlng_to_xy(
storm_object_table, projection_object=DEFAULT_PROJECTION_OBJECT):
"""Projects distance buffers around each storm object from lat-long to x-y.
N = number of storm objects
B = number of distance buffers around each storm object
:param storm_object_table: N-row pandas DataFrame. Each row contains the
polygons for distance buffers around one storm object. For the [j]th
distance buffer, required column is given by the following command:
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="latlng")
:param projection_object: See doc for `polygons.project_latlng_to_xy`.
:return: storm_object_table: Same as input but with additional columns. For
the [j]th distance buffer, new column is given by the following command:
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="xy")
"""
buffer_column_names_latlng = _get_distance_buffer_columns(
storm_object_table=storm_object_table,
column_type=LATLNG_POLYGON_COLUMN_TYPE)
num_buffers = len(buffer_column_names_latlng)
min_buffer_distances_metres = numpy.full(num_buffers, numpy.nan)
max_buffer_distances_metres = numpy.full(num_buffers, numpy.nan)
buffer_column_names_xy = [''] * num_buffers
for j in range(num_buffers):
min_buffer_distances_metres[j], max_buffer_distances_metres[j] = (
_column_name_to_buffer(buffer_column_names_latlng[j])
)
buffer_column_names_xy[j] = _buffer_to_column_name(
min_buffer_distances_metres[j], max_buffer_distances_metres[j],
column_type=XY_POLYGON_COLUMN_TYPE)
num_storm_objects = len(storm_object_table.index)
object_array = numpy.full(num_storm_objects, numpy.nan, dtype=object)
for j in range(num_buffers):
storm_object_table = storm_object_table.assign(**{
buffer_column_names_xy[j]: object_array
})
for i in range(num_storm_objects):
for j in range(num_buffers):
storm_object_table[buffer_column_names_xy[j]].values[i], _ = (
polygons.project_latlng_to_xy(
polygon_object_latlng=storm_object_table[
buffer_column_names_latlng[j]
].values[i],
projection_object=projection_object,
false_easting_metres=0., false_northing_metres=0.)
)
return storm_object_table
def _create_default_xy_grid(x_spacing_metres, y_spacing_metres):
"""Creates default x-y grid.
M = number of rows in grid
N = number of columns in grid
:param x_spacing_metres: Spacing between adjacent grid points in x-direction
(i.e., between adjacent columns).
:param y_spacing_metres: Spacing between adjacent grid points in y-direction
(i.e., between adjacent rows).
:return: grid_points_x_metres: length-N numpy array with x-coordinates of
grid points.
:return: grid_points_y_metres: length-M numpy array with y-coordinates of
grid points.
"""
rap_x_coords_metres, rap_y_coords_metres = (
nwp_model_utils.get_xy_grid_points(
model_name=nwp_model_utils.RAP_MODEL_NAME,
grid_name=nwp_model_utils.NAME_OF_130GRID)
)
false_easting_metres, false_northing_metres = (
nwp_model_utils.get_false_easting_and_northing(
model_name=nwp_model_utils.RAP_MODEL_NAME,
grid_name=nwp_model_utils.NAME_OF_130GRID)
)
rap_x_coords_metres -= false_easting_metres
rap_y_coords_metres -= false_northing_metres
x_min_metres = rap_x_coords_metres[50]
x_max_metres = rap_x_coords_metres[-30]
y_min_metres = rap_y_coords_metres[40]
y_max_metres = rap_y_coords_metres[-70]
x_min_metres = rounder.floor_to_nearest(x_min_metres, x_spacing_metres)
x_max_metres = rounder.ceiling_to_nearest(x_max_metres, x_spacing_metres)
y_min_metres = rounder.floor_to_nearest(y_min_metres, y_spacing_metres)
y_max_metres = rounder.ceiling_to_nearest(y_max_metres, y_spacing_metres)
num_rows = 1 + int(numpy.round(
(y_max_metres - y_min_metres) / y_spacing_metres
))
num_columns = 1 + int(numpy.round(
(x_max_metres - x_min_metres) / x_spacing_metres
))
return grids.get_xy_grid_points(
x_min_metres=x_min_metres, y_min_metres=y_min_metres,
x_spacing_metres=x_spacing_metres, y_spacing_metres=y_spacing_metres,
num_rows=num_rows, num_columns=num_columns)
def _create_xy_grid(storm_object_table, x_spacing_metres, y_spacing_metres,
max_lead_time_sec):
"""Creates x-y grid encompassing all storm objects.
M = number of grid rows (unique grid-point y-coordinates)
N = number of grid columns (unique grid-point x-coordinates)
:param storm_object_table: pandas DataFrame. Each row contains the polygons
and forecast probabilities for distance buffers around one storm object.
For the [j]th distance buffer, required columns are given by the
following command:
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="xy")
:param x_spacing_metres: Spacing between adjacent grid points in x-direction
(i.e., between adjacent columns).
:param y_spacing_metres: Spacing between adjacent grid points in y-direction
(i.e., between adjacent rows).
:param max_lead_time_sec: Max lead time for which gridded forecasts will be
created.
:return: grid_points_x_metres: length-N numpy array with x-coordinates of
grid points.
:return: grid_points_y_metres: length-M numpy array with y-coordinates of
grid points.
"""
buffer_column_names_xy = _get_distance_buffer_columns(
storm_object_table=storm_object_table,
column_type=XY_POLYGON_COLUMN_TYPE)
x_min_metres = numpy.inf
x_max_metres = -numpy.inf
y_min_metres = numpy.inf
y_max_metres = -numpy.inf
num_buffers = len(buffer_column_names_xy)
num_storm_objects = len(storm_object_table.index)
for i in range(num_storm_objects):
for j in range(num_buffers):
this_polygon_object = storm_object_table[
buffer_column_names_xy[j]
].values[i]
these_x_metres = numpy.array(this_polygon_object.exterior.xy[0])
these_y_metres = numpy.array(this_polygon_object.exterior.xy[1])
x_min_metres = min([x_min_metres, numpy.min(these_x_metres)])
x_max_metres = max([x_max_metres, numpy.max(these_x_metres)])
y_min_metres = min([y_min_metres, numpy.min(these_y_metres)])
y_max_metres = max([y_max_metres, numpy.max(these_y_metres)])
x_min_metres = x_min_metres - MAX_STORM_SPEED_M_S01 * max_lead_time_sec
x_max_metres = x_max_metres + MAX_STORM_SPEED_M_S01 * max_lead_time_sec
y_min_metres = y_min_metres - MAX_STORM_SPEED_M_S01 * max_lead_time_sec
y_max_metres = y_max_metres + MAX_STORM_SPEED_M_S01 * max_lead_time_sec
x_min_metres = rounder.floor_to_nearest(x_min_metres, x_spacing_metres)
x_max_metres = rounder.ceiling_to_nearest(x_max_metres, x_spacing_metres)
y_min_metres = rounder.floor_to_nearest(y_min_metres, y_spacing_metres)
y_max_metres = rounder.ceiling_to_nearest(y_max_metres, y_spacing_metres)
num_rows = 1 + int(numpy.round(
(y_max_metres - y_min_metres) / y_spacing_metres
))
num_columns = 1 + int(numpy.round(
(x_max_metres - x_min_metres) / x_spacing_metres
))
return grids.get_xy_grid_points(
x_min_metres=x_min_metres, y_min_metres=y_min_metres,
x_spacing_metres=x_spacing_metres, y_spacing_metres=y_spacing_metres,
num_rows=num_rows, num_columns=num_columns)
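# Illustrative example (not part of the original module): if the buffer
# vertices span x = 3.2 to 96.7 km and max_lead_time_sec = 3600, each edge of
# the domain is padded by 60 m/s * 3600 s = 216 km before being floored or
# ceilinged to the nearest x_spacing_metres / y_spacing_metres, so that storms
# extrapolated to the max lead time still fall inside the grid.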
def _create_latlng_grid(
grid_points_x_metres, grid_points_y_metres, latitude_spacing_deg,
longitude_spacing_deg, projection_object=DEFAULT_PROJECTION_OBJECT):
"""Creates a lat-long grid encompassing the original x-y grid.
M_xy = number of rows (unique grid-point y-coordinates) in original grid
N_xy = number of columns (unique grid-point x-coordinates) in original grid
M_ll = number of rows (unique grid-point latitudes) in new grid
N_ll = number of columns (unique grid-point longitudes) in new grid
:param grid_points_x_metres: numpy array (length N_xy) with x-coordinates of
original grid points.
:param grid_points_y_metres: numpy array (length M_xy) with y-coordinates of
original grid points.
:param latitude_spacing_deg: Spacing between latitudinally adjacent grid
points (i.e., between adjacent rows).
:param longitude_spacing_deg: Spacing between longitudinally adjacent grid
points (i.e., between adjacent columns).
:param projection_object: See doc for `projections.project_xy_to_latlng`.
:return: grid_point_latitudes_deg: numpy array (length M_ll) with latitudes
(deg N) of new grid points.
:return: grid_point_longitudes_deg: numpy array (length N_ll) with
longitudes (deg E) of new grid points.
"""
grid_point_x_matrix_metres, grid_point_y_matrix_metres = (
grids.xy_vectors_to_matrices(
x_unique_metres=grid_points_x_metres,
y_unique_metres=grid_points_y_metres)
)
latitude_matrix_deg, longitude_matrix_deg = (
projections.project_xy_to_latlng(
x_coords_metres=grid_point_x_matrix_metres,
y_coords_metres=grid_point_y_matrix_metres,
projection_object=projection_object,
false_easting_metres=0., false_northing_metres=0.)
)
min_latitude_deg = rounder.floor_to_nearest(
numpy.min(latitude_matrix_deg), latitude_spacing_deg
)
max_latitude_deg = rounder.ceiling_to_nearest(
numpy.max(latitude_matrix_deg), latitude_spacing_deg
)
min_longitude_deg = rounder.floor_to_nearest(
numpy.min(longitude_matrix_deg), longitude_spacing_deg
)
max_longitude_deg = rounder.ceiling_to_nearest(
numpy.max(longitude_matrix_deg), longitude_spacing_deg
)
num_rows = 1 + int(numpy.round(
(max_latitude_deg - min_latitude_deg) / latitude_spacing_deg
))
num_columns = 1 + int(numpy.round(
(max_longitude_deg - min_longitude_deg) / longitude_spacing_deg
))
return grids.get_latlng_grid_points(
min_latitude_deg=min_latitude_deg, min_longitude_deg=min_longitude_deg,
lat_spacing_deg=latitude_spacing_deg,
lng_spacing_deg=longitude_spacing_deg, num_rows=num_rows,
num_columns=num_columns)
def _interp_probabilities_to_latlng_grid(
probability_matrix_xy, grid_points_x_metres, grid_points_y_metres,
latitude_spacing_deg, longitude_spacing_deg,
projection_object=DEFAULT_PROJECTION_OBJECT):
"""Interpolates forecast probabilities from x-y to lat-long grid.
M_xy = number of rows (unique grid-point y-coordinates) in original grid
N_xy = number of columns (unique grid-point x-coordinates) in original grid
M_ll = number of rows (unique grid-point latitudes) in new grid
N_ll = number of columns (unique grid-point longitudes) in new grid
:param probability_matrix_xy: numpy array (M_xy by N_xy) of forecast
probabilities on original grid.
:param grid_points_x_metres: numpy array (length N_xy) with x-coordinates of
original grid points.
:param grid_points_y_metres: numpy array (length M_xy) with y-coordinates of
original grid points.
:param latitude_spacing_deg: Spacing between latitudinally adjacent grid
points (i.e., between adjacent rows).
:param longitude_spacing_deg: Spacing between longitudinally adjacent grid
points (i.e., between adjacent columns).
:param projection_object: See doc for `projections.project_xy_to_latlng`.
:return: probability_matrix_latlng: numpy array (M_ll by N_ll) of forecast
probabilities on new grid.
:return: grid_point_latitudes_deg: numpy array (length M_ll) with latitudes
(deg N) of new grid points.
:return: grid_point_longitudes_deg: numpy array (length N_ll) with
longitudes (deg E) of new grid points.
"""
grid_point_latitudes_deg, grid_point_longitudes_deg = _create_latlng_grid(
grid_points_x_metres=grid_points_x_metres,
grid_points_y_metres=grid_points_y_metres,
projection_object=projection_object,
latitude_spacing_deg=latitude_spacing_deg,
longitude_spacing_deg=longitude_spacing_deg)
grid_point_lat_matrix_deg, grid_point_lng_matrix_deg = (
grids.latlng_vectors_to_matrices(
unique_latitudes_deg=grid_point_latitudes_deg,
unique_longitudes_deg=grid_point_longitudes_deg)
)
latlng_grid_x_matrix_metres, latlng_grid_y_matrix_metres = (
projections.project_latlng_to_xy(
grid_point_lat_matrix_deg, grid_point_lng_matrix_deg,
projection_object=projection_object,
false_easting_metres=0., false_northing_metres=0.)
)
num_latlng_grid_rows = len(grid_point_latitudes_deg)
num_latlng_grid_columns = len(grid_point_longitudes_deg)
num_latlng_grid_points = num_latlng_grid_rows * num_latlng_grid_columns
latlng_grid_x_vector_metres = numpy.reshape(
latlng_grid_x_matrix_metres, num_latlng_grid_points)
latlng_grid_y_vector_metres = numpy.reshape(
latlng_grid_y_matrix_metres, num_latlng_grid_points)
probability_vector_latlng = interp.interp_from_xy_grid_to_points(
input_matrix=probability_matrix_xy,
sorted_grid_point_x_metres=grid_points_x_metres,
sorted_grid_point_y_metres=grid_points_y_metres,
query_x_coords_metres=latlng_grid_x_vector_metres,
query_y_coords_metres=latlng_grid_y_vector_metres,
method_string=interp.NEAREST_NEIGHBOUR_METHOD_STRING, extrapolate=True)
probability_matrix_latlng = numpy.reshape(
probability_vector_latlng,
(num_latlng_grid_rows, num_latlng_grid_columns)
)
return (probability_matrix_latlng, grid_point_latitudes_deg,
grid_point_longitudes_deg)
def _normalize_probs_by_polygon_area(
storm_object_table, prob_radius_for_grid_metres):
"""Normalizes each forecast probability by area of the attached polygon.
Specifically, this method applies the following equation:
f_norm = 1 - (1 - f_polygon)^(pi * r^2 / A)
f_polygon = forecast probability of event occurring within the polygon
A = area of the polygon
r = `prob_radius_for_grid_metres`
f_norm = forecast probability of event occurring within radius r
Also:
N = number of storm objects
B = number of distance buffers around each storm object
:param storm_object_table: N-row pandas DataFrame. Each row contains the
polygons and forecast probabilities for distance buffers around one
storm object. For the [j]th distance buffer, required columns are given
by the following command:
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="xy")
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="forecast")
:param prob_radius_for_grid_metres: Effective radius for gridded
probabilities. For example, if the gridded value is "probability within
10 km of a point," this should be 10 000.
:return: storm_object_table: Same as input, except that probabilities are
normalized.
"""
buffer_column_names_xy = _get_distance_buffer_columns(
storm_object_table=storm_object_table,
column_type=XY_POLYGON_COLUMN_TYPE)
num_buffers = len(buffer_column_names_xy)
min_buffer_distances_metres = numpy.full(num_buffers, numpy.nan)
max_buffer_distances_metres = numpy.full(num_buffers, numpy.nan)
forecast_column_names = [''] * num_buffers
for j in range(num_buffers):
min_buffer_distances_metres[j], max_buffer_distances_metres[j] = (
_column_name_to_buffer(buffer_column_names_xy[j])
)
forecast_column_names[j] = _buffer_to_column_name(
min_buffer_dist_metres=min_buffer_distances_metres[j],
max_buffer_dist_metres=max_buffer_distances_metres[j],
column_type=FORECAST_COLUMN_TYPE)
num_storm_objects = len(storm_object_table.index)
prob_area_for_grid_metres2 = numpy.pi * prob_radius_for_grid_metres ** 2
for j in range(num_buffers):
these_areas_metres2 = numpy.array([
storm_object_table[buffer_column_names_xy[j]].values[i].area
for i in range(num_storm_objects)
])
these_original_probs = storm_object_table[
forecast_column_names[j]
].values
these_normalized_probs = 1. - numpy.power(
1. - these_original_probs,
prob_area_for_grid_metres2 / these_areas_metres2
)
storm_object_table = storm_object_table.assign(**{
forecast_column_names[j]: these_normalized_probs
})
return storm_object_table
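# Illustrative example (not part of the original module): if a buffer polygon
# has area A = 4 * pi * r^2, where r = prob_radius_for_grid_metres, and the
# forecast probability within the polygon is 0.5, the exponent is 1/4 and the
# normalized probability is 1 - (1 - 0.5)^0.25, or about 0.159.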
def _storm_motion_from_uv_to_speed_direction(storm_object_table):
"""For each storm object, converts motion from u-v to speed-direction.
N = number of storm objects
:param storm_object_table: N-row pandas DataFrame with the following
columns.
storm_object_table.east_velocity_m_s01: Eastward velocity of storm object
(metres per second).
storm_object_table.north_velocity_m_s01: Northward velocity of storm object
(metres per second).
:return: storm_object_table: N-row pandas DataFrame with the following
columns.
storm_object_table.speed_m_s01: Storm speed (magnitude of velocity) in m/s.
storm_object_table.geographic_bearing_deg: Storm bearing in geographic
degrees (clockwise from due north).
"""
storm_speeds_m_s01, geodetic_bearings_deg = (
geodetic_utils.xy_to_scalar_displacements_and_bearings(
x_displacements_metres=storm_object_table[
tracking_utils.EAST_VELOCITY_COLUMN].values,
y_displacements_metres=storm_object_table[
tracking_utils.NORTH_VELOCITY_COLUMN].values)
)
storm_object_table = storm_object_table.assign(**{
SPEED_COLUMN: storm_speeds_m_s01,
GEOGRAPHIC_BEARING_COLUMN: geodetic_bearings_deg
})
return storm_object_table.drop(
[tracking_utils.EAST_VELOCITY_COLUMN,
tracking_utils.NORTH_VELOCITY_COLUMN],
axis=1, inplace=False
)
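# Illustrative example (not part of the original module): a storm object with
# east_velocity_m_s01 = 5. and north_velocity_m_s01 = 5. gets
# speed_m_s01 = sqrt(50.) (about 7.07 m/s) and geographic_bearing_deg = 45.
# (clockwise from due north).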
def _extrapolate_polygons(
storm_object_table, lead_time_seconds,
projection_object=DEFAULT_PROJECTION_OBJECT):
"""For each storm object, extrapolates distance buffers forward in time.
N = number of storm objects
:param storm_object_table: N-row pandas DataFrame. Each row contains data
for distance buffers around one storm object. For the [j]th distance
buffer, required columns are given by the following command:
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="latlng")
Other required columns are listed below.
storm_object_table.speed_m_s01: Storm speed (magnitude of velocity) in m/s.
storm_object_table.geographic_bearing_deg: Storm bearing in geographic
degrees (clockwise from due north).
:param lead_time_seconds: Lead time. Polygons will be extrapolated this far
into the future.
:param projection_object: See doc for `_polygons_from_latlng_to_xy`.
:return: extrap_storm_object_table: N-row pandas DataFrame. Each row
contains extrapolated polygons for distance buffers around one storm
object. For the [j]th distance buffer, columns are given by the
following command:
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="xy")
"""
buffer_column_names_latlng = _get_distance_buffer_columns(
storm_object_table=storm_object_table,
column_type=LATLNG_POLYGON_COLUMN_TYPE)
num_storm_objects = len(storm_object_table.index)
object_array = numpy.full(num_storm_objects, numpy.nan, dtype=object)
num_buffers = len(buffer_column_names_latlng)
extrap_storm_object_table = None
for j in range(num_buffers):
if extrap_storm_object_table is None:
extrap_storm_object_table = pandas.DataFrame.from_dict({
buffer_column_names_latlng[j]: object_array
})
else:
extrap_storm_object_table = extrap_storm_object_table.assign(**{
buffer_column_names_latlng[j]: object_array
})
for j in range(num_buffers):
these_first_vertex_lat_deg = [
storm_object_table[buffer_column_names_latlng[j]].values[
i].exterior.xy[1][0]
for i in range(num_storm_objects)
]
these_first_vertex_lng_deg = [
storm_object_table[buffer_column_names_latlng[j]].values[
i].exterior.xy[0][0]
for i in range(num_storm_objects)
]
these_first_vertex_lat_deg = numpy.array(these_first_vertex_lat_deg)
these_first_vertex_lng_deg = numpy.array(these_first_vertex_lng_deg)
these_extrap_lat_deg, these_extrap_lng_deg = (
geodetic_utils.start_points_and_displacements_to_endpoints(
start_latitudes_deg=these_first_vertex_lat_deg,
start_longitudes_deg=these_first_vertex_lng_deg,
scalar_displacements_metres=
storm_object_table[SPEED_COLUMN].values * lead_time_seconds,
geodetic_bearings_deg=
storm_object_table[GEOGRAPHIC_BEARING_COLUMN].values)
)
these_lat_diffs_deg = these_extrap_lat_deg - these_first_vertex_lat_deg
these_lng_diffs_deg = these_extrap_lng_deg - these_first_vertex_lng_deg
for i in range(num_storm_objects):
these_new_latitudes_deg = these_lat_diffs_deg[i] + numpy.array(
storm_object_table[
buffer_column_names_latlng[j]].values[i].exterior.xy[1]
)
these_new_longitudes_deg = these_lng_diffs_deg[i] + numpy.array(
storm_object_table[
buffer_column_names_latlng[j]].values[i].exterior.xy[0]
)
extrap_storm_object_table[
buffer_column_names_latlng[j]
].values[i] = polygons.vertex_arrays_to_polygon_object(
exterior_x_coords=these_new_longitudes_deg,
exterior_y_coords=these_new_latitudes_deg)
return _polygons_from_latlng_to_xy(
storm_object_table=extrap_storm_object_table,
projection_object=projection_object)
def _find_min_value_greater_or_equal(sorted_input_array, test_value):
"""Finds minimum value in array that is >= test value.
:param sorted_input_array: Input array. Must be sorted in ascending order.
:param test_value: Test value (scalar).
    :return: min_value_geq_test: Minimum value of sorted_input_array that is >=
test_value.
:return: min_index_geq_test: Array index of min_value_geq_test in
input_array. If min_index_geq_test = i, this means that
min_value_geq_test = sorted_input_array[i].
"""
# TODO(thunderhoser): Put this method somewhere else. It applies to many
# more things than gridded forecasting.
min_index_geq_test = numpy.searchsorted(
sorted_input_array, numpy.array([test_value]), side='left'
)[0]
return sorted_input_array[min_index_geq_test], min_index_geq_test
def _find_max_value_less_than_or_equal(sorted_input_array, test_value):
"""Finds maximum value in array that is <= test value.
:param sorted_input_array: Input array. Must be sorted in ascending order.
:param test_value: Test value (scalar).
:return: max_value_leq_test: Max value of input_array that is <= test_value.
:return: max_index_leq_test: Array index of max_value_leq_test in
input_array. If max_index_leq_test = i, this means that
max_value_leq_test = sorted_input_array[i].
"""
# TODO(thunderhoser): Put this method somewhere else. It applies to many
# more things than gridded forecasting.
max_index_leq_test = -1 + numpy.searchsorted(
sorted_input_array, numpy.array([test_value]), side='right'
)[0]
return sorted_input_array[max_index_leq_test], max_index_leq_test
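# A small worked example of the two lookup helpers above (illustrative only,
# not part of the original module; it assumes `numpy` is already imported):
#     sorted_array = numpy.array([0., 10., 20., 30.])
#     _find_min_value_greater_or_equal(sorted_array, 12.)     # -> (20.0, 2)
#     _find_max_value_less_than_or_equal(sorted_array, 12.)   # -> (10.0, 1)
# In other words, 20 is the smallest value >= 12 and 10 is the largest value
# <= 12, along with their positions in the sorted array.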
def _find_grid_points_in_polygon(
polygon_object_xy, grid_points_x_metres, grid_points_y_metres):
"""Finds grid points in polygon.
M = number of rows (unique grid-point y-coordinates)
N = number of columns (unique grid-point x-coordinates)
P = number of grid points in polygon
:param polygon_object_xy: Instance of `shapely.geometry.Polygon` with
vertices in x-y coordinates (metres).
:param grid_points_x_metres: length-N numpy array with x-coordinates of grid
points. Must be sorted in ascending order.
:param grid_points_y_metres: length-M numpy array with y-coordinates of grid
points. Must be sorted in ascending order.
:return: rows_in_polygon: length-P integer numpy array of rows in polygon.
:return: columns_in_polygon: length-P integer numpy array of columns in
polygon.
"""
min_x_in_polygon_metres = numpy.min(numpy.array(
polygon_object_xy.exterior.xy[0]
))
max_x_in_polygon_metres = numpy.max(numpy.array(
polygon_object_xy.exterior.xy[0]
))
min_y_in_polygon_metres = numpy.min(numpy.array(
polygon_object_xy.exterior.xy[1]
))
max_y_in_polygon_metres = numpy.max(numpy.array(
polygon_object_xy.exterior.xy[1]
))
_, min_row_to_test = _find_min_value_greater_or_equal(
grid_points_y_metres, min_y_in_polygon_metres)
_, max_row_to_test = _find_max_value_less_than_or_equal(
grid_points_y_metres, max_y_in_polygon_metres)
_, min_column_to_test = _find_min_value_greater_or_equal(
grid_points_x_metres, min_x_in_polygon_metres)
_, max_column_to_test = _find_max_value_less_than_or_equal(
grid_points_x_metres, max_x_in_polygon_metres)
rows_in_polygon = []
columns_in_polygon = []
for this_row in range(min_row_to_test, max_row_to_test + 1):
for this_column in range(min_column_to_test, max_column_to_test + 1):
this_flag = polygons.point_in_or_on_polygon(
polygon_object=polygon_object_xy,
query_x_coordinate=grid_points_x_metres[this_column],
query_y_coordinate=grid_points_y_metres[this_row]
)
if not this_flag:
continue
rows_in_polygon.append(this_row)
columns_in_polygon.append(this_column)
rows_in_polygon = numpy.array(rows_in_polygon, dtype=int)
columns_in_polygon = numpy.array(columns_in_polygon, dtype=int)
return rows_in_polygon, columns_in_polygon
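# Illustrative example (hypothetical numbers, not from the original module):
# with grid_points_x_metres = grid_points_y_metres = numpy.array([0., 1000.,
# 2000., 3000.]) and a square polygon whose vertices span 900-2100 metres in
# both x and y, the bounding-box search above narrows the candidates to rows
# 1-2 and columns 1-2, and all four of those grid points then pass the
# polygons.point_in_or_on_polygon test.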
def _polygons_to_grid_points(
storm_object_table, grid_points_x_metres, grid_points_y_metres):
"""Finds grid points in each polygon.
M = number of rows (unique grid-point y-coordinates)
N = number of columns (unique grid-point x-coordinates)
P = number of grid points in a given polygon
:param storm_object_table: pandas DataFrame. Each row contains the polygons
for distance buffers around one storm object. For the [j]th distance
buffer, required column is given by the following command:
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="xy")
:param grid_points_x_metres: length-N numpy array with x-coordinates of grid
points. Must be sorted in ascending order.
:param grid_points_y_metres: length-M numpy array with y-coordinates of grid
points. Must be sorted in ascending order.
:return: storm_object_table: Same as input but with additional columns. For
the [j]th distance buffer, new columns are given by the following
command:
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="grid_rows_in_polygon")
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j],
column_type="grid_columns_in_polygon")
"""
xy_buffer_column_names = _get_distance_buffer_columns(
storm_object_table=storm_object_table,
column_type=XY_POLYGON_COLUMN_TYPE)
num_buffers = len(xy_buffer_column_names)
min_buffer_distances_metres = numpy.full(num_buffers, numpy.nan)
max_buffer_distances_metres = numpy.full(num_buffers, numpy.nan)
grid_rows_in_buffer_column_names = [''] * num_buffers
grid_columns_in_buffer_column_names = [''] * num_buffers
for j in range(num_buffers):
min_buffer_distances_metres[j], max_buffer_distances_metres[j] = (
_column_name_to_buffer(xy_buffer_column_names[j])
)
grid_rows_in_buffer_column_names[j] = _buffer_to_column_name(
min_buffer_dist_metres=min_buffer_distances_metres[j],
max_buffer_dist_metres=max_buffer_distances_metres[j],
column_type=GRID_ROWS_IN_POLYGON_COLUMN_TYPE)
grid_columns_in_buffer_column_names[j] = _buffer_to_column_name(
min_buffer_dist_metres=min_buffer_distances_metres[j],
max_buffer_dist_metres=max_buffer_distances_metres[j],
column_type=GRID_COLUMNS_IN_POLYGON_COLUMN_TYPE)
nested_array = storm_object_table[[
xy_buffer_column_names[0], xy_buffer_column_names[0]
]].values.tolist()
for j in range(num_buffers):
storm_object_table = storm_object_table.assign(**{
grid_rows_in_buffer_column_names[j]: nested_array
})
storm_object_table = storm_object_table.assign(**{
grid_columns_in_buffer_column_names[j]: nested_array
})
num_storm_objects = len(storm_object_table.index)
for i in range(num_storm_objects):
for j in range(num_buffers):
these_grid_rows, these_grid_columns = _find_grid_points_in_polygon(
polygon_object_xy=storm_object_table[
xy_buffer_column_names[j]].values[i],
grid_points_x_metres=grid_points_x_metres,
grid_points_y_metres=grid_points_y_metres)
storm_object_table[
grid_rows_in_buffer_column_names[j]
].values[i] = these_grid_rows
storm_object_table[
grid_columns_in_buffer_column_names[j]
].values[i] = these_grid_columns
return storm_object_table
def _extrap_polygons_to_grid_points(
orig_storm_object_table, extrap_storm_object_table,
grid_spacing_x_metres, grid_spacing_y_metres):
"""Finds grid points in each extrapolated polygon.
M = number of rows (unique grid-point y-coordinates)
N = number of columns (unique grid-point x-coordinates)
P = number of grid points in a given polygon
K = number of storm objects
t_0 = initial time (valid time of all storm objects)
t_L = lead time
:param orig_storm_object_table: K-row pandas DataFrame. Each row contains
data for distance buffers around one storm object at t_0. For the [j]th
distance buffer, required columns are given by the following command:
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="xy")
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="grid_rows_in_polygon")
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j],
column_type="grid_columns_in_polygon")
:param extrap_storm_object_table: K-row pandas DataFrame. Each row contains
polygons for distance buffers around one storm object, extrapolated to
(t_0 + t_L). For the [j]th distance buffer, required column is given by
the following command:
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="xy")
:param grid_spacing_x_metres: Spacing between adjacent grid points in
x-direction (i.e., between adjacent columns).
:param grid_spacing_y_metres: Spacing between adjacent grid points in
y-direction (i.e., between adjacent rows).
:return: extrap_storm_object_table: Same as input but with additional
columns. For the [j]th distance buffer, new columns are given by the
following command:
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="grid_rows_in_polygon")
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j],
column_type="grid_columns_in_polygon")
"""
xy_buffer_column_names = _get_distance_buffer_columns(
storm_object_table=orig_storm_object_table,
column_type=XY_POLYGON_COLUMN_TYPE)
num_buffers = len(xy_buffer_column_names)
min_buffer_distances_metres = numpy.full(num_buffers, numpy.nan)
max_buffer_distances_metres = numpy.full(num_buffers, numpy.nan)
grid_rows_in_buffer_column_names = [''] * num_buffers
grid_columns_in_buffer_column_names = [''] * num_buffers
for j in range(num_buffers):
min_buffer_distances_metres[j], max_buffer_distances_metres[j] = (
_column_name_to_buffer(xy_buffer_column_names[j]))
grid_rows_in_buffer_column_names[j] = _buffer_to_column_name(
min_buffer_dist_metres=min_buffer_distances_metres[j],
max_buffer_dist_metres=max_buffer_distances_metres[j],
column_type=GRID_ROWS_IN_POLYGON_COLUMN_TYPE)
grid_columns_in_buffer_column_names[j] = _buffer_to_column_name(
min_buffer_dist_metres=min_buffer_distances_metres[j],
max_buffer_dist_metres=max_buffer_distances_metres[j],
column_type=GRID_COLUMNS_IN_POLYGON_COLUMN_TYPE)
nested_array = extrap_storm_object_table[[
xy_buffer_column_names[0], xy_buffer_column_names[0]
]].values.tolist()
for j in range(num_buffers):
extrap_storm_object_table = extrap_storm_object_table.assign(**{
grid_rows_in_buffer_column_names[j]: nested_array
})
extrap_storm_object_table = extrap_storm_object_table.assign(**{
grid_columns_in_buffer_column_names[j]: nested_array
})
num_storm_objects = len(orig_storm_object_table.index)
for i in range(num_storm_objects):
for j in range(num_buffers):
this_orig_polygon_object = orig_storm_object_table[
xy_buffer_column_names[j]
].values[i]
this_extrap_polygon_object = extrap_storm_object_table[
xy_buffer_column_names[j]
].values[i]
this_x_diff_metres = (
numpy.array(this_extrap_polygon_object.exterior.xy[0])[0] -
numpy.array(this_orig_polygon_object.exterior.xy[0])[0]
)
this_y_diff_metres = (
numpy.array(this_extrap_polygon_object.exterior.xy[1])[0] -
numpy.array(this_orig_polygon_object.exterior.xy[1])[0]
)
this_row_diff = int(numpy.round(
this_y_diff_metres / grid_spacing_y_metres
))
this_column_diff = int(numpy.round(
this_x_diff_metres / grid_spacing_x_metres
))
this_name = grid_rows_in_buffer_column_names[j]
extrap_storm_object_table[this_name].values[i] = (
orig_storm_object_table[this_name].values[i] + this_row_diff
)
this_name = grid_columns_in_buffer_column_names[j]
extrap_storm_object_table[this_name].values[i] = (
orig_storm_object_table[this_name].values[i] + this_column_diff
)
return extrap_storm_object_table
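# Worked example of the row/column shift above (hypothetical numbers): if the
# first vertex of a buffer moves 2600 m in y and 1300 m in x between t_0 and
# t_0 + t_L, and the grid spacing is 1000 m in each direction, then
# this_row_diff = int(numpy.round(2600. / 1000.)) = 3 and
# this_column_diff = int(numpy.round(1300. / 1000.)) = 1, so every grid-row
# index found for the original polygon is shifted by +3 and every column
# index by +1 to get the grid points inside the extrapolated polygon.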
def create_forecast_grids(
storm_object_table, min_lead_time_sec, max_lead_time_sec,
lead_time_resolution_sec=DEFAULT_LEAD_TIME_RES_SECONDS,
grid_spacing_x_metres=DEFAULT_GRID_SPACING_METRES,
grid_spacing_y_metres=DEFAULT_GRID_SPACING_METRES,
interp_to_latlng_grid=True,
latitude_spacing_deg=DEFAULT_GRID_SPACING_DEG,
longitude_spacing_deg=DEFAULT_GRID_SPACING_DEG,
prob_radius_for_grid_metres=DEFAULT_PROB_RADIUS_FOR_GRID_METRES,
smoothing_method=None,
smoothing_e_folding_radius_metres=
DEFAULT_SMOOTHING_E_FOLDING_RADIUS_METRES,
smoothing_cutoff_radius_metres=DEFAULT_SMOOTHING_CUTOFF_RADIUS_METRES):
"""For each time with at least one storm object, creates grid of fcst probs.
T = number of times with at least one storm object
= number of forecast-initialization times
M = number of rows in given forecast grid (different for each init time)
N = number of columns in given forecast grid (different for each init time)
:param storm_object_table: pandas DataFrame with columns listed below. Each
row corresponds to one storm object. For the [j]th distance buffer,
required columns are given by the following commands:
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="latlng")
_buffer_to_column_name(min_buffer_distances_metres[j],
max_buffer_distances_metres[j], column_type="forecast")
Other required columns are listed below.
storm_object_table.full_id_string: Full storm ID.
storm_object_table.valid_time_unix_sec: Valid time.
storm_object_table.centroid_latitude_deg: Latitude (deg N) of storm
centroid.
storm_object_table.centroid_longitude_deg: Longitude (deg E) of storm
centroid.
storm_object_table.east_velocity_m_s01: Eastward velocity of storm cell
(metres per second).
storm_object_table.north_velocity_m_s01: Northward velocity of storm cell
(metres per second).
:param min_lead_time_sec: Minimum lead time. For each time with at least
one storm object, gridded probabilities will be event probabilities for
`min_lead_time_sec`...`max_lead_time_sec` into the future.
:param max_lead_time_sec: See documentation for `min_lead_time_sec`.
:param lead_time_resolution_sec: Spacing between successive lead times. For
all lead times in {min_lead_time_sec,
min_lead_time_sec + lead_time_resolution_sec,
min_lead_time_sec + 2 * lead_time_resolution_sec, ...,
max_lead_time_sec}, storm objects will be extrapolated along their
respective motion vectors. Lower values of `lead_time_resolution_sec`
lead to smoother forecast grids.
:param grid_spacing_x_metres: Spacing between adjacent grid points in
x-direction (i.e., between adjacent columns).
:param grid_spacing_y_metres: Spacing between adjacent grid points in
y-direction (i.e., between adjacent rows).
:param interp_to_latlng_grid: Boolean flag. If True, the probability field
for each initial time will be saved as both an x-y grid and a lat-long
grid.
:param latitude_spacing_deg: Spacing between meridionally adjacent grid
points (i.e., between adjacent rows).
:param longitude_spacing_deg: Spacing between zonally adjacent grid points
(i.e., between adjacent columns).
:param prob_radius_for_grid_metres: Effective radius for gridded
probabilities. For example, if the gridded value is "probability within
10 km of a point," this should be 10 000.
:param smoothing_method: Smoothing method. For each initial time, smoother
will be applied to the final forecast probability field. Valid options
are "gaussian", "cressman", and None.
:param smoothing_e_folding_radius_metres: e-folding radius for Gaussian
smoother. See documentation for `grid_smoothing_2d.apply_gaussian`.
:param smoothing_cutoff_radius_metres: Cutoff radius for Gaussian or
Cressman smoother. See documentation for
`grid_smoothing_2d.apply_gaussian` or
`grid_smoothing_2d.apply_cressman`.
:return: gridded_forecast_dict: See doc for
`prediction_io.write_gridded_predictions`.
"""
# TODO(thunderhoser): Min and max lead time should be determined by params
# for target variable.
# TODO(thunderhoser): Effective radius should allow non-zero probs outside
# of polygon buffer. Or maybe allowing for uncertainty in the future track
# would handle this.
error_checking.assert_is_integer(min_lead_time_sec)
error_checking.assert_is_geq(min_lead_time_sec, 0)
error_checking.assert_is_integer(max_lead_time_sec)
error_checking.assert_is_greater(max_lead_time_sec, min_lead_time_sec)
error_checking.assert_is_integer(lead_time_resolution_sec)
error_checking.assert_is_greater(lead_time_resolution_sec, 0)
error_checking.assert_is_boolean(interp_to_latlng_grid)
error_checking.assert_is_greater(prob_radius_for_grid_metres, 0.)
if smoothing_method is not None:
_check_smoothing_method(smoothing_method)
num_lead_times = 1 + int(numpy.round(
float(max_lead_time_sec - min_lead_time_sec) /
lead_time_resolution_sec
))
lead_times_seconds = numpy.linspace(
min_lead_time_sec, max_lead_time_sec, num=num_lead_times, dtype=int)
latlng_buffer_columns = _get_distance_buffer_columns(
storm_object_table=storm_object_table,
column_type=LATLNG_POLYGON_COLUMN_TYPE)
num_buffers = len(latlng_buffer_columns)
min_buffer_distances_metres = numpy.full(num_buffers, numpy.nan)
max_buffer_distances_metres = numpy.full(num_buffers, numpy.nan)
xy_buffer_columns = [''] * num_buffers
buffer_forecast_columns = [''] * num_buffers
buffer_row_list_columns = [''] * num_buffers
buffer_column_list_columns = [''] * num_buffers
for j in range(num_buffers):
min_buffer_distances_metres[j], max_buffer_distances_metres[j] = (
_column_name_to_buffer(latlng_buffer_columns[j])
)
xy_buffer_columns[j] = _buffer_to_column_name(
min_buffer_dist_metres=min_buffer_distances_metres[j],
max_buffer_dist_metres=max_buffer_distances_metres[j],
column_type=XY_POLYGON_COLUMN_TYPE)
buffer_forecast_columns[j] = _buffer_to_column_name(
min_buffer_dist_metres=min_buffer_distances_metres[j],
max_buffer_dist_metres=max_buffer_distances_metres[j],
column_type=FORECAST_COLUMN_TYPE)
buffer_row_list_columns[j] = _buffer_to_column_name(
min_buffer_dist_metres=min_buffer_distances_metres[j],
max_buffer_dist_metres=max_buffer_distances_metres[j],
column_type=GRID_ROWS_IN_POLYGON_COLUMN_TYPE)
buffer_column_list_columns[j] = _buffer_to_column_name(
min_buffer_dist_metres=min_buffer_distances_metres[j],
max_buffer_dist_metres=max_buffer_distances_metres[j],
column_type=GRID_COLUMNS_IN_POLYGON_COLUMN_TYPE)
_check_distance_buffers(
min_distances_metres=min_buffer_distances_metres,
max_distances_metres=max_buffer_distances_metres)
storm_object_table = _storm_motion_from_uv_to_speed_direction(
storm_object_table)
init_times_unix_sec = numpy.unique(
storm_object_table[tracking_utils.TIME_COLUMN].values
)
init_time_strings = [
time_conversion.unix_sec_to_string(t, LOG_MESSAGE_TIME_FORMAT)
for t in init_times_unix_sec
]
grid_point_x_coords_metres, grid_point_y_coords_metres = (
_create_default_xy_grid(
x_spacing_metres=grid_spacing_x_metres,
y_spacing_metres=grid_spacing_y_metres)
)
num_xy_rows = len(grid_point_y_coords_metres)
num_xy_columns = len(grid_point_x_coords_metres)
num_init_times = len(init_time_strings)
gridded_forecast_dict = {
prediction_io.INIT_TIMES_KEY: init_times_unix_sec,
prediction_io.MIN_LEAD_TIME_KEY: min_lead_time_sec,
prediction_io.MAX_LEAD_TIME_KEY: max_lead_time_sec,
prediction_io.GRID_X_COORDS_KEY: grid_point_x_coords_metres,
prediction_io.GRID_Y_COORDS_KEY: grid_point_y_coords_metres,
prediction_io.XY_PROBABILITIES_KEY: [None] * num_init_times,
prediction_io.PROJECTION_KEY: DEFAULT_PROJECTION_OBJECT
}
if interp_to_latlng_grid:
gridded_forecast_dict.update({
prediction_io.LATLNG_PROBABILITIES_KEY: [None] * num_init_times
})
for i in range(num_init_times):
this_storm_object_table = storm_object_table.loc[
storm_object_table[tracking_utils.TIME_COLUMN] ==
init_times_unix_sec[i]
]
this_num_storm_objects = len(this_storm_object_table.index)
this_storm_object_table = _polygons_from_latlng_to_xy(
storm_object_table=this_storm_object_table)
this_storm_object_table = _normalize_probs_by_polygon_area(
storm_object_table=this_storm_object_table,
prob_radius_for_grid_metres=prob_radius_for_grid_metres)
this_storm_object_table = _polygons_to_grid_points(
storm_object_table=this_storm_object_table,
grid_points_x_metres=grid_point_x_coords_metres,
grid_points_y_metres=grid_point_y_coords_metres)
this_probability_matrix_xy = numpy.full(
(num_xy_rows, num_xy_columns), 0.
)
this_num_forecast_matrix = numpy.full(
(num_xy_rows, num_xy_columns), 0, dtype=int
)
for this_lead_time_sec in lead_times_seconds:
print((
'Updating forecast grid for initial time {0:s}, lead time {1:d}'
' seconds...'
).format(init_time_strings[i], this_lead_time_sec))
this_extrap_storm_object_table = _extrapolate_polygons(
storm_object_table=this_storm_object_table,
lead_time_seconds=this_lead_time_sec)
this_extrap_storm_object_table = _extrap_polygons_to_grid_points(
orig_storm_object_table=this_storm_object_table,
extrap_storm_object_table=this_extrap_storm_object_table,
grid_spacing_x_metres=grid_spacing_x_metres,
grid_spacing_y_metres=grid_spacing_y_metres)
for j in range(num_buffers):
for k in range(this_num_storm_objects):
these_rows = this_extrap_storm_object_table[
buffer_row_list_columns[j]
].values[k]
if len(these_rows) == 0: # Outside of grid.
continue
these_columns = this_extrap_storm_object_table[
buffer_column_list_columns[j]
].values[k]
this_num_forecast_matrix[these_rows, these_columns] += 1
this_probability_matrix_xy[these_rows, these_columns] = (
this_probability_matrix_xy[these_rows, these_columns] +
this_storm_object_table[
buffer_forecast_columns[j]
].values[k]
)
this_probability_matrix_xy = (
this_probability_matrix_xy / this_num_forecast_matrix
)
if smoothing_method is not None:
print('Smoothing forecast grid for initial time {0:s}...'.format(
init_time_strings[i]
))
if smoothing_method == GAUSSIAN_SMOOTHING_METHOD:
this_probability_matrix_xy = grid_smoothing_2d.apply_gaussian(
input_matrix=this_probability_matrix_xy,
grid_spacing_x=grid_spacing_x_metres,
grid_spacing_y=grid_spacing_y_metres,
e_folding_radius=smoothing_e_folding_radius_metres,
cutoff_radius=smoothing_cutoff_radius_metres)
elif smoothing_method == CRESSMAN_SMOOTHING_METHOD:
this_probability_matrix_xy = grid_smoothing_2d.apply_cressman(
input_matrix=this_probability_matrix_xy,
grid_spacing_x=grid_spacing_x_metres,
grid_spacing_y=grid_spacing_y_metres,
cutoff_radius=smoothing_cutoff_radius_metres)
# gridded_forecast_dict[prediction_io.XY_PROBABILITIES_KEY][i] = (
# scipy.sparse.csr_matrix(this_probability_matrix_xy)
# )
gridded_forecast_dict[prediction_io.XY_PROBABILITIES_KEY][i] = (
this_probability_matrix_xy
)
if not interp_to_latlng_grid:
if i != num_init_times - 1:
print(MINOR_SEPARATOR_STRING)
continue
print((
'Interpolating forecast to lat-long grid for initial time '
'{0:s}...'
).format(
init_time_strings[i]
))
if i != num_init_times - 1:
print(MINOR_SEPARATOR_STRING)
(this_prob_matrix_latlng, these_latitudes_deg, these_longitudes_deg
) = _interp_probabilities_to_latlng_grid(
probability_matrix_xy=this_probability_matrix_xy,
grid_points_x_metres=grid_point_x_coords_metres,
grid_points_y_metres=grid_point_y_coords_metres,
latitude_spacing_deg=latitude_spacing_deg,
longitude_spacing_deg=longitude_spacing_deg)
# gridded_forecast_dict[prediction_io.LATLNG_PROBABILITIES_KEY][i] = (
# scipy.sparse.csr_matrix(this_prob_matrix_latlng)
# )
gridded_forecast_dict[prediction_io.LATLNG_PROBABILITIES_KEY][i] = (
this_prob_matrix_latlng
)
if i != 0:
continue
gridded_forecast_dict.update({
prediction_io.GRID_LATITUDES_KEY: these_latitudes_deg,
prediction_io.GRID_LONGITUDES_KEY: these_longitudes_deg
})
return gridded_forecast_dict
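# Minimal usage sketch (an assumption-laden example, not code from this
# module): `storm_object_table` must already contain the lat-long distance
# buffers and forecast probabilities described in the docstring above.
#     gridded_forecast_dict = create_forecast_grids(
#         storm_object_table=storm_object_table,
#         min_lead_time_sec=0, max_lead_time_sec=3600,
#         lead_time_resolution_sec=300, smoothing_method=None)
# The resulting dictionary is in the format expected by
# `prediction_io.write_gridded_predictions`.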
| 41.236842 | 80 | 0.718856 |
56e49e0d3837092ddd568580e2fa534c4574a070 | 451 | py | Python | python/fundir/du_jindutiao.py | harkhuang/harkcode | 1c9802bfc8d599e20ee9082eca14165a782ddf85 | [
"MIT"
] | 3 | 2015-10-27T00:49:46.000Z | 2019-04-19T08:14:46.000Z | python/fundir/du_jindutiao.py | harkhuang/harkcode | 1c9802bfc8d599e20ee9082eca14165a782ddf85 | [
"MIT"
] | 1 | 2018-11-05T07:54:55.000Z | 2018-11-05T07:54:55.000Z | python/fundir/du_jindutiao.py | harkhuang/harkcode | 1c9802bfc8d599e20ee9082eca14165a782ddf85 | [
"MIT"
] | 1 | 2015-12-19T08:47:53.000Z | 2015-12-19T08:47:53.000Z | import sys
import time
# Output example: [======= ] 75%
# width defines bar width
# percent defines current percentage
def progress(width, percent):
print "%s %d%%\r" % (('%%-%ds' % width) % (width * percent / 100 * '='), percent),
if percent >= 100:
print
sys.stdout.flush()
# Simulate doing something ...
for i in xrange(100):
progress(50, (i + 1))
time.sleep(0.01) # Slow it down for demo
| 23.736842 | 87 | 0.563193 |
ec098986397d79df0f19a66fa4f98f19ff0f2d90 | 5,069 | py | Python | scripts/synmap/dag_tools.py | LyonsLab/coge | 1d9a8e84a8572809ee3260ede44290e14de3bdd1 | [
"BSD-2-Clause"
] | 37 | 2015-02-24T18:58:30.000Z | 2021-03-07T21:22:18.000Z | scripts/synmap/dag_tools.py | LyonsLab/coge | 1d9a8e84a8572809ee3260ede44290e14de3bdd1 | [
"BSD-2-Clause"
] | 12 | 2016-06-09T21:57:00.000Z | 2020-09-11T18:48:51.000Z | scripts/synmap/dag_tools.py | LyonsLab/coge | 1d9a8e84a8572809ee3260ede44290e14de3bdd1 | [
"BSD-2-Clause"
] | 19 | 2016-03-26T08:15:17.000Z | 2021-04-12T05:03:29.000Z | #!/usr/bin/env python
from operator import itemgetter
import numpy
import re
import sys  # used by get_chr() and blast_to_dag() even when imported as a module
try: import psyco; psyco.full()
except: pass
def dag_array(dagf):
    recs = {}  # dict keyed by (qname, sname); each value is a list of hit records
fh = open(dagf, 'r')
qname_len = 0
sname_len = 0
qchr_len = 0
schr_len = 0
for line in fh:
if line[0] == '#': continue
qchr, qname, qstart, qstop, schr, sname, sstart, sstop, score = line.rstrip("*,\n,+").split("\t")[:9]
if len(qchr) > qchr_len: qchr_len = len(qchr)
if len(schr) > schr_len: schr_len = len(schr)
if len(qname) > qname_len: qname_len = len(qname)
if len(sname) > sname_len: sname_len = len(sname)
if not (qname, sname) in recs: recs[(qname, sname)] = []
recs[(qname, sname)].append([qchr, qname, int(qstart), int(qstop), schr, sname, int(sstart), int(sstop), float(score)])
fh.close()
arr = []
for k in sorted(recs, key=itemgetter(1)):
        arr.extend(sorted(recs[k], key=itemgetter(8)))
dag_names = ('qchr', 'qname', 'qstart', 'qstop', 'schr', 'sname', 'sstart', 'sstop', 'score')
dag_types = ['S', 'S', 'i4', 'i4', 'S', 'S', 'i4', 'i4', 'f8']
dag_types[0] += str(qchr_len)
dag_types[4] += str(schr_len)
dag_types[1] += str(qname_len)
dag_types[5] += str(sname_len)
return numpy.rec.array(arr, names=dag_names, formats=dag_types)
chrre = re.compile("(\d+)")
def get_chr(line):
try:
return re.search(chrre, line).groups(0)[0]
except:
print >>sys.stderr, line
sys.exit(2)
def blast_to_dag(blast_file, query, subject, qdups, sdups, get_chr=get_chr, condense=True):
if qdups:
qdups = frozenset([x.strip() for x in open(qdups)])
if sdups:
sdups = frozenset([x.strip() for x in open(sdups)])
#if query == subject: subject += "2"
qorg = query + "_"
sorg = subject + "_"
seen = {}
n_qdups = 0
n_sdups = 0
for line in open(blast_file):
line = line.split("\t")
if line[0] in qdups: n_qdups += 1; continue
if line[1] in sdups: n_sdups += 1; continue
if condense:
key = line[0] + line[1]
eval, score = map(float, line[-2:])
if key in seen and (seen[key][0] < eval and seen[key][1] > score): continue
seen[key] = (eval, score)
qinfo = line[0].split("||")
sinfo = line[1].split("||")
        # it was just the name
if len(qinfo) > 1:
qchr = qinfo[0]
qlocs = [l.lstrip('0') for l in qinfo[1:3]]
if len(qinfo) > 4 and qinfo[4] == '-1':
qlocs.reverse()
else:
# a whole chromosome, use the locs it came with.
qlocs = line[6:8]
qchr = line[0]
# qchr = get_chr(line[0])
line[0] = line[0]+"||"+qlocs[0]+"||"+qlocs[1]
if len(sinfo) > 1:
schr = sinfo[0]
slocs = [l.lstrip('0') for l in sinfo[1:3]]
if len(sinfo) > 4 and sinfo[4] == '-1':
slocs.reverse()
else:
# a whole chromosome, use the locs it came with.
slocs = line[8:10]
schr = line[1]
# schr = get_chr(line[1])
line[1] = line[1]+"||"+slocs[0]+"||"+slocs[1]
print "\t".join([
qorg + qchr, line[0] + "||" + line[2], qlocs[0], qlocs[1]
,sorg + schr, line[1] + "||" + line[2], slocs[0], slocs[1], line[10]])
if qdups:
print >>sys.stderr, "removed %i dups from query " % n_qdups
if sdups:
print >>sys.stderr, "removed %i dups from subject" % n_sdups
if __name__ == "__main__":
import sys, os
import re
import cPickle
from optparse import OptionParser
usage = """
takes a tab-delimited blast file and converts it to the format used by
dagchainer and tandems.py. output is to STDOUT.
if (optional) files are given for query/subject_dups with format:
dupa_name
dupb_name
.
.
dupzzz_name
then any hits containing those are removed from the output.
"""
parser = OptionParser(usage)
parser.add_option("-b", "--blast_file", dest="blast_file", help="the name of the blast_file", default=False)
parser.add_option("-q", "--query", dest="query", help="the name of the query organism")
parser.add_option("-s", "--subject", dest="subject", help="the name of the subject organism")
parser.add_option("--query_dups", dest="query_dups", help="file containing list of query dups", default=[])
parser.add_option("--subject_dups", dest="subject_dups", help="file containing list of subject dups", default=[])
parser.add_option("-c","--condense", dest="condense", help="condense duplicate blast hits", action="store_false")
(options, _) = parser.parse_args()
condense=options.condense
if not options.blast_file:
sys.exit(parser.print_help())
blast_to_dag(options.blast_file, options.query, options.subject, options.query_dups, options.subject_dups, condense=condense)
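    # Hypothetical invocation (file names below are placeholders, not part of
    # this script); output is written to STDOUT as described in the usage text:
    #     python dag_tools.py -b query_vs_subject.blast -q athaliana -s grape \
    #         --query_dups athaliana_dups.txt > query_vs_subject.dag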
| 34.719178 | 129 | 0.575853 |
69794be633b15aa9552c4427a027a10d5c44da9a | 2,231 | py | Python | bot/messenger.py | teamgalaxis/beercounter-bot | 00d632eca2b4c1bc943a95d156b9d6058d2f9834 | [
"MIT"
] | null | null | null | bot/messenger.py | teamgalaxis/beercounter-bot | 00d632eca2b4c1bc943a95d156b9d6058d2f9834 | [
"MIT"
] | null | null | null | bot/messenger.py | teamgalaxis/beercounter-bot | 00d632eca2b4c1bc943a95d156b9d6058d2f9834 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import random
logger = logging.getLogger(__name__)
class Messenger(object):
def __init__(self, slack_clients):
self.clients = slack_clients
def send_message(self, channel_id, msg):
# in the case of Group and Private channels, RTM channel payload is a complex dictionary
if isinstance(channel_id, dict):
channel_id = channel_id['id']
logger.debug('Sending msg: %s to channel: %s' % (msg, channel_id))
channel = self.clients.rtm.server.channels.find(channel_id)
channel.send_message(msg)
def write_help_message(self, channel_id):
bot_uid = self.clients.bot_user_id()
txt = '{}\n{}\n{}\n{}'.format(
"I'm your friendly Slack bot written in Python. I'll *_respond_* to the following commands:",
"> `hi <@" + bot_uid + ">` - I'll respond with a randomized greeting mentioning your user. :wave:",
"> `<@" + bot_uid + "> joke` - I'll tell you one of my finest jokes, with a typing pause for effect. :laughing:",
"> `<@" + bot_uid + "> attachment` - I'll demo a post with an attachment using the Web API. :paperclip:")
self.send_message(channel_id, txt)
def write_greeting(self, channel_id, user_id):
greetings = ['Hi', 'Hello', 'Nice to meet you', 'Howdy', 'Salutations']
txt = '{}, <@{}>!'.format(random.choice(greetings), user_id)
self.send_message(channel_id, txt)
def write_prompt(self, channel_id):
bot_uid = self.clients.bot_user_id()
txt = "I'm sorry, I didn't quite understand... Can I help you? (e.g. `<@" + bot_uid + "> help`)"
self.send_message(channel_id, txt)
def write_joke(self, channel_id):
question = "Why did the python cross the road?"
self.send_message(channel_id, question)
self.clients.send_user_typing_pause(channel_id)
answer = "To eat the chicken on the other side! :laughing:"
self.send_message(channel_id, answer)
def write_error(self, channel_id, err_msg):
txt = ":face_with_head_bandage: my maker didn't handle this error very well:\n>```{}```".format(err_msg)
self.send_message(channel_id, txt) | 44.62 | 125 | 0.639623 |
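# Example wiring for the Messenger class above (hypothetical values; the
# `slack_clients` object is whatever SlackClients wrapper this bot builds
# elsewhere, and the channel/user IDs are placeholders):
#     messenger = Messenger(slack_clients)
#     messenger.write_greeting('C012AB3CD', 'U061F7AUR')
#     messenger.write_help_message('C012AB3CD')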
2c74166bf9ec4eb7b543ee3ea940751a6b3bfb0e | 2,425 | py | Python | baselines/deepq/experiments/atari/model.py | yenchenlin/rl-attack-detection | 13ff1765cf52dda150c4266d8e68fd3c4aa350ed | [
"MIT"
] | 66 | 2017-09-27T21:40:56.000Z | 2022-02-22T13:58:41.000Z | baselines/deepq/experiments/atari/model.py | haider4445/AdvDRL | c17ef8d6044c31a172884d2124e87d72f848dda2 | [
"MIT"
] | 4 | 2017-09-27T19:29:26.000Z | 2021-02-22T10:01:33.000Z | baselines/deepq/experiments/atari/model.py | haider4445/AdvDRL | c17ef8d6044c31a172884d2124e87d72f848dda2 | [
"MIT"
] | 14 | 2017-09-27T22:13:16.000Z | 2021-07-12T10:01:58.000Z | import tensorflow as tf
import tensorflow.contrib.layers as layers
def model(img_in, num_actions, scope, reuse=False, concat_softmax=False):
"""As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
with tf.variable_scope(scope, reuse=reuse):
out = img_in
with tf.variable_scope("convnet"):
# original architecture
out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
out = layers.flatten(out)
with tf.variable_scope("action_value"):
out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
if concat_softmax:
out = tf.nn.softmax(out)
return out
def dueling_model(img_in, num_actions, scope, reuse=False):
"""As described in https://arxiv.org/abs/1511.06581"""
with tf.variable_scope(scope, reuse=reuse):
out = img_in
with tf.variable_scope("convnet"):
# original architecture
out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
out = layers.flatten(out)
with tf.variable_scope("state_value"):
state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
with tf.variable_scope("action_value"):
actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
action_scores_mean = tf.reduce_mean(action_scores, 1)
action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)
return state_score + action_scores
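# Usage sketch (assumptions: TensorFlow 1.x graph mode and the usual 84x84x4
# stacked-frame Atari input, neither of which is defined in this file):
#     img_in = tf.placeholder(tf.float32, [None, 84, 84, 4])
#     q_values = model(img_in, num_actions=6, scope="q_func")           # shape [None, 6]
#     q_dueling = dueling_model(img_in, num_actions=6, scope="dueling_q_func")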
| 52.717391 | 113 | 0.693196 |
c70b20988a0b1db8e34d3ab9b47456170f316371 | 1,783 | py | Python | qa/Rules/NamingConventionRules/FunctionNamingRule.py | tartarini/MAF3 | f9614d36591754544b23e3a670980799254dfd2c | [
"Apache-2.0"
] | 1 | 2021-05-10T19:01:48.000Z | 2021-05-10T19:01:48.000Z | qa/Rules/NamingConventionRules/FunctionNamingRule.py | examyes/MAF3 | f9614d36591754544b23e3a670980799254dfd2c | [
"Apache-2.0"
] | null | null | null | qa/Rules/NamingConventionRules/FunctionNamingRule.py | examyes/MAF3 | f9614d36591754544b23e3a670980799254dfd2c | [
"Apache-2.0"
] | 1 | 2018-02-06T03:51:57.000Z | 2018-02-06T03:51:57.000Z | from xml.dom import minidom as xd
import re
from AbstractRule import AbstractRule
class FunctionNamingRule(AbstractRule):
def __init__(self):
AbstractRule.__init__(self)
def execute(self):
self.dom = xd.parse(self.FullPathInputFile)
className = self.dom.getElementsByTagName('compounddef')[0].getElementsByTagName('compoundname')[0].firstChild.nodeValue
members = self.dom.getElementsByTagName('memberdef')
for member in members:
attrs = member.attributes
if(attrs["kind"].value == self.ParameterList[0]):
type = None
for memberChild in member.childNodes:
if(memberChild.nodeName == "type" and memberChild.firstChild):
type = str(memberChild.firstChild.nodeValue)
if(memberChild.nodeName == "name"):
x = re.compile(self.ParameterList[1])
if(type != None and re.match(x, str(memberChild.firstChild.nodeValue))):
#print className, memberChild.firstChild.nodeValue
if(memberChild.firstChild.nodeValue[:8] != "operator"):
line = member.getElementsByTagName('location')[0].attributes["line"].value
#self.MarkedList.append((str(className))+"::"+memberChild.firstChild.nodeValue+ " " + line)
self.MarkedList.append("<item>\n"\
+ " <class>" + str(className) + "</class>\n"\
+ " <function>" +memberChild.firstChild.nodeValue + "</function>\n"\
+ " <line>" + line + "</line>\n"\
+ "</item>")
return self.MarkedList
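# Hypothetical configuration of this rule (attribute values are examples, not
# taken from the surrounding QA framework; AbstractRule is assumed to provide
# FullPathInputFile, ParameterList and MarkedList):
#     rule = FunctionNamingRule()
#     rule.FullPathInputFile = "doxygen_xml/classExample.xml"   # doxygen XML dump
#     rule.ParameterList = ["function", "^[a-z]"]  # member kind to check, regex that flags bad names
#     violations = rule.execute()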
| 48.189189 | 128 | 0.550757 |
6f59749556298066b29379b2a96838661e424475 | 1,275 | py | Python | Src/Scripts/clean.py | Enerccio/ironpython26-fixed | e302db14f05396a378adb438565a829e66acbf94 | [
"MS-PL"
] | 1 | 2020-02-11T06:02:40.000Z | 2020-02-11T06:02:40.000Z | Src/Languages/IronPython/Scripts/clean.py | rudimk/dlr-dotnet | 71d11769f99d6ff1516ddbaed091a359eb46c670 | [
"MS-PL"
] | null | null | null | Src/Languages/IronPython/Scripts/clean.py | rudimk/dlr-dotnet | 71d11769f99d6ff1516ddbaed091a359eb46c670 | [
"MS-PL"
] | 1 | 2018-11-21T04:10:23.000Z | 2018-11-21T04:10:23.000Z | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Microsoft Public License. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Microsoft Public License, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Microsoft Public License.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
import os
def is_binary(filename):
root, ext = os.path.splitext(filename)
return ext in ['.pyc', '.pyo', '.pdb', '.exe', '.dll', '.projdata']
def do_dir(dirname):
if dirname == BIN_DIR: return
for file in os.listdir(dirname):
filename = os.path.join(dirname, file)
if os.path.isdir(filename):
do_dir(filename)
elif is_binary(filename):
print 'deleting', filename
os.remove(filename)
TOP_DIR = "c:\\IronPython-0.7"
BIN_DIR = os.path.join(TOP_DIR, "bin")
do_dir(TOP_DIR)
| 31.097561 | 97 | 0.58902 |
3f761d68a19f2f12339f3b75f281c92a21e6c7d6 | 3,110 | py | Python | app/models.py | gabrielranulfo/projeto-integrador | 4b3dc72fa2a38697fd4b21b08ec5ad5e8c44e28f | [
"MIT"
] | null | null | null | app/models.py | gabrielranulfo/projeto-integrador | 4b3dc72fa2a38697fd4b21b08ec5ad5e8c44e28f | [
"MIT"
] | 19 | 2021-11-13T22:16:31.000Z | 2021-11-13T22:20:49.000Z | app/models.py | gabrielranulfo/projeto-integrador | 4b3dc72fa2a38697fd4b21b08ec5ad5e8c44e28f | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.db import models
#from django.contrib.auth.models import User
class Cad_empresa(models.Model):
nome_fantasia_empresa = models.CharField(max_length=80)
cnpj_empresa = models.CharField(max_length=18)
endereco_empresa = models.CharField(max_length=80)
cidade_empresa = models.CharField(max_length=80)
cep_empresa = models.CharField(max_length=9)
estado_empresa = models.CharField(max_length=40)
telefone_empresa = models.CharField(max_length=15)
from django.db.models import Count
class Cad_setores(models.Model):
setor_nome = models.CharField(max_length=30)
resposavel_setor = models.CharField(max_length=30)
cargo_setor = models.CharField(max_length=30)
contato_setor = models.CharField(max_length=30)
class Cad_equipes(models.Model):
nome = models.CharField(max_length=30)
telefone = models.CharField(max_length=30)
responsabilidade = models.CharField(max_length=30)
class Cad_fornecedores(models.Model):
fornecedor = models.CharField(max_length=30)
cnpj = models.CharField(max_length=30)
dpo = models.CharField(max_length=30)
telefone = models.CharField(max_length=30)
class Cad_dpo(models.Model):
nome = models.CharField(max_length=30)
cpf = models.CharField(max_length=30)
cargo = models.CharField(max_length=30)
contato = models.CharField(max_length=30)
empresa = models.CharField(max_length=30)
cnpj = models.CharField(max_length=30)
endereco = models.CharField(max_length=30)
cidade = models.CharField(max_length=30)
estado = models.CharField(max_length=30)
class Cad_dados_previos(models.Model):
questao_dados_previos = models.CharField(max_length=90)
resposta = models.CharField(max_length=10)
class Cad_itens_auditaveis(models.Model):
questao_itens_auditaveis = models.CharField(max_length=90)
il = models.IntegerField()
icn = models.IntegerField()
e = models.IntegerField()
    # "fr" is derived from the other fields, so it is exposed as a read-only
    # property rather than stored as a separate IntegerField (a stored field
    # with the same name would simply be shadowed by this property).
    @property
    def fr(self):
        return (self.il ** 2 + self.icn ** 2) * ((7 - self.e) ** 2)
class Cad_fator_de_risco(models.Model):
questao_fator_de_risco = models.CharField(max_length=90)
fator_de_risco = models.IntegerField()
#fator_de_risco = Cad_itens_auditaveis.fr()
    ## RISK FACTOR CALCULATION: fr = (il ** 2 + icn ** 2) * ((7 - e) ** 2)
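    # Worked example of the formula (hypothetical values): with il = 3,
    # icn = 4 and e = 2, fr = (3**2 + 4**2) * ((7 - 2)**2) = 25 * 25 = 625.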
class Cad_Mapeamento(models.Model):
dado = models.CharField(max_length=30)
tipo = models.CharField(max_length=30)
fonte = models.CharField(max_length=30)
motivo = models.CharField(max_length=30)
base_legal = models.CharField(max_length=30)
tratamento = models.CharField(max_length=30)
eliminacao = models.CharField(max_length=30)
compartilhamento = models.CharField(max_length=30)
necessario_consentimento = models.CharField(max_length=30)
possui_consentimento = models.CharField(max_length=30)
menor = models.CharField(max_length=30)
impacto_pessoal = models.CharField(max_length=30)
missao_critica = models.CharField(max_length=30) | 36.588235 | 71 | 0.739228 |
59efe88ffa92f1f7968cc71c174e468051bc56ef | 2,955 | py | Python | src/uams_platform/uams_manipulation/script/waypoint_generation_visualization.py | S-JingTao/ROS_Air_ground_simulation_model | 393aa2c881dd6d0fe5efdb94409800c2d161832a | [
"MIT"
] | null | null | null | src/uams_platform/uams_manipulation/script/waypoint_generation_visualization.py | S-JingTao/ROS_Air_ground_simulation_model | 393aa2c881dd6d0fe5efdb94409800c2d161832a | [
"MIT"
] | null | null | null | src/uams_platform/uams_manipulation/script/waypoint_generation_visualization.py | S-JingTao/ROS_Air_ground_simulation_model | 393aa2c881dd6d0fe5efdb94409800c2d161832a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from __future__ import print_function
from __future__ import division
import rospy
import rosbag
import numpy as np
import matplotlib.pyplot as plt
import matplotlib; matplotlib.use('TkAgg')
from waypoint_generation_library import WaypointGen
from scipy import linalg
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Quaternion
from sensor_msgs.msg import Imu
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from mpl_toolkits import mplot3d
WaypointGeneration = WaypointGen()
waypoints, desVel, desAcc, timeVec = WaypointGeneration.waypoint_calculation()
desiredPos = WaypointGeneration.desiredPos
# plot the waypoints
figPos = plt.figure()
axPos = plt.axes(projection = '3d')
axPos.plot3D(desiredPos[:,0], desiredPos[:,1], desiredPos[:,2], 'ro')
pnt3d = axPos.scatter(waypoints[:,0], waypoints[:,1], waypoints[:,2], c = timeVec)
cbar = plt.colorbar(pnt3d)
cbar.set_label("Time [sec]")
# label the axes and give title
axPos.set_xlabel('X-Axis [m]')
axPos.set_ylabel('Y-Axis [m]')
axPos.set_zlabel('Z-Axis [m]')
axPos.set_title('Minimum Jerk Position Waypoints')
# plot the desired kinematics
figOtherKinematics = plt.figure()
figOtherKinematics.suptitle('Desired Kinematics in Inertial Frame')
# desired position waypoints
axPos = plt.subplot(311)
axPos.plot(timeVec, waypoints[:,0], '-r', label = '$x_b$')
axPos.plot(timeVec, waypoints[:,1], '-k', label = '$y_b$')
axPos.plot(timeVec, waypoints[:,2], '-b', label = '$z_b$')
# add the yaw legend
axPos.plot(np.nan, '-g', label = 'yaw')
axPos.legend(loc = 0)
plt.grid()
plt.xlabel('Time [sec]')
plt.ylabel('Position [m]')
# plt.title('Desired Position in Inertial Frame')
# desired yaw
axYaw = axPos.twinx()
axYaw.plot(timeVec, waypoints[:,3], '-g')
axYaw.set_ylabel('Yaw [rad]')
# desired velocity waypoints
axVel = plt.subplot(312)
axVel.plot(timeVec, desVel[:,0], '-r', label = '$v_{x,b}$')
axVel.plot(timeVec, desVel[:,1], '-k', label = '$v_{y,b}$')
axVel.plot(timeVec, desVel[:,2], '-b', label = '$v_{z,b}$')
# add the yaw legend
axVel.plot(np.nan, '-g', label = '$yaw_{rate}$')
axVel.legend(loc = 0)
plt.grid()
plt.xlabel('Time [sec]')
plt.ylabel('Velocity [m/s]')
# plt.title('Desired Velocity in Inertial Frame')
# desired yaw
axYawRate = axVel.twinx()
axYawRate.plot(timeVec, desVel[:,3], '-g')
axYawRate.set_ylabel('Yaw [rad/s]')
# desired acceleration waypoints
axAcc = plt.subplot(313)
axAcc.plot(timeVec, desAcc[:,0], '-r', label = '$a_{x,b}$')
axAcc.plot(timeVec, desAcc[:,1], '-k', label = '$a_{y,b}$')
axAcc.plot(timeVec, desAcc[:,2], '-b', label = '$a_{z,b}$')
# add the yaw legend
axAcc.plot(np.nan, '-g', label = '$yaw_{acc}$')
axAcc.legend(loc = 0)
plt.grid()
plt.xlabel('Time [sec]')
plt.ylabel('Acceleration [$m/s^2$]')
# plt.title('Desired Acceleration in Inertial Frame')
# desired yaw
axYawRate = axAcc.twinx()
axYawRate.plot(timeVec, desAcc[:,3], '-g')
axYawRate.set_ylabel('Yaw [$rad/s^2$]')
plt.show()
| 33.202247 | 82 | 0.710321 |
c536ca9ef0c7bea3c88f7db8a210a5321b275010 | 430 | py | Python | tests/params.py | dendisuhubdy/mapper-tda | fa54ab7bd4aa383aa5101e31f5424c09b91d86c7 | [
"MIT"
] | 1 | 2017-06-05T12:25:43.000Z | 2017-06-05T12:25:43.000Z | tests/params.py | dendisuhubdy/mapper-tda | fa54ab7bd4aa383aa5101e31f5424c09b91d86c7 | [
"MIT"
] | null | null | null | tests/params.py | dendisuhubdy/mapper-tda | fa54ab7bd4aa383aa5101e31f5424c09b91d86c7 | [
"MIT"
] | null | null | null |
import os
# Info
DATASET_DIM = 3
# Code Path
EM_CODE_PATH = ''
DATA_PATH = os.getcwd() + '/tests/'
# Graph Plot Parameters
PLOT_PATH = os.getcwd() + '/plots/'
ANGLE = (20,170)
plot_type_str = ['spring', 'random' ,'shell' ,'spectral' ,'viz'][4]
# Clustering Parameters
CLUSTERING_PLOT_BOOL = True
CLUSTERING_BIN_NUMBER = 'auto'
# Filter Function Parameters
eccentricity_P = 2
# Mapper Paramteres
p = .1
N = 8
| 10.75 | 67 | 0.667442 |
f9b40ac93f7ae67a3ec57b9a5bf784740e1abff9 | 2,560 | py | Python | parameters.py | ash368/FaceParsing | 67f8bb7e9a4651c1be45687113d3d6d9e55e04bb | [
"MIT"
] | 138 | 2020-01-10T17:54:30.000Z | 2022-03-17T04:05:38.000Z | parameters.py | ash368/FaceParsing | 67f8bb7e9a4651c1be45687113d3d6d9e55e04bb | [
"MIT"
] | 21 | 2020-02-07T12:27:41.000Z | 2022-02-05T14:50:15.000Z | parameters.py | ash368/FaceParsing | 67f8bb7e9a4651c1be45687113d3d6d9e55e04bb | [
"MIT"
] | 28 | 2020-01-10T10:27:19.000Z | 2022-03-10T07:52:25.000Z | import argparse
def str2bool(v):
return v.lower() in ('true')
def get_parameters():
parser = argparse.ArgumentParser()
parser.add_argument('--imsize', type=int, default=512)
parser.add_argument(
'--arch', type=str, choices=['UNet', 'DFANet', 'DANet', 'DABNet', 'CE2P', 'FaceParseNet18',
'FaceParseNet34', "FaceParseNet50", "FaceParseNet101", "EHANet18"], required=True)
# Training setting
parser.add_argument('--epochs', type=int, default=200,
help='how many times to update the generator')
parser.add_argument('--pretrained_model', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--g_lr', type=float, default=0.001)
parser.add_argument('--weight_decay', type=float, default=1e-5)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--classes', type=int, default=19)
# Testing setting
# parser.add_argument('--test_size', type=int, default=2824)
# parser.add_argument('--val_size', type=int, default=2993)
parser.add_argument('--model_name', type=str, default='model.pth')
# Misc
parser.add_argument('--train', type=str2bool, default=True)
parser.add_argument('--parallel', type=str2bool, default=False)
# Path
parser.add_argument('--img_path', type=str,
default='./Data_preprocessing/train_img')
parser.add_argument('--label_path', type=str,
default='./Data_preprocessing/train_label')
parser.add_argument('--model_save_path', type=str, default='./models')
parser.add_argument('--sample_path', type=str, default='./samples')
parser.add_argument('--val_img_path', type=str,
default='./Data_preprocessing/val_img')
parser.add_argument('--val_label_path', type=str,
default='./Data_preprocessing/val_label')
parser.add_argument('--test_image_path', type=str,
default='./Data_preprocessing/test_img')
parser.add_argument('--test_label_path', type=str,
default='./Data_preprocessing/test_label')
parser.add_argument('--test_color_label_path', type=str,
default='./test_color_visualize')
# Step size
parser.add_argument('--sample_step', type=int, default=200)
parser.add_argument('--tb_step', type=int, default=100)
return parser.parse_args()
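# Example invocation (assumes an entry point such as main.py that calls
# get_parameters(); --arch is required and must be one of the listed choices):
#     python main.py --arch FaceParseNet50 --batch_size 8 --epochs 200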
| 43.389831 | 119 | 0.645313 |
967873951f8e30a56973275a38df007655282ded | 867 | py | Python | services/todo.py | gitrus/flask-rest-pylearn | b62d62d54b508d3e30e3c1a2e29658df0e2d86b4 | [
"MIT"
] | null | null | null | services/todo.py | gitrus/flask-rest-pylearn | b62d62d54b508d3e30e3c1a2e29658df0e2d86b4 | [
"MIT"
] | null | null | null | services/todo.py | gitrus/flask-rest-pylearn | b62d62d54b508d3e30e3c1a2e29658df0e2d86b4 | [
"MIT"
] | 1 | 2020-07-01T10:40:53.000Z | 2020-07-01T10:40:53.000Z | import datetime
import typing as t
from models.todo.todo import ToDo, ToDoStatus
class ToDoService:
def update(self, todo: ToDo, update_obj: dict) -> ToDo:
pass
def create(self, create_obj: dict) -> ToDo:
pass
def fetch_overdue_todos(self, date: datetime) -> t.List[ToDo]:
pass
def complete_todo(self, todo: ToDo) -> ToDo:
todo.status = ToDoStatus.DONE
return todo
def fetch_todos(self) -> t.List[ToDo]:
return [
ToDo('first', 'desc first'),
ToDo('second', 'desc second')
]
def fetch_by_id(self, uid: str) -> ToDo:
return ToDo(
uid,
uid
)
ToDoServiceSingleton = ToDoService()
"""
Rest API -> Adapter -> App logic (Service) -> Adapter db -> db
|
model
"""
| 20.162791 | 66 | 0.540946 |
f19fd783995fcf7ca441b4b0696658c263945fd6 | 3,779 | py | Python | allennlp/tests/models/reading_comprehension/qanet_fine_test.py | Whu-wxy/allennlp | c863900e3e1fe7be540b9a0632a7a032491fc3ab | [
"Apache-2.0"
] | 6 | 2019-05-27T03:24:30.000Z | 2021-01-23T14:32:45.000Z | allennlp/tests/models/reading_comprehension/qanet_fine_test.py | Whu-wxy/allennlp | c863900e3e1fe7be540b9a0632a7a032491fc3ab | [
"Apache-2.0"
] | null | null | null | allennlp/tests/models/reading_comprehension/qanet_fine_test.py | Whu-wxy/allennlp | c863900e3e1fe7be540b9a0632a7a032491fc3ab | [
"Apache-2.0"
] | 3 | 2019-09-05T05:55:14.000Z | 2021-06-20T05:12:06.000Z | # pylint: disable=no-self-use,invalid-name
from flaky import flaky
import numpy
from numpy.testing import assert_almost_equal
from allennlp.common import Params
from allennlp.data import DatasetReader, Vocabulary
from allennlp.common.testing import ModelTestCase
from allennlp.data.dataset import Batch
from allennlp.models import Model
from mymodel.QaNet_fine_grained import QaNet_fine_grained
class QaNetFineTest(ModelTestCase):
def setUp(self):
super().setUp()
self.set_up_model('/home/ubuntu/MyFiles/nlp/config/qanet_fine.json',
'/home/ubuntu/MyFiles/nlp/config/dev-v1.1.json')
def test_forward_pass_runs_correctly(self):
batch = Batch(self.instances)
batch.index_instances(self.vocab)
training_tensors = batch.as_tensor_dict()
output_dict = self.model(**training_tensors)
metrics = self.model.get_metrics(reset=True)
# We've set up the data such that there's a fake answer that consists of the whole
# paragraph. _Any_ valid prediction for that question should produce an F1 of greater than
# zero, while if we somehow haven't been able to load the evaluation data, or there was an
# error with using the evaluation script, this will fail. This makes sure that we've
# loaded the evaluation data correctly and have hooked things up to the official evaluation
# script.
assert metrics['f1'] > 0
span_start_probs = output_dict['span_start_probs'][0].data.numpy()
span_end_probs = output_dict['span_start_probs'][0].data.numpy()
assert_almost_equal(numpy.sum(span_start_probs, -1), 1, decimal=6)
assert_almost_equal(numpy.sum(span_end_probs, -1), 1, decimal=6)
span_start, span_end = tuple(output_dict['best_span'][0].data.numpy())
assert span_start >= 0
assert span_start <= span_end
assert span_end < self.instances[0].fields['passage'].sequence_length()
assert isinstance(output_dict['best_span_str'][0], str)
@flaky
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file, tolerance=1e-4)
def test_batch_predictions_are_consistent(self):
# The same issue as the bidaf test case.
# The CNN encoder has problems with this kind of test - it's not properly masked yet, so
# changing the amount of padding in the batch will result in small differences in the
# output of the encoder. So, we'll remove the CNN encoder entirely from the model for this test.
# Save some state.
# pylint: disable=protected-access,attribute-defined-outside-init
saved_model = self.model
saved_instances = self.instances
# Modify the state, run the test with modified state.
params = Params.from_file(self.param_file)
reader = DatasetReader.from_params(params['dataset_reader'])
reader._token_indexers = {'tokens': reader._token_indexers['tokens']}
self.instances = reader.read(self.FIXTURES_ROOT / 'data' / 'squad.json')
vocab = Vocabulary.from_instances(self.instances)
for instance in self.instances:
instance.index_fields(vocab)
del params['model']['text_field_embedder']['token_embedders']['token_characters']
params['model']['phrase_layer']['num_convs_per_block'] = 0
params['model']['modeling_layer']['num_convs_per_block'] = 0
self.model = Model.from_params(vocab=vocab, params=params['model'])
self.ensure_batch_predictions_are_consistent()
# Restore the state.
self.model = saved_model
self.instances = saved_instances
test = QaNetFineTest()
test.setUp()
test.test_forward_pass_runs_correctly() | 47.2375 | 104 | 0.705213 |
86d940c55389dc36574adc7bfec882bb42703d17 | 3,985 | py | Python | test/programytest/mappings/test_normalise.py | cdoebler1/AIML2 | ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a | [
"MIT"
] | 345 | 2016-11-23T22:37:04.000Z | 2022-03-30T20:44:44.000Z | test/programytest/mappings/test_normalise.py | MikeyBeez/program-y | 00d7a0c7d50062f18f0ab6f4a041068e119ef7f0 | [
"MIT"
] | 275 | 2016-12-07T10:30:28.000Z | 2022-02-08T21:28:33.000Z | test/programytest/mappings/test_normalise.py | VProgramMist/modified-program-y | f32efcafafd773683b3fe30054d5485fe9002b7d | [
"MIT"
] | 159 | 2016-11-28T18:59:30.000Z | 2022-03-20T18:02:44.000Z | import os
import re
import unittest
from unittest.mock import patch
from programy.mappings.normal import NormalCollection
from programy.storage.factory import StorageFactory
from programy.storage.stores.file.config import FileStorageConfiguration
from programy.storage.stores.file.config import FileStoreConfiguration
from programy.storage.stores.file.engine import FileStorageEngine
class NormaliseTests(unittest.TestCase):
def test_initialise_collection(self):
collection = NormalCollection()
self.assertIsNotNone(collection)
def test_collection_operations(self):
collection = NormalCollection()
self.assertIsNotNone(collection)
collection.add_to_lookup(".COM", [re.compile('(^\\.COM|\\.COM|\\.COM$)', re.IGNORECASE), ' DOT COM '])
self.assertTrue(collection.has_key(".COM"))
self.assertEqual([re.compile('(^\\.COM|\\.COM|\\.COM$)', re.IGNORECASE), ' DOT COM '], collection.value(".COM"))
self.assertEqual("keithsterling dot com", collection.normalise_string("keithsterling.COM"))
def test_load(self):
storage_factory = StorageFactory()
file_store_config = FileStorageConfiguration()
file_store_config._normal_storage = FileStoreConfiguration(file=os.path.dirname(__file__) + os.sep + "test_files" + os.sep + "normal.txt", fileformat="text", extension="txt", encoding="utf-8", delete_on_start=False)
storage_engine = FileStorageEngine(file_store_config)
storage_factory._storage_engines[StorageFactory.NORMAL] = storage_engine
storage_factory._store_to_engine_map[StorageFactory.NORMAL] = storage_engine
collection = NormalCollection()
self.assertIsNotNone(collection)
self.assertTrue(collection.load(storage_factory))
self.assertEqual(collection.normalise_string("keithsterling.COM"), "keithsterling dot com")
self.assertEquals([re.compile('(^\\.COM|\\.COM|\\.COM$)', re.IGNORECASE), ' DOT COM '], collection.normalise(".COM"))
self.assertEquals(None, collection.normalise(".XXX"))
def test_reload(self):
storage_factory = StorageFactory()
file_store_config = FileStorageConfiguration()
file_store_config._normal_storage = FileStoreConfiguration(file=os.path.dirname(__file__) + os.sep + "test_files" + os.sep + "normal.txt", fileformat="text", extension="txt", encoding="utf-8", delete_on_start=False)
storage_engine = FileStorageEngine(file_store_config)
storage_factory._storage_engines[StorageFactory.NORMAL] = storage_engine
storage_factory._store_to_engine_map[StorageFactory.NORMAL] = storage_engine
collection = NormalCollection()
self.assertIsNotNone(collection)
self.assertTrue(collection.load(storage_factory))
self.assertEqual(collection.normalise_string("keithsterling.COM"), "keithsterling dot com")
self.assertTrue(collection.reload(storage_factory))
self.assertEqual(collection.normalise_string("keithsterling.COM"), "keithsterling dot com")
def patch_load_collection(self, lookups_engine):
raise Exception("Mock Exception")
@patch("programy.mappings.normal.NormalCollection._load_collection", patch_load_collection)
def test_load_with_exception(self):
storage_factory = StorageFactory()
file_store_config = FileStorageConfiguration()
file_store_config._normal_storage = FileStoreConfiguration(file=os.path.dirname(__file__) + os.sep + "test_files" + os.sep + "normal.txt", fileformat="text", extension="txt", encoding="utf-8", delete_on_start=False)
storage_engine = FileStorageEngine(file_store_config)
storage_factory._storage_engines[StorageFactory.NORMAL] = storage_engine
storage_factory._store_to_engine_map[StorageFactory.NORMAL] = storage_engine
collection = NormalCollection()
self.assertIsNotNone(collection)
self.assertFalse(collection.load(storage_factory))
| 43.791209 | 223 | 0.738018 |