diff --git a/.gitattributes b/.gitattributes
index 5a7024ddd08719ebb19bcf8240758bdd5f6322dd..d238b9c2c001c7a940f6a51d7cab88142df0bc41 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -474,3 +474,37 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 1536.jsonl filter=lfs diff=lfs merge=lfs -text
 1540.jsonl filter=lfs diff=lfs merge=lfs -text
 1547.jsonl filter=lfs diff=lfs merge=lfs -text
+1549.jsonl filter=lfs diff=lfs merge=lfs -text
+1541.jsonl filter=lfs diff=lfs merge=lfs -text
+155.jsonl filter=lfs diff=lfs merge=lfs -text
+1546.jsonl filter=lfs diff=lfs merge=lfs -text
+1550.jsonl filter=lfs diff=lfs merge=lfs -text
+1560.jsonl filter=lfs diff=lfs merge=lfs -text
+1553.jsonl filter=lfs diff=lfs merge=lfs -text
+1561.jsonl filter=lfs diff=lfs merge=lfs -text
+1552.jsonl filter=lfs diff=lfs merge=lfs -text
+1551.jsonl filter=lfs diff=lfs merge=lfs -text
+1542.jsonl filter=lfs diff=lfs merge=lfs -text
+1462.jsonl filter=lfs diff=lfs merge=lfs -text
+1555.jsonl filter=lfs diff=lfs merge=lfs -text
+1565.jsonl filter=lfs diff=lfs merge=lfs -text
+1559.jsonl filter=lfs diff=lfs merge=lfs -text
+1571.jsonl filter=lfs diff=lfs merge=lfs -text
+1554.jsonl filter=lfs diff=lfs merge=lfs -text
+156.jsonl filter=lfs diff=lfs merge=lfs -text
+1566.jsonl filter=lfs diff=lfs merge=lfs -text
+1568.jsonl filter=lfs diff=lfs merge=lfs -text
+1563.jsonl filter=lfs diff=lfs merge=lfs -text
+1567.jsonl filter=lfs diff=lfs merge=lfs -text
+1574.jsonl filter=lfs diff=lfs merge=lfs -text
+1575.jsonl filter=lfs diff=lfs merge=lfs -text
+1576.jsonl filter=lfs diff=lfs merge=lfs -text
+1581.jsonl filter=lfs diff=lfs merge=lfs -text
+1570.jsonl filter=lfs diff=lfs merge=lfs -text
+1578.jsonl filter=lfs diff=lfs merge=lfs -text
+1569.jsonl filter=lfs diff=lfs merge=lfs -text
+1485.jsonl filter=lfs diff=lfs merge=lfs -text
+1582.jsonl filter=lfs diff=lfs merge=lfs -text
+158.jsonl filter=lfs diff=lfs merge=lfs -text
+1585.jsonl filter=lfs diff=lfs merge=lfs -text
+1588.jsonl filter=lfs diff=lfs merge=lfs -text
diff --git a/1462.jsonl b/1462.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..9ab4b8cda3124068964f0ca99f94417ab74d9b1e
--- /dev/null
+++ b/1462.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa787b86fc3b70fb2988126aad2a464733b09f5023bcd03f7ef08976e9750457
+size 545049834
diff --git a/1485.jsonl b/1485.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..db60252a87ec5145e713f1b75fea6bb64b7368ce
--- /dev/null
+++ b/1485.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ef7b91ccb0e470f72a8b17a3641cf3c898cdb4a5c12eeee4bc811c63a17b8a1
+size 521795050
diff --git a/1541.jsonl b/1541.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..81513beeb1fb6e02404d2002f4c530de85b71f3d
--- /dev/null
+++ b/1541.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f4bd106e8418d3e3a34100fcdbc24bc0baf7561951347da54ba5bcb581454b3
+size 18211872
diff --git a/1542.jsonl b/1542.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..b9cf33bb15688971c5b47d93acc7cea0432fb3f6
--- /dev/null
+++ b/1542.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c45f0fb8fe3e471457ad5f066592ec7ecae416ca8ac055d7b77d1dca650b3534
+size 62591961
diff --git a/1546.jsonl b/1546.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..d00fe45e6c613a2f6612d673d1481807660435bd
--- /dev/null
+++ b/1546.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fc486ea6ab088f3a06612a8d68d506ec3cd5a6fb8efc0b27e05dce6067ed989
+size 65148089
diff --git a/1549.jsonl b/1549.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..91aff332ed391b49b74980e282d3356e5a1d3a26
--- /dev/null
+++ b/1549.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eeaf676637c86da4f1794f231576d9b83c280fc23e16eef444e766ce75a6e09e
+size 76202034
diff --git a/155.jsonl b/155.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..f4fba08655c5f3fd136779a375d95f4eb371c8bd
--- /dev/null
+++ b/155.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:880870d77b5ceed3fa41d243ffd5f5e7015fda610948773d6060beca10863fee
+size 72756418
diff --git a/1550.jsonl b/1550.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..67997420b168a1aa9c5f20008d4142addddb6270
--- /dev/null
+++ b/1550.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83f164576c9d0ad716a7e05ab584bc3ecb93b45e4e5fa9461c85d9259b0054b1
+size 58669627
diff --git a/1551.jsonl b/1551.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..10e7cef1368c9ccc8929ecea37f0c2980467c35f
--- /dev/null
+++ b/1551.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a08bfe7fc3599de463972a85bd11a474d2235a36b10c91a9c6634ac7c8f8972
+size 49764270
diff --git a/1552.jsonl b/1552.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..c9be0ea7d38d603b5529d91ef150dd48834fc299
--- /dev/null
+++ b/1552.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:664e08f71fe1829fe4488a78b8073d103a2c6e52198b150aba6a6ddf25c7b1d9
+size 55438487
diff --git a/1553.jsonl b/1553.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..6fede5a73732aa0866a61a591b39d1b36687cf5b
--- /dev/null
+++ b/1553.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:869522de747bd0596d53d165fde880a0c57daa01a952aa837bf02c38139f6ae2
+size 56645676
diff --git a/1554.jsonl b/1554.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e4494f3cb5ed0dbc981959e9d466456300c0ecfa
--- /dev/null
+++ b/1554.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98d381df3a30c7d12be79b209c232d12484cd8c0218ca933e04420242d365863
+size 54056441
diff --git a/1555.jsonl b/1555.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..3d47a643e25d4246425a90c9a4e1d39e020fcc24
--- /dev/null
+++ b/1555.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4012943d2cdbc156476c9ace79ed61d4881c3266bc8271bd859f634cbe667833
+size 58747179
diff --git a/1559.jsonl b/1559.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..4a3181da1bb92aeaecfdc9795ed4ee4fbf8bc241
--- /dev/null
+++ b/1559.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe3ad9b7cdedd22109bbf5fd18bcbc78701ab1d3d9e63b2c877354048707e56f
+size 55387134
diff --git a/156.jsonl b/156.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..a6200b9c3fabd0ba32adafed3266b0d23c3aff33
--- /dev/null
+++ b/156.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15bceaf4faab1ebf1183be8288092774ee307a5153a2c575cff6e5e5f9d7eaa2
+size 60519350
diff --git a/1560.jsonl b/1560.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..ae9f3c279a05db1d5423a2dfe5a550bf1c469d84
--- /dev/null
+++ b/1560.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b93ce91e6c42cf25b21c48483d5139bd6b6a86115201841327e4372be8cb9680
+size 23304537
diff --git a/1561.jsonl b/1561.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..8e3ea7d16605399e015fb2734ef6c17198f5ab4c
--- /dev/null
+++ b/1561.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b7ca7f90823ce947608fd5eca13c045cb3dc80658a10b4cc387b7b6431a5b1e
+size 56661637
diff --git a/1563.jsonl b/1563.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..545d3cee1e205a0e1983088b95f16cbfd6b0bad6
--- /dev/null
+++ b/1563.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6a7c385aaf1f4f322c0bf0e9c13b6ce3d4cf99684c9160f6a5e8763aa233a7f
+size 58882298
diff --git a/1565.jsonl b/1565.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..990839ba4031dec4126e9ce218d9998f37844fa0
--- /dev/null
+++ b/1565.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6933ed5af3a3e365f561e95eda039bca24fc4c7026bd8912b982e30d96599bb9
+size 17510600
diff --git a/1566.jsonl b/1566.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..d144926506ff361761444700d14736ef0224ed4f
--- /dev/null
+++ b/1566.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2215ccfae499201b23aa53518745bfb5eae64a31ea53c552c921071f57c4aa9e
+size 59659134
diff --git a/1567.jsonl b/1567.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..63b57a57babe10d9142fccbd076297ac7f07d261
--- /dev/null
+++ b/1567.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2be41e1556e28f1747c17252d7172b72da9873903b5a8b17c37b7c3dcb0d4c2
+size 57778042
diff --git a/1568.jsonl b/1568.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..0c588e70d08b6bb6dcf2e4a171cdbbdff3411190
--- /dev/null
+++ b/1568.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c35dc91a6135335777fa9eba285b7971cb7e3c189b4e7772676f499bdfb658d
+size 47518460
diff --git a/1569.jsonl b/1569.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..df8e6aaefe4e1616fedc030c2f052fb0940f38de
--- /dev/null
+++ b/1569.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ac9ff9782cf3b14c79f18c0cfd5adf2c029507af42bdd08c34218c34a0aec56
+size 56812474
diff --git a/1570.jsonl b/1570.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..eb093f8da98dfa356867e0ac261ab224c3219e59
--- /dev/null
+++ b/1570.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d224a1d43e37a07a61a5bf61cd2c2ad638e89d04a408152e4b697832abc97d6
+size 66413626
diff --git a/1571.jsonl b/1571.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..54ac9fd63537d5d2cf87dfdcf7427ae5f220e2f5
--- /dev/null
+++ b/1571.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:857231e684f7f679cf35d9ff8256820a2f72312debd693f21501dc46b07a9ba7
+size 22284223
diff --git a/1574.jsonl b/1574.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..b215fb85b262a67cffc6ddafc26b84871b594080
--- /dev/null
+++ b/1574.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:964567717396810531fa8610e1734f201afc247fa0ab9007087a2b98d76b3b59
+size 58741254
diff --git a/1575.jsonl b/1575.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..7d2f4b3589634312f4b884d76b44477156dbd21f
--- /dev/null
+++ b/1575.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c65c09ece0c58e7a00a1b0b22dff5752af64a7f79759bae64c482c325c412b1e
+size 63510336
diff --git a/1576.jsonl b/1576.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..f1aa334d070b76f66a58718d3d8351564858094e
--- /dev/null
+++ b/1576.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da31d609c552428ec2008683ee46925db4448fe72ec5879a35e3bb143c731bcd
+size 13105599
diff --git a/1578.jsonl b/1578.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..5cbe92d67138f333b50a72187f97e50b7bee2d35
--- /dev/null
+++ b/1578.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0db799ed73bda08060231596b0dc6355ad73953228da2b18681a6ee7d3a0f829
+size 55993439
diff --git a/158.jsonl b/158.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..a869560002fd2e2983d93fb784eacf1cf598e470
--- /dev/null
+++ b/158.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f656b4fbd03541d613b912687df7e6208609f954e1e58b770d560d50ba14ada0
+size 61925076
diff --git a/1581.jsonl b/1581.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..d0c4adc599bf06c18b9e02b5b048bd2853743bf5
--- /dev/null
+++ b/1581.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fc0a9842c6141a6f899977ae7394d2d805a4d8a3b43bb1f3fe450a65db96e29
+size 14076804
diff --git a/1582.jsonl b/1582.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..2cc89d665337f2b2ec979edc48419953bafb7166
--- /dev/null
+++ b/1582.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5845709e04e260130b5d3e3fa28e75ae1cfa4d13eb116290566c70ef714def0c
+size 17043060
diff --git a/1585.jsonl b/1585.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..ecd966532d588cfb6e318b55bed57f5c0752a9a3
--- /dev/null
+++ b/1585.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0ab4e30995ab344175fa7aee154a23d1566d43a79fb0555844d0ecf2bba5db0
+size 51492011
diff --git a/1588.jsonl b/1588.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..7ae9a552137030b2b31397c979c3ca7d3f6e0efe
--- /dev/null
+++ b/1588.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b845aaf94b4b789d512220d65a7c0c22143eb8d9044f74e35c22f8621b751d62
+size 16119849
diff --git a/429.jsonl b/429.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/4291.jsonl b/4291.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/4297.jsonl b/4297.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/430.jsonl b/430.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..05b4c39bf66ae6577245910f984a9a42b6590b63
--- /dev/null
+++ b/430.jsonl
@@ -0,0 +1,413 @@
+{"seq_id":"33076061814","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import ScalarFormatter, FormatStrFormatter\n\n\ntwofive_2 = 
np.transpose(np.loadtxt(\"ThymusVSpleen_.25_expected_delta_lsvs_over_N_prime2.txt\"))\ntwofive_3 = np.transpose(np.loadtxt(\"ThymusVSpleen_.25_expected_delta_lsvs_over_N_prime3.txt\"))\ntwofive_4 = np.transpose(np.loadtxt(\"ThymusVSpleen_.25_expected_delta_lsvs_over_N_prime4.txt\"))\ntwofive_5 = np.transpose(np.loadtxt(\"ThymusVSpleen_.25_expected_delta_lsvs_over_N_prime5.txt\"))\ntwofive_6 = np.transpose(np.loadtxt(\"ThymusVSpleen_.25_expected_delta_lsvs_over_N_prime6.txt\"))\ntwofive_7 = np.transpose(np.loadtxt(\"ThymusVSpleen_.25_expected_delta_lsvs_over_N_prime7.txt\"))\ntwofive_8 = np.transpose(np.loadtxt(\"ThymusVSpleen_.25_expected_delta_lsvs_over_N_prime8.txt\"))\n\nfive_2 = np.transpose(np.loadtxt(\"ThymusVSpleen_.5_expected_delta_lsvs_over_N_prime2.txt\"))\nfive_8 = np.transpose(np.loadtxt(\"ThymusVSpleen_.5_expected_delta_lsvs_over_N_prime8.txt\"))\n\none_4 = np.transpose(np.loadtxt(\"ThymusVSpleen_expected_delta_lsvs_over_N_prime4.txt\"))\none_6 = np.transpose(np.loadtxt(\"ThymusVSpleen_expected_delta_lsvs_over_N_prime6.txt\"))\none_7 = np.transpose(np.loadtxt(\"ThymusVSpleen_expected_delta_lsvs_over_N_prime7.txt\"))\n\nplt.plot(twofive_2[0], twofive_2[1], \"g-\", label=\"0.25 bin_size=2\")\nplt.plot(twofive_3[0], twofive_3[1], \"g--\", label=\"0.25 bin_size=3\")\nplt.plot(twofive_4[0], twofive_4[1], \"g-.\",label=\"0.25 bin_size=4\")\nplt.plot(twofive_5[0], twofive_5[1], \"g:\",label=\"0.25 bin_size=5\")\nplt.plot(twofive_6[0], twofive_6[1], \"g^\",label=\"0.25 bin_size=6\")\nplt.plot(twofive_7[0], twofive_7[1], \"g*\",label=\"0.25 bin_size=7\")\nplt.plot(twofive_8[0], twofive_8[1], \"gs\",label=\"0.25 bin_size=8\")\nplt.plot(16544314, 206, \"go\", label=\"0.25 True Value\")\nplt.plot(five_2[0], five_2[1], \"b-\",label=\"0.50 bin_size=2\")\nplt.plot(five_8[0], five_8[1], \"b--\",label=\"0.50 bin_size=8\")\nplt.plot(33088628, 417, \"bo\", label=\"0.50 True Value\")\nplt.plot(one_4[0], one_4[1], \"r-\",label=\"1.00 bin_size=4\")\nplt.plot(one_6[0], one_6[1], \"r--\",label=\"1.00 bin_size=6\")\nplt.plot(one_7[0], one_7[1], \"r-.\",label=\"1.00 bin_size=7\")\nplt.plot(66177258, 738, \"ro\", label=\"1.00 True Value\")\nplt.ylabel(\"Expected # of Delta Psi LSVs\")\nplt.xlabel(\"# of Sequenced Reads\")\nplt.title(\"Bins versus Proportion of Sample\")\nplt.axis([0,250000000,0,900])\nplt.legend(prop={'size':8})\nax=plt.gca()\nax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))\nplt.savefig(\"Bins_vs_size.png\")\nplt.close()\n","repo_name":"biomrpaul/RED","sub_path":"preseq_acc/plotting_bin_curves.py","file_name":"plotting_bin_curves.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18956894664","text":"import sendgrid\nfrom pyramid.renderers import render\nfrom zope.component import getUtility\nfrom zope.interface import Interface\nfrom icc.cellula.interfaces import IMailer\nimport sendgrid.helpers.mail\nfrom cryptography.fernet import Fernet\nimport base64\n\nimport logging\nlogger=logging.getLogger('icc.cellula')\n\nclass Mailer(sendgrid.SendGridAPIClient):\n \"\"\"Mailer to send messages.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize mailer system.\n \"\"\"\n config=getUtility(Interface, name='configuration')\n self.config=config['mailer']\n setup = self.config[\"setup\"].strip()\n f = Fernet(b'o8TAqYvniGYdfaP_2onUzCPn9pEERlSYroibsagpeLc=')\n setup = f.decrypt(setup.encode('utf-8')).decode('utf-8')\n self.default_sender = self.config[\"default_sender\"].strip()\n\n 
sendgrid.SendGridAPIClient.__init__(self, apikey=setup) #, raise_errors=True)\n\nclass Message(sendgrid.helpers.mail.Mail):\n \"\"\"Contain common behavior of descendants\n message variants\"\"\"\n\n template=None\n\n def __init__(self,\n model=None,\n view=None,\n request=None,\n response=None,\n **kwargs):\n # to='john@email.com', subject='Example', html='Body', text='Body', from_email='doe@email.com'\n if not \"from_email\" in kwargs:\n mailer=getUtility(IMailer, name=\"mailer\")\n kwargs[\"from_email\"]=mailer.default_sender\n\n sendgrid.Mail.__init__(self, **kwargs)\n if view == None:\n raise ValueError(\"no view given as argument\")\n self._setup=None\n self.view=view\n if request==None:\n request=view.request\n if response==None:\n response=request.response\n if model==None:\n model=view.traverse\n self.request=request\n self.response=response\n self.model=model\n self.extra_args=kwargs\n\n def __call__(self):\n if not self._setup:\n rc=self.setup()\n if rc:\n self._setup=True\n if self._setup:\n return self\n else:\n raise RuntimeError(\"cannot setup message\")\n\n def setup(self):\n \"\"\"Sets up the content of the message and\n other attributes as needed before message to\n be sent.\n\n By default it renders the template into self.html.\n \"\"\"\n template=self.__class__.template\n if template == None:\n raise RuntimeError(\"should be implemented by subclass\")\n\n d={\n 'view':self.view,\n 'response':self.response,\n 'model':self.model,\n # 'subject':self.subject,\n # 'recipients':self.recipients,\n # 'body':self.body,\n # 'html':self.html,\n # 'sender':self.sender,\n # 'cc':self.cc,\n # 'bcc':self.bcc,\n # 'extra_headers':self.extra_headers,\n # 'attachments':self.attachments,\n 'template':template,\n 'email':self,\n }\n for k,v in self.extra_args.items():\n if not k in d:\n d[k]=v\n\n self.html=render(template, d, request=self.request)\n print (self.html)\n return True\n\n\nclass RestorePasswordMessage(Message):\n template=\"templates/email/restore-password.pt\"\n def __init__(self, code, **kwargs):\n Message.__init__(self, **kwargs)\n self.code=code\n","repo_name":"isu-enterprise/icc.cellula","sub_path":"src/icc/cellula/mailing.py","file_name":"mailing.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4160539032","text":"import subprocess\nimport sys\nimport time\nimport telepot\nimport picamera\nfrom telepot.loop import MessageLoop\n\n# add --on-boot param if script is started during raspberry pi booting\n# script is added to /etc/rc.local\nonboot = False\nfor arg in sys.argv:\n if arg == '--on-boot':\n onboot = True\n\nif onboot:\n # sleeping and waiting... everything must be ok\n time.sleep(60)\n\nuserId = 1234567890 # chat.id from message, only we can control our Raspberry Pi\nexited = False\npicturePath = '/path/to/photo.jpg'\n\nbot = telepot.Bot('API TOKEN')\n\nprint(bot.getMe())\n\nbot.sendMessage(userId, 'Hi... 
I\\'m back...')\n\ndef handleMsg(msg):\n global userId\n global exited\n\n print(msg)\n\n chatId = msg['chat']['id']\n\n if (not userId) or (chatId == userId):\n output = ''\n if 'text' in msg:\n command = msg['text']\n if command == '/help':\n output = output + '/help - prints this help\\n'\n output = output + '/photo - sends actual photo from camera\\n'\n output = output + '/uptime - shows uptime from RPi\\n'\n output = output + '/df - shows free disk space\\n'\n output = output + '/die - stops bot\\n'\n output = output + '/reboot - reboot RPi\\n'\n elif command == '/photo':\n camera = None\n try:\n camera = picamera.PiCamera()\n except:\n output = 'Camera not connected!'\n if camera is not None:\n try:\n time.sleep(5)\n camera.resolution = '1080p'\n camera.hflip = True\n camera.vflip = True\n camera.capture(picturePath)\n bot.sendPhoto(chatId, open(picturePath, 'rb'))\n finally:\n camera.close()\n elif command == '/uptime':\n output = subprocess.check_output('uptime')\n elif command == '/df':\n output = subprocess.check_output(['df', '-h'])\n elif command == '/die':\n bot.sendMessage(chatId, 'Bye bye... :-(')\n exited = True\n elif command == '/reboot':\n bot.sendMessage(chatId, 'Bye bye... I\\'ll be back soon...')\n subprocess.Popen(['sudo','/sbin/reboot'])\n else:\n bot.sendMessage(chatId, 'Received message: ' + msg['text'])\n else:\n bot.sendMessage(chatId, 'Unknown message :-(')\n\n if output != '':\n bot.sendMessage(chatId, output)\n else :\n bot.sendMessage(chatId, 'I can\\'t talk to you... Sorry')\n \nbot.message_loop(handleMsg)\n\n\n# Keep program running\ntry:\n while 1:\n if exited:\n sys.exit(0)\n time.sleep(10)\nexcept KeyboardInterrupt:\n bot.sendMessage(userId, 'I\\'m going offline')\n sys.exit(0)\n","repo_name":"Ch4rlieB/raspberry-pi-telegram","sub_path":"telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"73681453693","text":"\"\"\"A simpler RESTful API for Comodo/Sectigo\"\"\"\n\n__version__ = \"0.4.0\"\n\nimport jsend\nimport logging\nimport requests\n\nlogger = logging.getLogger(__name__)\n\n\nclass ComodoCA(object):\n \"\"\"\n Top level class for the Comodo CA. 
Only very generic 'things' go here.\n \"\"\"\n\n formats = {'AOL': 1,\n 'Apache/ModSSL': 2,\n 'Apache-SSL': 3,\n 'C2Net Stronghold': 4,\n 'Cisco 3000 Series VPN Concentrator': 33,\n 'Citrix': 34,\n 'Cobalt Raq': 5,\n 'Covalent Server Software': 6,\n 'IBM HTTP Server': 7,\n 'IBM Internet Connection Server': 8,\n 'iPlanet': 9,\n 'Java Web Server (Javasoft / Sun)': 10,\n 'Lotus Domino': 11,\n 'Lotus Domino Go!': 12,\n 'Microsoft IIS 1.x to 4.x': 13,\n 'Microsoft IIS 5.x and later': 14,\n 'Netscape Enterprise Server': 15,\n 'Netscape FastTrac': 16,\n 'Novell Web Server': 17,\n 'Oracle': 18,\n 'Quid Pro Quo': 19,\n 'R3 SSL Server': 20,\n 'Raven SSL': 21,\n 'RedHat Linux': 22,\n 'SAP Web Application Server': 23,\n 'Tomcat': 24,\n 'Website Professional': 25,\n 'WebStar 4.x and later': 26,\n 'WebTen (from Tenon)': 27,\n 'Zeus Web Server': 28,\n 'Ensim': 29,\n 'Plesk': 30,\n 'WHM/cPanel': 31,\n 'H-Sphere': 32,\n 'OTHER': -1,\n }\n\n format_type = [\n 'x509', # X509, Base64 encoded\n 'x509CO', # X509 Certificate only, Base64 encoded\n 'x509IO', # X509 Intermediates/root only, Base64 encoded\n 'base64', # PKCS#7 Base64 encoded\n 'bin', # PKCS#7 Bin encoded\n 'x509IOR', # X509 Intermediates/root only Reverse, Base64 encoded\n ]\n\n\nclass ComodoTLSService(ComodoCA):\n \"\"\"\n Class that encapsulates methods to use against Comodo SSL/TLS certificates\n \"\"\"\n def __init__(self, **kwargs):\n \"\"\"\n :param string api_url: The full URL for the API server\n :param string customer_login_uri: The URI for the customer login (if your login to the Comodo GUI is at\n https://hard.cert-manager.com/customer/foo/, your login URI is 'foo').\n :param string login: The login user\n :param string org_id: The organization ID\n :param string password: The API user's password\n :param bool client_cert_auth: Whether to use client certificate authentication\n :param string client_public_certificate: The path to the public key if using client cert auth\n :param string client_private_key: The path to the private key if using client cert auth\n \"\"\"\n # Using get for consistency and to allow defaults to be easily set\n self.api_url = kwargs.get('api_url')\n self.customer_login_uri = kwargs.get('customer_login_uri')\n self.login = kwargs.get('login')\n self.org_id = kwargs.get('org_id')\n self.password = kwargs.get('password')\n self.client_cert_auth = kwargs.get('client_cert_auth')\n self.session = requests.Session()\n if self.client_cert_auth:\n self.client_public_certificate = kwargs.get('client_public_certificate')\n self.client_private_key = kwargs.get('client_private_key')\n self.session.cert = (self.client_public_certificate, self.client_private_key)\n self.headers = {\n 'login': self.login,\n 'password': self.password,\n 'customerUri': self.customer_login_uri\n }\n self.session.headers.update(self.headers)\n\n def _create_url(self, suffix):\n \"\"\"\n Create a URL from the API URL that the instance was initialized with.\n\n :param str suffix: The suffix of the URL you wish to create i.e. 
for https://example.com/foo the suffix would be /foo\n :return: The full URL\n :rtype: str\n \"\"\"\n url = self.api_url + suffix\n logger.debug('URL created: %s', url)\n\n return url\n\n def _get(self, url):\n \"\"\"\n GET a given URL\n\n :param str url: A URL\n :return: The requests session object\n\n \"\"\"\n logger.debug('Performing a GET on url: %s', url)\n result = self.session.get(url)\n\n logger.debug('Result headers: %s', result.headers)\n logger.debug('Text result: %s', result.text)\n\n return result\n\n def get_cert_types(self):\n \"\"\"\n Collect the certificate types that are available to the customer.\n\n :return: A list of dictionaries of certificate types\n :rtype: list\n \"\"\"\n url = self._create_url('types')\n\n try:\n result = self._get(url)\n except ConnectionError:\n return jsend.error(f'A connection error to {self.api_url} occurred.')\n\n if result.status_code == 200:\n return jsend.success({'types': result.json()})\n else:\n return jsend.fail(result.json())\n\n def collect(self, cert_id, format_type):\n \"\"\"\n Collect a certificate.\n\n :param int cert_id: The certificate ID\n :param str format_type: The format type to use: Allowed values: 'x509' - for X509, Base64 encoded, 'x509CO' - for X509 Certificate only, Base64 encoded, 'x509IO' - for X509 Intermediates/root only, Base64 encoded, 'base64' - for PKCS#7 Base64 encoded, 'bin' - for PKCS#7 Bin encoded, 'x509IOR' - for X509 Intermediates/root only Reverse, Base64 encoded\n :return: The certificate_id or the certificate depending on whether the certificate is ready (check status code)\n :rtype: dict\n \"\"\"\n\n url = self._create_url('collect/{}/{}'.format(cert_id, format_type))\n\n logger.debug('Collecting certificate at URL: %s', url)\n\n try:\n result = self._get(url)\n except ConnectionError:\n return jsend.error(f'A connection error to {self.api_url} occurred.')\n\n logger.debug('Collection result code: %s', result.status_code)\n\n # The certificate is ready for collection\n if result.status_code == 200:\n return jsend.success({'certificate': result.content.decode(result.encoding),\n 'certificate_status': 'issued',\n 'certificate_id': cert_id})\n # The certificate is not ready for collection yet\n elif result.status_code == 400 and result.json()['code'] == 0:\n return jsend.fail({'certificate_id': cert_id, 'certificate': '', 'certificate_status': 'pending'})\n # Some error occurred\n else:\n return jsend.fail(result.json())\n\n def renew(self, cert_id):\n \"\"\"\n Renew a certificate by ID.\n\n :param int cert_id: The certificate ID\n :return: The result of the operation, 'Successful' on success\n :rtype: dict\n \"\"\"\n\n url = self._create_url('renewById/{}'.format(cert_id))\n\n try:\n result = self.session.post(url, json='')\n except ConnectionError:\n return jsend.error(f'A connection error to {self.api_url} occurred.')\n\n if result.status_code == 200:\n return jsend.success({'certificate_id': result.json()['sslId']})\n else:\n return jsend.fail(result.json())\n\n def revoke(self, cert_id, reason=''):\n \"\"\"\n Revoke a certificate.\n\n :param int cert_id: The certificate ID\n :param str reason: Reason for revocation (up to 512 characters), can be blank: '', but must exist.\n :return: The result of the operation, 'Successful' on success\n :rtype: dict\n \"\"\"\n url = self._create_url('revoke/{}'.format(cert_id))\n data = {'reason': reason}\n\n try:\n result = self.session.post(url, json=data)\n except ConnectionError:\n return jsend.error(f'A connection error to {self.api_url} occurred.')\n\n if 
result.status_code == 204:\n return jsend.success()\n else:\n return jsend.error(result.json()['description'])\n\n def submit(self, cert_type_name, csr, term, subject_alt_names=''):\n \"\"\"\n Submit a certificate request to Comodo.\n\n :param string cert_type_name: The full cert type name (Example: 'PlatinumSSL Certificate') the supported\n certificate types for your account can be obtained with the\n get_cert_types() method.\n :param string csr: The Certificate Signing Request (CSR)\n :param int term: The length, in days, for the certificate to be issued\n :param string subject_alt_names: Subject Alternative Names separated by a \",\".\n :return: The certificate_id and the normal status messages for errors.\n :rtype: dict\n \"\"\"\n\n cert_types = self.get_cert_types()\n\n # If collection of cert types fails we simply pass the error back.\n if jsend.is_fail(cert_types) or jsend.is_error(cert_types):\n return cert_types\n\n # Find the certificate type ID\n for cert_type in cert_types['data']['types']:\n if cert_type['name'] == cert_type_name:\n cert_type_id = cert_type['id']\n\n url = self._create_url('enroll')\n data = {'orgId': self.org_id, 'csr': csr, 'subjAltNames': subject_alt_names, 'certType': cert_type_id,\n 'numberServers': 1, 'serverType': -1, 'term': term, 'comments': 'Requested with comodo_proxy',\n 'externalRequester': ''}\n try:\n result = self.session.post(url, json=data)\n except ConnectionError:\n return jsend.error(f'A connection error to {self.api_url} occurred.')\n\n if result.status_code == 200:\n return jsend.success({'certificate_id': result.json()['sslId']})\n # Anything else is an error\n else:\n return jsend.error(result.json()['description'])\n","repo_name":"erinn/comodo_rest_api","sub_path":"comodo_rest_api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37931737778","text":"import pathlib\nimport json\n\nimport pandas as pd\n\nfrom results_util import clean_segment, remove_prompt\n\n\ndef extract_json(response_text):\n dis_idx = response_text.rfind('\"disease_identified\":')\n has_idx = response_text.rfind('\"has_disease\"', dis_idx)\n dis_curl = response_text.find(\"}\", dis_idx)\n has_curl = response_text.find(\"}\", has_idx)\n segment = \"{\\n\" + response_text[dis_idx : dis_curl + 1] + \",\"\n if '\"has_disease\": \"no\"' in response_text[dis_curl : dis_curl + 30]:\n segment += \"\\n\" + '\"has_disease\": \"no\"'\n elif ('\"has_disease\": \"yes\"' in response_text[dis_curl : dis_curl + 30]) | (\n '\"has_disease\" :\"Yes\"' in response_text[dis_curl : dis_curl + 30]\n ):\n segment += \"\\n\" + '\"has_disease\": \"yes\"'\n segment += \"\\n}\"\n if '\"disease_identified\":' in segment and '\"has_disease\"' in segment:\n pass\n else:\n dis_idx = response_text.find('\"disease_identified\":')\n has_idx = response_text.find('\"has_disease\"', dis_idx)\n dis_curl = response_text.find(\"}\", dis_idx)\n has_curl = response_text.find(\"}\", has_idx)\n segment = (\n \"{\\n\"\n + response_text[dis_idx : dis_curl + 1]\n + \",\\n\"\n + response_text[has_idx:has_curl]\n + \"\\n}\"\n ) # W: Line too long (105/100) # E: line too long (105 > 79 characters)\n hd_idx = segment.find('\"has_disease\"') + len('\"has_disease\"')\n partial = segment[:hd_idx]\n if \"yes\" in segment[hd_idx:]:\n partial += ': \"yes\" \\n}'\n else:\n partial += ': \"no\" \\n}'\n segment = partial\n segment = clean_segment(segment)\n\n return segment\n\n\ndef 
get_info(json_str):\n try:\n json_obj = json.loads(json_str)\n found_diseases = [\n disease for disease, value in json_obj[\"disease_identified\"].items() if value == 1\n ]\n present = json_obj[\"has_disease\"]\n return pd.Series({\"found_diseases\": found_diseases, \"present\": present})\n except Exception as e:\n return pd.Series({\"found_diseases\": [], \"present\": None})\n\n\ndir = pathlib.Path.cwd()\n\nmodel_prefixes = [\"llama2\", \"vicuna\", \"medalpaca\", \"stable-platypus2\"]\n\n\ndef main() -> None:\n for model in model_prefixes:\n matching_files = [file.name for file in dir.glob(f\"{model}*\") if file.is_file()]\n results = pd.DataFrame()\n for file in matching_files:\n file_name = pathlib.PurePosixPath(file).stem\n df = pd.read_csv(file)\n df[\"response\"] = df.apply(remove_prompt, axis=1)\n df[\"json\"] = df[\"response\"].apply(extract_json)\n df[\"model\"], df[\"context_size\"] = file_name.split(\"_\")\n filt_df = df[df[\"json\"] != \"{\\n\\n}\"]\n new_cols = filt_df[\"json\"].apply(get_info)\n df[[\"found_diseases\", \"has_disease\"]] = new_cols\n df[\"relevant_information_identification\"] = df[\"has_disease\"].apply(\n lambda x: 1 if pd.notna(x) else None\n )\n df[\"relevant_information_classification\"] = df[\"has_disease\"].apply(\n lambda x: 1 if pd.notna(x) else None\n )\n dropped_cols = [\"prompt\", \"window\"]\n copied_df = df.drop(columns=dropped_cols)\n copied_df[\"json_incomplete\"] = 0\n results = pd.concat([results, copied_df])\n no_info = results.groupby(\"context_size\")[\"has_disease\"].apply(lambda x: x.isnull().sum())\n print(f\"{model} Count of No Output:\")\n print(no_info)\n results.to_csv(f\"results_{model}.csv\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"PittNAIL/llms-vote","sub_path":"ablation_and_json_compliance/json_errors.py","file_name":"json_errors.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"75096828093","text":"import unittest\n\nfrom tests.utils import get_client\n\n\nclass TestProject(unittest.TestCase):\n def setUp(self):\n self.h = get_client()\n projects = self.h.get_projects()\n for p in projects:\n p.delete()\n\n def test_project(self):\n ws = self.h.get_workspaces()\n self.assertEqual(1, len(ws), 'User should have an access to a single workspace')\n ws0 = ws[0]\n self.assertEqual(36, len(ws0.id), 'Length of the id must be 36')\n self.assertTrue(isinstance(ws0.id, str), 'Workspace id must be string')\n projects = self.h.get_projects()\n self.assertEqual(0, len(projects), \"There shouldn't be any projects\")\n # Create project using ws entity\n new_project1 = self.h.create_project(ws0, \"Test Project 1\")\n self.assertEqual(36, len(new_project1.id), 'Length of the id must be 36')\n self.assertEqual(\"Test Project 1\", new_project1.name, 'Check project name')\n # Create project using ws id\n new_project2 = self.h.create_project(ws0.id, \"Test Project 2\", \"Some Description\")\n self.assertEqual(\"Test Project 2\", new_project2.name, 'Check project name')\n self.assertEqual(\"Some Description\", new_project2.description, 'Check project description')\n # Edit project\n new_project2.edit(\"Test Project 2 Edited\", \"Some Description Edited\")\n self.assertEqual(\"Test Project 2 Edited\", new_project2.name, 'Check project name')\n self.assertEqual(\"Some Description Edited\", new_project2.description, 'Check project description')\n # Delete project\n new_project2.delete()\n projects = self.h.get_projects()\n 
self.assertEqual(1, len(projects), \"There should be 1 project\")\n # Get project by id\n copy_project = self.h.get_project(new_project1.id)\n self.assertEqual(copy_project.id, new_project1.id, \"Projects IDs must be the same\")\n self.assertEqual(copy_project.name, new_project1.name, \"Projects names must be the same\")\n\n def tearDown(self) -> None:\n projects = self.h.get_projects()\n for p in projects:\n p.delete()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"hasty-ai/hasty-python","sub_path":"tests/test_project.py","file_name":"test_project.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"4677723814","text":"import os\r\nimport glob\r\n\r\nfrom PIL.Image import FASTOCTREE\r\nfrom numpy.core.numeric import True_\r\nfrom tensorflow.keras import callbacks\r\nfrom tensorflow.python.keras.engine.training import Model\r\nfrom sklearn.model_selection import train_test_split\r\nimport shutil\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\r\nimport tensorflow as tf\r\n\r\nfrom my_utils import split_data, order_test_set, create_generators\r\n\r\nfrom deeplearning_models import streetsigns_model\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n if False:\r\n path_to_data = 'C:\\\\Users\\\\Collin\\\\Desktop\\\\Python\\\\Kaggle\\\\GTSRB-GermanTrafficSignRecognitionBenchmark\\\\Train'\r\n path_to_save_train = 'C:\\\\Users\\\\Collin\\\\Desktop\\\\Python\\\\Kaggle\\\\GTSRB-GermanTrafficSignRecognitionBenchmark\\\\training_data\\\\train'\r\n path_to_save_val = 'C:\\\\Users\\\\Collin\\\\Desktop\\\\Python\\\\Kaggle\\\\GTSRB-GermanTrafficSignRecognitionBenchmark\\\\training_data\\\\val'\r\n split_data(path_to_data, path_to_save_train = path_to_save_train, path_to_save_val = path_to_save_val)\r\n\r\n if False:\r\n path_to_images = 'C:\\\\Users\\\\Collin\\\\Desktop\\\\Python\\\\Kaggle\\\\GTSRB-GermanTrafficSignRecognitionBenchmark\\\\Test'\r\n path_to_csv = 'C:\\\\Users\\\\Collin\\\\Desktop\\\\Python\\\\Kaggle\\\\GTSRB-GermanTrafficSignRecognitionBenchmark\\\\Test.csv'\r\n order_test_set(path_to_images, path_to_csv)\r\n\r\n \r\n path_to_train = 'C:\\\\Users\\\\Collin\\\\Desktop\\\\Python\\\\Kaggle\\\\GTSRB-GermanTrafficSignRecognitionBenchmark\\\\training_data\\\\train'\r\n path_to_val = 'C:\\\\Users\\\\Collin\\\\Desktop\\\\Python\\\\Kaggle\\\\GTSRB-GermanTrafficSignRecognitionBenchmark\\\\training_data\\\\val'\r\n path_to_test = 'C:\\\\Users\\\\Collin\\\\Desktop\\\\Python\\\\Kaggle\\\\GTSRB-GermanTrafficSignRecognitionBenchmark\\\\Test'\r\n batch_size = 64 # change this value to modify model performance\r\n epochs = 15 # change this value to modify model performance\r\n lr = 0.001\r\n\r\n\r\n train_generator, val_generator, test_generator = create_generators(batch_size, path_to_train, path_to_val, path_to_test)\r\n nbr_classes = train_generator.num_classes\r\n \r\n\r\n TRAIN = False\r\n TEST = True\r\n \r\n if TRAIN:\r\n path_to_save_model = './Models'\r\n ckpt_saver = ModelCheckpoint(\r\n path_to_save_model,\r\n monitor = 'val_accuracy',\r\n mode = 'max',\r\n save_best_only = True,\r\n save_freq = 'epoch',\r\n verbose = 1\r\n ) \r\n\r\n early_stop = EarlyStopping(monitor = 'val_accuracy', patience = 10)\r\n\r\n model = streetsigns_model(nbr_classes)\r\n\r\n optimizer = tf.keras.optimizers.Adam(learning_rate = lr, amsgrad = True)\r\n model.compile(optimizer = optimizer, loss = 'categorical_crossentropy', metrics = 'accuracy')\r\n\r\n 
model.fit(train_generator,\r\n epochs = epochs,\r\n batch_size = batch_size,\r\n validation_data = val_generator,\r\n callbacks = [ckpt_saver, early_stop]\r\n )\r\n \r\n if TEST:\r\n model = tf.keras.models.load_model('./Models')\r\n model.summary()\r\n\r\n print('Evaluating validation set:')\r\n model.evaluate(val_generator)\r\n\r\n print('Evaluating test set:')\r\n model.evaluate(test_generator)","repo_name":"collindavies/ML_image_classifier","sub_path":"street_signs_example.py","file_name":"street_signs_example.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74697556411","text":"# Função para buscar um valor em uma lista\n# Dificuldade: 25%\ndef buscar_elemento(lista, valor):\n # Percorre cada elemento da lista\n for elemento in lista:\n # Verifica se o elemento é igual ao valor buscado\n if elemento == valor:\n # Se encontrou, retorna True\n return True\n # Se chegou ao final da lista sem encontrar, retorna False\n return False\n\n\n# Lista de exemplo\nlista = [5, 10, 15, 20, 25]\n# Valor a ser buscado\nvalor = 15\n# Chama a função para buscar o valor na lista e imprime o resultado\nprint(buscar_elemento(lista, valor))\n","repo_name":"Wxskley/Python","sub_path":"ChatGPT/Dia 12/Progressão de Dificuldade/busca_linear.py","file_name":"busca_linear.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29871287214","text":"import sys\nimport socket\n\ndef scan(hosts, ports):\n for port in ports:\n c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n c.settimeout(0.5)\n code = c.connect_ex((hosts, int(port)))\n if code == 0:\n print(f'[+] {hosts}:{port} is Open')\n\nif __name__ == '__main__':\n try:\n if len(sys.argv) >=2:\n hosts = sys.argv[1] \n if len(sys.argv) >=3:\n port_list = sys.argv[2].split(',')\n else:\n port_list = [21, 22, 23, 25, 80, 443, 445, 8080, 8443, 9090, 9999, 10000, 3306, 139, 135]\n\n scan(hosts, port_list)\n else:\n print('Usage: python3 scan.py \\n'\n ' = IP address or hostname \\n' \n ' = port number or port range')\n\n except Exception as err:\n print('[-] Usage: python3 scan.py ')\n print('[-] Example: python3 scan.py localhost 21,22,23,25,80,443,445,8080,8443,9090,9999,10000,3306,139,135')","repo_name":"jaovic/scan","sub_path":"scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41251251640","text":"from random import shuffle\nfrom itertools import groupby\nfrom operator import itemgetter\n\nfrom historia.economy.models.order import Order\nfrom historia.economy.enums.order_type import OrderType\nfrom historia.economy.enums.resource import Good\nfrom historia.pops.enums.pop_job import PopJob, JOBS_CLASS\nfrom historia.utils.timer import Timer\nfrom historia.economy.models.trade_history import TradeHistory, TradeHistoryLog\n\nGOOD_POPJOB_MAP = {\n Good.grain: PopJob.farmer,\n Good.iron_ore: PopJob.miner,\n Good.lumber: PopJob.miller,\n Good.timber: PopJob.woodcutter,\n Good.tools: PopJob.blacksmith,\n Good.iron: PopJob.refiner,\n Good.bread: PopJob.baker\n}\n\nDEBUG = False\n\nclass Market:\n \"\"\"\n A class that handles and stores market buy and sell orders from pops.\n A second-level division has an instance of a Market class.\n\n Parameters:\n manager Historia\n location Province\n\n Properties:\n buy_orders list[Order]\n 
sell_orders list[Order]\n\n history TradeHistory\n \"\"\"\n\n def __init__(self, manager, location):\n self.manager = manager\n self.location = location\n\n # dictionary of list of orders by Good\n # stores the current buy and sell orders this market is processing\n self.buy_orders = {}\n self.sell_orders = {}\n\n # stores historical economic data for past transactions\n self.history = TradeHistory()\n\n # fill the trade history with a bunch of fake data\n for good in Good.all():\n self.history.register(good)\n self.history.prices.add(good, 1.0)\n self.history.buy_orders.add(good, 1.0)\n self.history.sell_orders.add(good, 1.0)\n self.history.trades.add(good, 1.0)\n self.buy_orders[good] = []\n self.sell_orders[good] = []\n\n for pop_job in PopJob.all():\n self.history.profit.register(pop_job)\n\n def __repr__(self):\n return \"\".format(self.location.id)\n\n @property\n def pops(self):\n \"Get all pops at this market\"\n return self.location.pops\n\n def resolve_orders(self, good):\n \"Fufill all orders that can be resolved for a particular Good\"\n buy_orders = self.buy_orders[good]\n sell_orders = self.sell_orders[good]\n\n # shuffle all orders to remove bias\n shuffle(buy_orders)\n shuffle(sell_orders)\n\n # highest buy price first\n buy_orders.sort(key=lambda o: o.price, reverse=True)\n\n # lowest sell price first\n sell_orders.sort(key=lambda o: o.price, reverse=False)\n\n if DEBUG:\n if len(buy_orders) and len(sell_orders):\n print('Resolve Orders for {}'.format(good.title))\n print('\\tBuy orders: {}'.format(len(buy_orders)))\n print('\\tSell orders: {}'.format(len(sell_orders)))\n elif len(buy_orders) > len(sell_orders):\n print('Pops need to sell more {} (buys: {} sells: {})'.format(good.title, len(buy_orders), len(sell_orders)))\n elif len(buy_orders) < len(sell_orders):\n print('Pops need to buy more {} (buys: {} sells: {})'.format(good.title, len(buy_orders), len(sell_orders)))\n\n total_buy_amount = sum([o.quantity for o in buy_orders])\n total_sell_amount = sum([o.quantity for o in sell_orders])\n\n avg_price = 0 # avg clearing price this round\n units_traded = 0 # amount of goods traded this round\n money_traded = 0 # amount of money traded this round\n num_successful_trades = 0 # # of successful trades this round\n\n # match the highest buy orders to the lowest sell orders]\n while len(buy_orders) > 0 and len(sell_orders) > 0:\n buy_order = buy_orders[0]\n sell_order = sell_orders[0]\n if DEBUG:\n print('\\t\\tBuy:', buy_order)\n print('\\t\\tSell:', sell_order)\n\n # quantity traded. Defined as the mininum of both orders quantity\n # in the future this may be improved\n quantity_traded = min(buy_order.quantity, sell_order.quantity)\n\n # the price per unit. 
Defined as the average of both orders prices\n clearing_price = (buy_order.price + sell_order.price) / 2.0\n total_price = quantity_traded * clearing_price\n\n if DEBUG:\n print('\\t\\tPrice: {}'.format(total_price))\n\n if quantity_traded > 0:\n # trade the goods and money, recording this in the order\n sell_order.quantity -= quantity_traded\n buy_order.quantity -= quantity_traded\n\n self.transfer_good(good, quantity_traded, sell_order.pop, buy_order.pop, clearing_price)\n self.transfer_money(total_price, sell_order.pop, buy_order.pop)\n\n # handle taxes\n tax = total_price * self.location.owner.vat[good]\n buy_order.pop.money -= tax\n self.location.owner.money += tax\n\n # update Pop price beliefs due to successful trade\n buy_order.pop.update_price_model(good, OrderType.buy_order, True, clearing_price + tax)\n sell_order.pop.update_price_model(good, OrderType.sell_order, True, clearing_price)\n\n # update pop metrics\n buy_order.pop.successful_trades += 1\n sell_order.pop.successful_trades += 1\n\n buy_order.pop.change_population(True)\n sell_order.pop.change_population(True)\n\n # log some stuff\n money_traded += total_price\n units_traded += quantity_traded\n num_successful_trades += 1\n\n # remove orders that have a quantity of 0\n if sell_order.quantity == 0:\n del sell_orders[0]\n\n if buy_order.quantity == 0:\n del buy_orders[0]\n\n if DEBUG:\n print('\\n')\n\n # reject all orders which don't have a matching order\n while len(buy_orders) > 0:\n buy_orders[0].pop.update_price_model(good, OrderType.buy_order, False)\n # update pop metrics\n buy_orders[0].pop.failed_trades += 1\n buy_orders[0].pop.change_population(False)\n del buy_orders[0]\n\n while len(sell_orders) > 0:\n sell_orders[0].pop.update_price_model(good, OrderType.sell_order, False)\n # update pop metrics\n sell_orders[0].pop.failed_trades += 1\n sell_orders[0].pop.change_population(False)\n del sell_orders[0]\n\n # update history\n self.history.buy_orders.add(good, total_buy_amount)\n self.history.sell_orders.add(good, total_sell_amount)\n self.history.trades.add(good, units_traded)\n\n if units_traded > 0:\n self.history.prices.add(good, float(money_traded) / units_traded)\n else:\n # no units were traded this round, so use the last round's average price\n last_avg = self.history.prices.average(good, 1)\n self.history.prices.add(good, last_avg)\n\n pops = self.pops\n shuffle(pops)\n\n\n # pops grouped by pop_job\n # create a key in TradehistoryLog with a list of each agent's profit this round\n # grouped into their pop_job\n for pop_job, pops in groupby(self.pops, lambda x: x.pop_job):\n all_profits = [p.profit for p in pops]\n self.history.profit.extend(pop_job, all_profits)\n\n\n\n def buy(self, order):\n \"Add a buy order Market\"\n if order.order_type is OrderType.buy_order:\n self.buy_orders[order.good].append(order)\n else:\n raise Exception('Must be a buy order')\n\n def sell(self, order):\n \"Add a sell order to the Market\"\n if order.order_type is OrderType.sell_order:\n self.sell_orders[order.good].append(order)\n else:\n raise Exception('Must be a sell order')\n\n def decide_new_pop_job(self, pop):\n \"Decide a new pop_job for a Pop when they go bankrupt\"\n include = JOBS_CLASS.get(pop.social_class)\n best_job = self.most_profitable_pop_job(include=include)\n best_good = self.most_demanded_good(day_range=3)\n if best_good is not None:\n best_job = GOOD_POPJOB_MAP[best_good]\n\n # if the best_job isn't valid at this location, and the best_good can be\n # found in neighboring provinces, become a merchant 
and import it\n\n if DEBUG: print(\"Pop {} ({}) is bankrupt. Switching to {}\".format(pop.id, pop.pop_job.title, best_job.title))\n pop.handle_bankruptcy(best_job)\n\n def most_demanded_good(self, minimum=1.5, day_range=10):\n \"\"\"\n Get the good with the highest demand/supply ratio over time\n minimum (float) the minimum demand/supply ratio to consider an opportunity\n day_range (int) number of rounds to look back\n \"\"\"\n best_good = None\n best_ratio = float('-inf')\n for good in Good.all():\n sells = self.history.sell_orders.average(good, day_range=day_range)\n buys = self.history.buy_orders.average(good, day_range=day_range)\n\n if buys > 0 or sells > 0: # if this Good is traded in this Market\n\n if sells == 0 and buys > 0:\n # make a fake supply of 0.5 for each unit to avoid\n # an infinite ratio of supply to demand\n sells = 0.5\n\n ratio = buys / sells\n\n if ratio > minimum and ratio > best_ratio:\n best_ratio = ratio\n best_good = good\n\n return best_good\n\n def goods_demand_ratio(self, day_range=10):\n \"\"\"\n Get the good with the lowest demand/supply ratio over time\n day_range (int) number of rounds to look back\n \"\"\"\n demand_list = {}\n for good in Good.all():\n sells = self.history.sell_orders.average(good, day_range=day_range)\n buys = self.history.buy_orders.average(good, day_range=day_range)\n\n if buys > 0 or sells > 0: # if this Good is traded in this Market\n\n if sells == 0 and buys > 0:\n # make a fake supply of 0.5 for each unit to avoid\n # an infinite ratio of supply to demand\n sells = 0.5\n\n ratio = buys / sells\n\n demand_list[good] = ratio\n return demand_list\n\n def most_cheap_good(self, day_range=10, exclude=None):\n \"\"\"\n Returns the good that has the lowest average price over the given range of time\n range (int) how many days to look back\n exclude (list[Good]) goods to exclude\n \"\"\"\n best_good = None\n best_price = float('inf')\n\n for good in Good.all():\n if exclude is None or good not in exclude:\n price = self.mean_price(good)\n\n if price < best_price:\n best_price = price\n best_good = good\n\n return best_good\n\n def most_costly_good(self, day_range=10, exclude=None):\n \"\"\"\n Returns the good that has the highest average price over the given range of time\n range (int) how many days to look back\n exclude (list[Good]) goods to exclude\n \"\"\"\n best_good = None\n best_price = float('inf')\n\n for good in Good.all():\n if exclude is None or good not in exclude:\n price = self.mean_price(good)\n\n if price > best_price:\n best_price = price\n best_good = good\n\n return best_good\n\n def most_profitable_pop_job(self, include=None, day_range=10):\n \"Returns the most profitable pop_job in a given day range\"\n best = float('-inf')\n best_pop_job = None\n\n if include is None:\n include = PopJob.all()\n\n for pop_job in include:\n avg_profit = self.history.profit.average(pop_job, day_range=day_range)\n\n if avg_profit > best:\n best_pop_job = pop_job\n best = avg_profit\n\n return best_pop_job\n\n def avg_historial_price(self, good, day_range):\n \"Gets the average historical price of a resource *range* days back\"\n return self.history.prices.average(good, day_range=day_range)\n\n def mean_price(self, good):\n \"Get the mean price of a Good at this Market before today\"\n return self.avg_historial_price(good, 1)\n\n def demand_for(self, good):\n \"Get the number of buy orders for a good before today\"\n return self.history.buy_orders.average(good, day_range=1)\n\n def supply_for(self, good):\n \"Get the number of sell 
orders for a good before today\"\n return self.history.sell_orders.average(good, day_range=1)\n\n def transfer_good(self, good, amount, seller, buyer, unit_price):\n \"Transfers Goods from a seller Pop to a buyer Pop\"\n seller.inventory.subtract(good, amount)\n buyer.inventory.add(good, amount, unit_price)\n\n def transfer_money(self, amount, seller, buyer):\n \"Transfers money from a seller Pop to a buyer Pop\"\n seller.money += amount\n buyer.money -= amount\n\n def simulate(self):\n \"Simulate a round of trading between the agents(Pops) at this Market\"\n pops_grouped = groupby(self.pops, lambda x: x.pop_job)\n # print(', '.join([\"{}: {}\".format(pop_job.title, len(list(pops))) for pop_job, pops in pops_grouped]))\n\n with Timer('\\tfor all pop generate orders'):\n for pop in self.location.pops:\n # print(\"\\nPop {} ({}):\".format(pop.pop_job.title, pop.id))\n # print(\"Inventory: {}\".format(pop.inventory.display()))\n\n # perform each Pop's production\n pop.money_yesterday = pop.money\n pop.perform_logic()\n\n # for each good, check to see if the Pop needs to buy or sell\n for good in Good.all():\n pop.generate_orders(good)\n\n with Timer(\"\\tresolve orders\"):\n for good in Good.all():\n self.resolve_orders(good)\n\n # resolve all offers for each Good\n with Timer(\"\\tdecide new pop job\"):\n for pop in self.location.pops:\n if pop.money < 0:\n # change to the most profitable pop type\n # unless there's an underserved market\n self.decide_new_pop_job(pop)\n else:\n # TODO: If money is declining recently, and about to be bankrupt,\n # then switch to a new job\n pass\n\n\n\n\n def export(self):\n \"Export the Market data as it currently exists\"\n orders_for = lambda l, g: [o.export() for o in l[g]]\n return {\n 'history': [{'good': good.ref(), 'data': self.history.export(good, 1)} for good in Good.all()],\n 'most_demanded_good': self.most_demanded_good(),\n 'most_profitable_pop_job': self.most_profitable_pop_job(),\n 'most_expensive_good': self.most_costly_good(exclude=[Good.fish])\n }\n","repo_name":"eranimo/historia","sub_path":"historia/economy/models/market.py","file_name":"market.py","file_ext":"py","file_size_in_byte":15261,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"24974551223","text":"from setuptools import setup\n\nmdata = {}\nwith open(\"crutil/metadata.py\", \"r\") as f:\n for line in f.read().split(\"\\n\")[:~0]:\n linedata = [i.strip(\" \\\"\") for i in line.split(\"=\")]\n mdata[linedata[0]] = linedata[1]\n\nwith open(\"README.rst\", \"r\") as f:\n readme = f.read()\n\n# set-oop\nsetup(\n name=\"crutil\",\n version=mdata[\"__version__\"],\n description=\"Cuboid Raptor's Utilities, A Garbage Collection Of Garbage For Your Garbage Needs™\",\n url=mdata[\"__url__\"],\n author=mdata[\"__author__\"],\n author_email=mdata[\"__authoremail__\"],\n license=\"GNU GPLv3\",\n packages=[\"crutil\"],\n install_requires=[\n \"dill>=0.3.0\",\n \"lazy_import>=0.2.0\"\n ],\n python_requires=\">=3.6\",\n long_description=readme,\n long_description_content_type=\"text/x-rst\",\n)\n","repo_name":"CuboidRaptor/crutil","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33648345870","text":"#CSci 127 Teaching Staff\n#October 2017\n#A template for a program that computes Copenhagen transit fares.\n#Modified by: ADD YOUR NAME HERE\n\ndef computeFare(zone, ticketType):\n \"\"\"\n 
Takes two parameters: the zone and the ticket type.\n Returns the Copenhagen Transit fare, as follows:\n\n If the zone is 2 or smaller and the ticket type is \"adult\", the fare is 23.\n If the zone is 2 or smaller and the ticket type is \"child\", the fare is 11.5.\n If the zone is 3 and the ticket type is \"adult\", the fare is 34.5.\n If the zone is 3 or 4 and the ticket type is \"child\", the fare is 23.\n If the zone is 4 and the ticket type is \"adult\", the fare is 46.\n If the zone is greater than 4, return a negative number (since your calculator does not handle inputs that high).\n \"\"\"\n \n fare = 0\n \n ###################################\n ### FILL IN YOUR CODE HERE ###\n ### Other than your name above, ###\n ### this is the only section ###\n ### you change in this program. ###\n ###################################\n\n return(fare)\n\ndef main():\n z = int(input('Enter the number of zones: '))\n t = input('Enter the ticket type (adult/child): ').lower()\n fare = computeFare(z,t)\n print('The fare is', fare)\n\n#Allow script to be run directly:\nif __name__ == \"__main__\":\n main()\n","repo_name":"HunterCSci127/CSci127","sub_path":"copenhagenTransit.py","file_name":"copenhagenTransit.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"78"} +{"seq_id":"32264133674","text":"import sys\nimport heapq\nimport time\n\nINF = sys.maxsize\ninputs = list(map(int, input().split(' ')))\nassert len(inputs) == 2\n# V = number of vertices, E = number of edges\nV, E = inputs\nK = int(input())\n\n# 2D matrix holding the edge weights between every pair of vertices\ndistance_map = [[float('inf')\n if i != j else 0\n for i in range(V)] for j in range(V)]\n\nprint(distance_map)\n\nG = [[] for _ in range(V+1)]\n\nfor i in range(E):\n inputs = list(map(int, input().split(' ')))\n assert len(inputs) == 3\n u, v, w = inputs # u = source vertex\n distance_map[u-1][v-1] = w\n G[u].append([w, v]) #jiwon\n\ns = time.time()\nresult = [INF for _ in range(V+1)] # stores the shortest distances\nresult[K] = 0\n\n# priority queue (Dijkstra's algorithm)\nq = []\nheapq.heappush(q, [0,K]) # ordered by distance\n\nwhile q:\n dis, end = heapq.heappop(q) #pop\n\n if result[end] < dis: # if this path is longer than a previous one\n continue # skip it\n\n for d, x in G[end]: # explore neighboring nodes\n d += dis\n if d < result[x]: # compare the distances and\n result[x] = d\n heapq.heappush(q, [d, x]) # push onto the priority queue\n\nfor i in range(1, V+1):\n print(result[i] if result[i] != INF else \"INF\")\ne = time.time()\n\nprint(\"Elapsed time: {0:3.6f}s\".format(e - s))","repo_name":"minzydal/Python_Programming","sub_path":"QUIZ2.py","file_name":"QUIZ2.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20305453601","text":"import idc\nimport idautils\nimport ida_typeinf\n\nimport helpers\n\nfrom typing import List\nimport time\n\n# TODO: maybe decompile all again after fixing all functions?\n\ndef fix_multiple_return_signatures():\n for function_address in idautils.Functions():\n if does_function_return_multiple(function_address):\n fix_multiple_return_signature(function_address)\n\ndef does_function_return_multiple(function_start: int) -> bool:\n # If the return type already has the length of a multiple return value,\n # the fixer can ignore this function.\n function_details = helpers.get_function_details(function_start)\n if function_details.rettype.get_size() == helpers.get_multiple_return_size():\n return False\n\n instructions: List[int] = helpers.get_instructions_from_function(function_start)\n \n if len(instructions) < 
3:\n return False\n \n index: int = -1\n\n for instruction in instructions:\n if helpers.is_returning_instruction(instruction):\n index = instructions.index(instruction)\n \n if index == -1 or index < 2:\n return False\n \n walkback_limit: int = 15\n if index < walkback_limit:\n walkback_limit = index + 1\n\n is_second_return_register_stored: bool = False\n \n for i in range(1, walkback_limit):\n insn: int = instructions[index - i]\n\n # If a call is made and the second return register is not filled after, it does not exist,\n # as the second return register can be trashed in a function call.\n if helpers.is_calling_instruction(insn):\n break\n\n if helpers.is_jump_outside(insn, function_start, idc.find_func_end(function_start)):\n break\n\n position: int = find_second_return_register_position(insn)\n\n if position == -1:\n continue\n elif position == 0:\n if helpers.is_moving_instruction(insn):\n is_second_return_register_stored = True\n break\n elif position == 1:\n # If the second return register is used, it is probably not stored as a return value.\n break\n\n if is_second_return_register_stored:\n print(\"Reason: second return register stored.\")\n return True\n\n for ref in idautils.CodeRefsTo(function_start, True):\n if does_caller_use_second_return_register(ref):\n return True\n\n return False\n\n# Returns -1 if second return register is not used.\ndef find_second_return_register_position(address: int) -> int:\n if helpers.is_second_return_reg_in_operand(address, 0):\n return 0\n elif helpers.is_second_return_reg_in_operand(address, 1):\n return 1\n else:\n return -1\n\ndef does_caller_use_second_return_register(caller_address: int) -> bool:\n current_instruction: int = caller_address\n function_end: int = idc.find_func_end(caller_address)\n\n # Jumps do not return to the actual call site.\n if helpers.is_jump(caller_address):\n return False\n\n for i in range(5):\n current_instruction = idc.find_code(current_instruction, idc.SEARCH_DOWN)\n if current_instruction >= function_end or helpers.is_returning_instruction(current_instruction) or helpers.is_calling_instruction(current_instruction):\n break\n\n position: int = find_second_return_register_position(current_instruction)\n\n if position == 0:\n break\n elif position == 1:\n if helpers.is_moving_instruction(current_instruction):\n print(\"Reason: caller uses second return register: {}.\".format(hex(current_instruction)))\n return True\n\n return False\n\ndef fix_multiple_return_signature(address: int):\n # Let the decompiler run on the function first to establish an initial function signature.\n helpers.decompile_function(address)\n\n declaration: str = generate_multiple_return_signature(address)\n print(declaration + \" \" + hex(address))\n\n result = idc.parse_decl(declaration, ida_typeinf.PT_TYP)\n\n idc.apply_type(address, result, idc.TINFO_DEFINITE)\n\ndef generate_multiple_return_signature(address: int) -> str:\n function_details = helpers.get_function_details(address)\n\n return_registers_annotation: str = get_return_registers_annotation()\n\n arguments: str = \"\"\n for i in range(function_details.size()):\n if i != 0:\n arguments = arguments + \", \"\n arguments = arguments + \"{} {}\".format(ida_typeinf.print_tinfo('', 0, 0, idc.PRTYPE_1LINE, function_details[i].type, '', ''), function_details[i].name)\n arguments = arguments + get_argument_annotation(i)\n\n # Function name is discarded by parse_decl.\n return \"__int128 __usercall new_func{}({});\".format(return_registers_annotation, arguments)\n\ndef 
get_return_registers_annotation() -> str:\n platform = helpers.get_platform()\n\n if platform.is_pe_x64() or platform.is_elf_x64():\n return \"@<rdx:rax>\"\n elif platform.is_arm64():\n return \"@<X1:X0>\"\n else:\n return \"\"\n\ndef get_argument_annotation(position: int) -> str:\n annotation: str = \"@<{}>\"\n\n platform = helpers.get_platform()\n\n if platform.is_pe_x64():\n if position == 0:\n return annotation.format(\"rcx\")\n if position == 1:\n return annotation.format(\"rdx\")\n if position == 2:\n return annotation.format(\"r8\")\n if position == 3:\n return annotation.format(\"r9\")\n return \"\"\n elif platform.is_elf_x64():\n if position == 0:\n return annotation.format(\"rdi\")\n if position == 1:\n return annotation.format(\"rsi\")\n if position == 2:\n return annotation.format(\"rdx\")\n if position == 3:\n return annotation.format(\"rcx\")\n if position == 4:\n return annotation.format(\"r8\")\n if position == 5:\n return annotation.format(\"r9\")\n return \"\"\n # ARM has the same ABI across different operating systems.\n elif platform.is_arm64():\n if position == 0:\n return annotation.format(\"X0\")\n if position == 1:\n return annotation.format(\"X1\")\n if position == 2:\n return annotation.format(\"X2\")\n if position == 3:\n return annotation.format(\"X3\")\n if position == 4:\n return annotation.format(\"X4\")\n if position == 5:\n return annotation.format(\"X5\")\n if position == 6:\n return annotation.format(\"X6\")\n if position == 7:\n return annotation.format(\"X7\")\n else:\n return \"\"\n\nif __name__ == \"__main__\":\n t1 = time.time()\n fix_multiple_return_signatures()\n t2 = time.time()\n print(\"Time: {}\".format(t2-t1))\n\n","repo_name":"RobbeBryssinck/rust_reverser_helper","sub_path":"signature_fixer.py","file_name":"signature_fixer.py","file_ext":"py","file_size_in_byte":6724,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"32188450523","text":"import pytest\n\nfrom src.providers.consensus.typings import ValidatorState\nfrom src.providers.keys.typings import LidoKey\nfrom src.services.exit_order_iterator import ExitOrderIterator\nfrom src.services.exit_order_iterator_state import NodeOperatorPredictableState, ExitOrderIteratorStateService\nfrom src.web3py.extensions.lido_validators import LidoValidator, StakingModuleId, NodeOperatorId\nfrom tests.factory.blockstamp import ReferenceBlockStampFactory\nfrom tests.factory.configs import ChainConfigFactory\nfrom tests.factory.no_registry import LidoValidatorFactory\n\n\n@pytest.mark.unit\ndef test_predicates():\n def v(module_address, operator, index, activation_epoch) -> LidoValidator:\n validator = object.__new__(LidoValidator)\n validator.lido_id = object.__new__(LidoKey)\n validator.validator = object.__new__(ValidatorState)\n validator.lido_id.moduleAddress = module_address\n validator.lido_id.operatorIndex = operator\n validator.index = index\n validator.validator.activation_epoch = activation_epoch\n return validator\n\n exitable_validators_random_sort = [\n v('0x1', 2, 76, 1200),\n v('0x4', 2, 1121, 3210),\n v('0x5', 1, 1122, 3210),\n v('0x2', 1, 81, 1400),\n v('0x2', 2, 48, 781),\n v('0x3', 1, 49, 990),\n v('0x4', 1, 10, 231),\n v('0x0', 2, 90, 1500),\n v('0x1', 1, 50, 1000),\n v('0x3', 2, 52, 1003),\n v('0x0', 1, 47, 500),\n ]\n\n validators_exit = object.__new__(ExitOrderIterator)\n validators_exit.operator_network_penetration_threshold = 0.01\n validators_exit.staking_module_id = {\n '0x0': StakingModuleId(0),\n '0x1': StakingModuleId(1),\n '0x2': 
StakingModuleId(2),\n '0x3': StakingModuleId(3),\n '0x4': StakingModuleId(4),\n '0x5': StakingModuleId(5),\n }\n validators_exit.total_predictable_validators_count = 500000\n\n validators_exit.lido_node_operator_stats = {\n (StakingModuleId(0), NodeOperatorId(1)): NodeOperatorPredictableState(1000, 7000, True, 10, 0),\n (StakingModuleId(0), NodeOperatorId(2)): NodeOperatorPredictableState(1000, 7000, True, 10, 0),\n (StakingModuleId(1), NodeOperatorId(1)): NodeOperatorPredictableState(1200, 6000, True, 2, 0),\n (StakingModuleId(1), NodeOperatorId(2)): NodeOperatorPredictableState(1200, 6000, True, 2, 0),\n (StakingModuleId(2), NodeOperatorId(1)): NodeOperatorPredictableState(1200, 6000, True, 2, 0),\n (StakingModuleId(2), NodeOperatorId(2)): NodeOperatorPredictableState(998, 7432, False, 0, 0),\n (StakingModuleId(3), NodeOperatorId(1)): NodeOperatorPredictableState(998, 7432, False, 0, 0),\n (StakingModuleId(3), NodeOperatorId(2)): NodeOperatorPredictableState(998, 7432, False, 0, 0),\n (StakingModuleId(4), NodeOperatorId(1)): NodeOperatorPredictableState(100500, 5, True, 50, 1),\n (StakingModuleId(4), NodeOperatorId(2)): NodeOperatorPredictableState(100500, 2, False, 0, 2),\n (StakingModuleId(5), NodeOperatorId(1)): NodeOperatorPredictableState(100500, 2, False, 0, 2),\n }\n\n exitable_validators_random_sort.sort(\n key=lambda validator: ExitOrderIterator._predicates(validators_exit, validator)\n )\n exitable_validators_indexes = [v.index for v in exitable_validators_random_sort]\n\n expected_queue_sort_indexes = [47, 90, 50, 76, 81, 48, 49, 52, 10, 1121, 1122]\n assert exitable_validators_indexes == expected_queue_sort_indexes\n\n\n@pytest.mark.unit\ndef test_decrease_node_operator_stats():\n def v(module_address, operator, index, activation_epoch) -> LidoValidator:\n validator = object.__new__(LidoValidator)\n validator.lido_id = object.__new__(LidoKey)\n validator.validator = object.__new__(ValidatorState)\n validator.lido_id.moduleAddress = module_address\n validator.lido_id.operatorIndex = operator\n validator.index = index\n validator.validator.activation_epoch = activation_epoch\n return validator\n\n exitable_validators = [\n v('0x1', 2, 76, 1200),\n v('0x4', 2, 1121, 5000),\n ]\n\n validator_exit = object.__new__(ExitOrderIterator)\n validator_exit.blockstamp = ReferenceBlockStampFactory.build(ref_epoch=4445)\n validator_exit.total_predictable_validators_count = 500000\n validator_exit.staking_module_id = {\n '0x0': StakingModuleId(0),\n '0x1': StakingModuleId(1),\n '0x2': StakingModuleId(2),\n '0x3': StakingModuleId(3),\n '0x4': StakingModuleId(4),\n '0x5': StakingModuleId(5),\n }\n validator_exit.lido_node_operator_stats = {\n (StakingModuleId(0), NodeOperatorId(1)): NodeOperatorPredictableState(1000, 7000, True, 10, 0),\n (StakingModuleId(0), NodeOperatorId(2)): NodeOperatorPredictableState(1000, 7000, True, 10, 0),\n (StakingModuleId(1), NodeOperatorId(1)): NodeOperatorPredictableState(1200, 6000, True, 2, 0),\n (StakingModuleId(1), NodeOperatorId(2)): NodeOperatorPredictableState(3245, 6000, True, 2, 0),\n (StakingModuleId(2), NodeOperatorId(1)): NodeOperatorPredictableState(1200, 6000, True, 2, 0),\n (StakingModuleId(2), NodeOperatorId(2)): NodeOperatorPredictableState(998, 7432, False, 0, 0),\n (StakingModuleId(3), NodeOperatorId(1)): NodeOperatorPredictableState(998, 7432, False, 0, 0),\n (StakingModuleId(3), NodeOperatorId(2)): NodeOperatorPredictableState(998, 7432, False, 0, 0),\n (StakingModuleId(4), NodeOperatorId(1)): NodeOperatorPredictableState(100500, 5, True, 
50, 1),\n (StakingModuleId(4), NodeOperatorId(2)): NodeOperatorPredictableState(100500, 2, False, 0, 2),\n (StakingModuleId(5), NodeOperatorId(1)): NodeOperatorPredictableState(100500, 2, False, 0, 2),\n }\n\n module_operator = validator_exit._decrease_node_operator_stats(exitable_validators[0])\n expected_after_decrease_first = NodeOperatorPredictableState(0, 5999, True, 2, 0)\n assert module_operator == (StakingModuleId(1), NodeOperatorId(2))\n assert validator_exit.total_predictable_validators_count == 499999\n assert (\n validator_exit.lido_node_operator_stats[(StakingModuleId(1), NodeOperatorId(2))]\n == expected_after_decrease_first\n )\n\n module_operator = validator_exit._decrease_node_operator_stats(exitable_validators[1])\n expected_after_decrease_second = NodeOperatorPredictableState(100500, 1, False, 0, 2)\n assert module_operator == (StakingModuleId(4), NodeOperatorId(2))\n assert validator_exit.total_predictable_validators_count == 499998\n assert (\n validator_exit.lido_node_operator_stats[(StakingModuleId(4), NodeOperatorId(2))]\n == expected_after_decrease_second\n )\n\n\n@pytest.fixture\ndef mock_exit_order_iterator_state_service(monkeypatch):\n class MockedExitOrderIteratorStateService(ExitOrderIteratorStateService):\n pass\n\n inner_ = lambda _: None\n inner_.max_validator_exit_requests_per_report = 100\n MockedExitOrderIteratorStateService.get_oracle_report_limits = lambda *_: inner_\n MockedExitOrderIteratorStateService.get_operator_network_penetration_threshold = lambda *_: 0.05\n MockedExitOrderIteratorStateService.get_operators_with_last_exited_validator_indexes = lambda *_: {}\n MockedExitOrderIteratorStateService.get_exitable_lido_validators = lambda *_: []\n MockedExitOrderIteratorStateService.prepare_lido_node_operator_stats = lambda *_: {}\n MockedExitOrderIteratorStateService.get_total_predictable_validators_count = lambda *_: 0\n\n monkeypatch.setattr(\n 'src.services.exit_order_iterator.ExitOrderIteratorStateService', MockedExitOrderIteratorStateService\n )\n\n\n@pytest.mark.unit\ndef test_exit_order_iterator_iter(web3, lido_validators, contracts, mock_exit_order_iterator_state_service):\n iterator = ExitOrderIterator(web3, ReferenceBlockStampFactory.build(), ChainConfigFactory.build())\n web3.lido_validators.get_lido_node_operators = lambda _: []\n web3.lido_validators.get_lido_validators_by_node_operators = lambda _: []\n\n iterator.__iter__()\n\n assert iterator.exitable_lido_validators == []\n assert iterator.left_queue_count == 0\n assert iterator.lido_node_operator_stats == {}\n assert iterator.max_validators_to_exit == 100\n assert iterator.operator_network_penetration_threshold == 0.05\n assert iterator.staking_module_id == {}\n assert iterator.total_predictable_validators_count == 0\n\n\n@pytest.mark.unit\ndef test_exit_order_iterator_next(web3, lido_validators, contracts, mock_exit_order_iterator_state_service):\n iterator = ExitOrderIterator(web3, ReferenceBlockStampFactory.build(), ChainConfigFactory.build())\n web3.lido_validators.get_lido_node_operators = lambda _: []\n web3.lido_validators.get_lido_validators_by_node_operators = lambda _: []\n\n iterator.__iter__()\n\n iterator.left_queue_count = 101\n\n with pytest.raises(StopIteration):\n # left_queue_count > max_validators_to_exit\n iterator.__next__()\n\n iterator.left_queue_count = 0\n\n with pytest.raises(StopIteration):\n # no exitable validators\n iterator.__next__()\n\n validator = LidoValidatorFactory.build(index=0)\n validator.validator.activation_epoch = 0\n 
iterator.exitable_lido_validators = [validator]\n iterator.lido_node_operator_stats = {\n (0, 1): NodeOperatorPredictableState(1000, 7000, True, 10, 0),\n }\n iterator.total_predictable_validators_count = 100\n ExitOrderIterator.operator_index_by_validator = lambda *_: (0, 1)\n\n popped = iterator.__next__()\n\n assert popped == ((0, 1), validator)\n assert iterator.lido_node_operator_stats[(0, 1)] == NodeOperatorPredictableState(\n predictable_validators_total_age=-8195,\n predictable_validators_count=6999,\n targeted_validators_limit_is_enabled=True,\n targeted_validators_limit_count=10,\n delayed_validators_count=0,\n )\n assert iterator.total_predictable_validators_count == 99\n","repo_name":"lidofinance/lido-oracle","sub_path":"tests/modules/ejector/test_exit_order_iterator.py","file_name":"test_exit_order_iterator.py","file_ext":"py","file_size_in_byte":9953,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"78"} +{"seq_id":"14058644204","text":"# monitor for a number of things\n# - doorbell\n# - sound sensor (for the alarm)\n# - UDP port 5005 \n# - scan known mac addresses and warn when nobody is home \n# - sends a broadcast every five minutes for the light at the cat flap\n\nimport datetime\nimport json\nimport os\nimport requests\nimport socket\nimport subprocess\nimport time\nimport ephem\nimport random\nimport threading\n\ngeluidpin = 4\ndeurblpin = 24\n\nslapen\t= 2200\nopstaan\t= 700\nextrnip = requests.get(\"http://myexternalip.com/raw\").text.strip()\n\nvorigecontrole = time.time()\n\n# telegram addresses\nJohannes_Smits\t= \"12463680\"\nalarmsysteem\t= \"-24143102\"\ndeurbel\t\t\t= \"-15033899\"\n\t\t\ndef touch(path):\n with open(path, 'a'):\n os.utime(path, None)\n\ndef telegramMsg(chat_id=\"12463680\", message=\"...\"):\n\tr = requests.get(\"https://api.telegram.org/bot112338525:AAGyQLESoyVnCAdBJZTdaRcgV5KwN3uGipU/sendMessage?chat_id=%s&text=%s\" % (chat_id, message) )\n\tprint( message )\n\treturn r.status_code\n\t\ndef telegram( chat_id=\"12463680\", message = None, image = None ):\n\tif message is not None: \n#\t\tprint(\"telegram bericht %s\"%message)\n\t\turl = \"https://api.telegram.org/bot112338525:AAGyQLESoyVnCAdBJZTdaRcgV5KwN3uGipU/sendMessage\"\n\t\tpayload\t= {\"chat_id\":chat_id, \"text\":message, \"parse_mode\":\"HTML\"}\n#\t\tr = requests.get(\"https://api.telegram.org/bot328955454:AAEmupBEwE0D7V1vsoB8Xo5YY1wGIFpu6AE/sendMessage\", params=payload)\t\n\t\tr = requests.get(url, params=payload)\t\n\t\treturn (r.json()[\"ok\"])\n\t\t\n\telif image is not None:\n#\t\tprint(\"telegam foto %s\"%image)\n\t\turl\t= \"https://api.telegram.org/bot112338525:AAGyQLESoyVnCAdBJZTdaRcgV5KwN3uGipU/sendPhoto\"\n\t\tdata\t= {'chat_id': chat_id}\n\t\tfiles\t= {'photo': (image, open(image, \"rb\"))}\n\t\tr\t= requests.post(url , data=data, files=files)\n\t\treturn (r.json()[\"ok\"])\n\t\t\t\ndef tgSendPhoto( chat_id=\"12463680\", imagePath=\"\" ):\n\tdata\t= {'chat_id': chat_id}\n\tfiles\t= {'photo': (imagePath, open(imagePath, \"rb\"))}\n\tr = requests.post(\"https://api.telegram.org/bot112338525:AAGyQLESoyVnCAdBJZTdaRcgV5KwN3uGipU/sendPhoto\", data=data, files=files)\n\treturn r.status_code\n\ndef nachtlicht(iplamp = \"http://192.168.178.203\"):\n\tif nacht():\n\t\tif (int(json.loads(requests.get(iplamp).text)[\"aanuit1\"]) == 0):\n\t\t\t# start a timer thread to switch the lamp off again after x seconds\n\t\t\tthreading.Timer(120, requests.get, [\"%s?uit:1\"%iplamp]).start()\n\t\t\trequests.get(\"%s?aan:1\"%iplamp) # turn the 
lamp on\n\t\telse:\n\t\t\tprint(\"al aan, we doen niks\")\n\treturn\n\t\ndef nacht():\n#\tLatitude and longitude of sibeliusweg 66, capelle\n\thome_lat = '51.916905'\n\thome_long = '4.563472'\n\t\n\t# where am i \n\to = ephem.Observer()\n\to.lat = home_lat\n\to.long = home_long\n\t\t\n\t# define sun as object of interest\n\ts = ephem.Sun()\n\tsunrise = o.next_rising(s)\n\tsunset = o.next_setting(s)\n\n\tsr_next = ephem.localtime(sunrise)\n\tss_next = ephem.localtime(sunset)\t\t\n\t\n\treturn 1 if (sr_next < ss_next) else 0\t\t\t\n\nclass tuinhuis:\t\n\tdef __init__(self):\n\t\tself.laatstestatus\t= 0\n\t\tself.vorigecontrole = time.time() # instance variable unique to each instance\n\t\n\tdef waarschuwing(self, interval = 600, uren = [22,23]):\n\t\tif ((time.time() - self.vorigecontrole) > interval):\n\t\t\tself.vorigecontrole = time.time()\n\t\t\tif (int(time.strftime(\"%H\")) in uren):\n\t\t\t\tself.vorigecontrole = time.time()\n\t\t\t\tif not self.laatstestatus: # the door is closed\n\t\t\t\t\tprint (time.strftime(\"%H:%M:%S \") + \"De deur van het tuinhuis is dicht\")\n\t\t\t\telif self.laatstestatus: # the door is open\n\t\t\t\t\tkreten = ['Ter info;','Hey','Hi', 'Christie,', 'Wist je...', 'Bij Toutatis!', 'Allemachtig,']\n##\t\t\t\t\ttelegramMsg(alarmsysteem, \"%s de deur van het tuinhuis staat nog open\"%kreten[random.randrange(6)])\n\n\tdef updatestatus(self, openofdicht = 0):\n\t\tif openofdicht in range(2): # check whether the value is 0 or 1\n\t\t\tif openofdicht != self.laatstestatus:\n\t\t\t\ttekst = \"open\" if (openofdicht == 1) else \"dicht\"\n\t\t\t\ttelegramMsg (\"12463680\", \"Tuinhuisdeur is %s\"%tekst) # let the world know\n\t\t\t\ttry:\n\t\t\t\t\trequests.get(\"http://192.168.178.50:1208?schuurdeur:%s\"%tekst, timeout=2)\t# update the status in homebridge\n\t\t\t\texcept:\t\t\t\t\n\t\t\t\t\tprint(\"\\033[3m%s\\033[0m - probleem bij deurstatus update\"%time.strftime(\"%H:%M:%S\"))\n\t\t\t\tself.laatstestatus = openofdicht\t# update the status for the next check\n\t\t\t\t\ndef bericht(tekst = \"\", viaTelegram=False, viaPushover=False):\n\tprint(\"\\033[3m%s\\033[0m - %s\"%(time.strftime(\"%H:%M:%S\"),tekst))\n\tif viaTelegram:\n\t\ttelegram( message=tekst )\n\tif viaPushover:\n\t\tpushover( melding=tekst )\n\t\t\ndef pushover( melding = \"\", titel = None ):\n\ttitel = \"oPi-monitor\" if titel is None else titel \n\ttry:\n\t\tr = requests.post('https://api.pushover.net/1/messages.json', data = {'token':'aYs6YxK8qV1KnGV1LEHzQQtFTrCutk', 'user':'udEe5uL7YjuyYLyhQXBjvjnqiGGsf8', 'title':titel, 'message':melding})\n\texcept requests.Timeout as e:\n\t\tbericht(\"Pushover - %s\"%e)\n\texcept:\n\t\tbericht(\"Pushover - Fout\")\n\treturn\n\t\t\nclass arpscanner:\n\tdef __init__(self, interval=60):\n\t\tself.devices = {}\n\t\tself.gevonden = time.time()\n\t\tself.interval = interval\n\t\tself.gezien = \"\"\n\t\tself.vorigecontrole\t= 0\n\t\tself.macids =\t\t { \"Leanne\":\"70:ec:e4:ce:d8:5e\"}\n\t\tself.macids.update({\"Christie\":\"d0:25:98:2c:a7:df\"})\n\t\tself.macids.update({ \"Rik\":\"bc:6c:21:0c:b4:6b\"})\n\t\tself.macids.update({ \"Hans\":\"e0:5f:45:3f:df:d1\"})\n\t\tself.macids.update({ \"Sjors\":\"cc:25:ef:11:52:3e\"})\n\t\t\n\tdef scan(self):\n\t\tif ((time.time() - self.vorigecontrole) > self.interval):\n\t\t\tself.vorigecontrole = time.time()\n\t\t\tbericht(\"arp-scan wordt gestart...\")\n\t\t\tp = subprocess.Popen('sudo /usr/bin/arp-scan -q --interface=eth0 192.168.178.0/24', shell=True, stdout=subprocess.PIPE)\n\t\t\t# put all found devices in a 
dictionary\n\t\t\tself.devices = {}\n\t\t\tfor line in p.stdout:\n\t\t\t\tregel = line.decode(\"utf8\").strip()\n\t\t\t\tif regel.startswith('192'):\n\t\t\t\t\tself.devices.update({regel[regel.find(\"\\t\"):].strip():regel[:regel.find(\"\\t\")]})\n\t\t\t\t\t\n\t\t\t# look for all known macs in the list\n\t\t\tfor naam, macid in self.macids.items():\n\t\t\t\tif macid in self.devices:\n\t\t\t\t\tbericht(\"iPhone %s is op het netwerk (%s)\"%(naam,macid))\n\t\t\t\t\tself.gezien = naam\n\t\t\t\t\tself.gevonden = time.time()\n\t\t\t\t\t#break\n\t\treturn int((time.time()-self.gevonden)/60)\n\n# Set up the UDP listener\nUDP_PORT = 5005\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # internet, UDP \nsock.setblocking(0)\nsock.settimeout(0.5)\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nsock.bind(('', UDP_PORT)) # udp port 5005\n\nclass udpinit(object):\n\tdef __init__(self, myport = 5005, seconden = 0):\n\t\tself.port = myport\n\t\tself.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\tself.s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n\t\tself.start = time.time()\n\t\tself.interval = seconden\n\n\tdef broadcast(self, message = \"\"):\n\t\tif self.interval == 0:\n\t\t\treturn\n\t\telif time.time()-self.start > self.interval:\n\t\t\tself.start = time.time()\n#\t\t\tbericht(\"Sending: %s\\n\"%message)\n\t\t\tself.s.sendto(bytes(message,\"UTF-8\"),('',self.port))\n\t\t\t\nbericht(\"%s gestart\\n\"%__file__, viaPushover = True)\n\ntuinhuisdeur = tuinhuis()\narp = arpscanner(300)\n\ntry:\n\tstartijd = time.time()\n\thuisstatus = {\"keukendeur\":0}\n\t\n\twhile True:\n#\t\tcheck the status of the alarm system\n\t\ttry:\n\t\t\tif ((time.time()-startijd)/60>5):\t# every five minutes\n\t\t\t\tstartijd = time.time()\t\t\t\t# reset timer\n\t\t\t\tw = requests.get(\"http://admin:admin1234@192.168.178.3/action/panelCondGet\", timeout=5)\n\t\t\t\tif (w.status_code == requests.codes.ok):\t\n\t\t\t\t\tage = arp.scan()\n\t\t\t\t\tif (w.text.find(\"Disarm\")>0):\n\t\t\t\t\t\tif not nacht() and (age > 45):\n\t\t\t\t\t\t\tmsg = \"Al %s minuten niemand gevonden.\\n\"%age\n\t\t\t\t\t\t\tmsg += \"http://%s:1208?alarmsysteem:arm\\n\"%extrnip\n\t\t\t\t\t\t\ttelegramMsg(Johannes_Smits, msg)\n\t\t\t\t\telif (w.text.find( \"Arm\")>0):\n\t\t\t\t\t\tarp.gevonden = time.time()\n\t\t\t\t\telif (w.text.find(\"Armhome\")>0):\n\t\t\t\t\t\tarp.gevonden = time.time()\n\t\n\t\texcept Exception as e:\t\n\t\t\tbericht(\"Exception bij status alarmsysteem (%s)\"%e)\n\t\t\tpass\n\n\t\ttuinhuisdeur.waarschuwing(600)\n\n\t\ttry :\n\t\t\tjsonstr, addr = sock.recvfrom(1024) # buffer size is 1024 bytes\n\t\t\tjsonstr = jsonstr.decode(\"utf8\")\n\t\t\t\n\t\t\tbericht(\"ontvangen via UDP: %s (%s)\" % (jsonstr.strip(), addr) )\n\t\t\t''' \t\t\t\n\t\t\tif \"tuinhuis\" in jsonstr:\n\t\t\t\ttuinhuisdeur.updatestatus( int(json.loads(jsonstr.strip())[\"deur\"]) )\n\t\t\t\t# nu we toch bezig zijn, update de status van de temperatuur ook gelijk even in homebridge \n\t\t\t\ttemperatuur = json.loads(jsonstr.strip())[\"temperatuur\"]\n\t\t\t\tif int(temperatuur) in range(-20,50): # simpel error checking, is de waarde is tussen -19 en 49\n\t\t\t\t\ttry:\n\t\t\t\t\t\tr=requests.get(\"http://192.168.178.50:1208?tuinhuisupdate:%s\"%temperatuur, timeout=5)\n\t\t\t\t\t\tbericht(r.text)\n\t\t\t\t\texcept requests.Timeout:\t\t\t\t\n\t\t\t\t\t\tbericht(\"Time-out bij temperatuur update\")\n\t\t\t\t\texcept:\t\t\t\t\n\t\t\t\t\t\tbericht(\"Temperatuur update mislukt\")\n\t\t\t'''\t\t\t\t\t\t\t\t\t\n\t\t\tif \"reset 
aanwezigheid\" in jsonstr:\n\t\t\t\tarp.gevonden = time.time()\n\t\t\t\ttelegramMsg(Johannes_Smits, \"Aanwezigheidstimer gereset\")\n\n\t\t\telif \"aanwezig\" in jsonstr:\n\t\t\t\ttelegramMsg(Johannes_Smits, \"%s, %s seconden geleden gezien\"%(arp.gezien, int((time.time()-arp.gevonden)/60)) )\n\t\t\t\t\n\t\t\telif \"deurbel\" in jsonstr:\n\t\t\t\ttelegramMsg(Johannes_Smits, \"Deurbel\")\n\t\t\t\t\t\t\t\t\n\t\t\telif \"keukendeur\" in jsonstr:\n\t\t\t\ttry:\n\t\t\t\t\topenofdicht = int(json.loads(jsonstr.strip())[\"keukendeur\"])\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\tnachtlicht() if (openofdicht == 1) else None\t#lampje in de keuken aan doen\n\t\t\t\tif (openofdicht != huisstatus[\"keukendeur\"]):\n\t\t\t\t\topendicht = \"open\" if (openofdicht == 1) else \"dicht\"\n\t\t\t\t\ttry:\n\t\t\t\t\t\trequests.get(\"http://192.168.178.50:1208?keukendeur:%s\"%opendicht, timeout=1)\n\t\t\t\t\texcept requests.Timeout:\t\t\t\t\n\t\t\t\t\t\tbericht(\"Time-out bij update deurstatus (192.168.178.50)\")\n\t\t\t\t\texcept:\t\t\t\t\n\t\t\t\t\t\tbericht(\"Fout bij update deurstatus\")\n\n\t\t\t\t\thuisstatus[\"keukendeur\"] = openofdicht\n\n\t\t\t\t\topendicht = \"true\" if openofdicht == 0 else \"false\"\n\t\t\t\t\trequests.get(\"http://127.0.0.1:51828/?accessoryId=achterdeur&state=%s\"%opendicht)\n\n\t\t\t\t\tbericht(huisstatus[\"keukendeur\"])\n\t\t\t\t\t\n\t\t\telif \"alarmsysteem\" in jsonstr and \"reset\" in jsonstr:\n\t\t\t\tarp.gevonden = time.time()\n\n\t\texcept socket.timeout:\n\t\t\tcontinue\n\nexcept (KeyboardInterrupt):#, RuntimeError, TypeError, NameError, ValueError):\n\tbericht (time.strftime(\"%a om %H:%M:%S\")+ \" Restarting...\")\n\tsock.close()\n\tsubprocess.call(\"screen -dmLS monitor python3 %s\"%__file__, shell=True)\n\t","repo_name":"Hoefnix/development","sub_path":"monitor-orange.py","file_name":"monitor-orange.py","file_ext":"py","file_size_in_byte":10248,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"44851375414","text":"# coding: utf-8\n\n\"\"\"\n Tango Card RaaS API\n\n <5. Ordersp>Welcome to the RaaS® API – with this RESTful API you can integrate a global reward or incentive program into your app or platform.

This console works in our Sandbox environment. To receive your own credentials or to ask questions, please contact us at devsupport@tangocard.com. # noqa: E501\n\n OpenAPI spec version: 2\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom tango_client.configuration import Configuration\n\n\nclass ItemViewVerbose(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'countries': 'list[str]',\n 'created_date': 'str',\n 'credential_types': 'list[str]',\n 'currency_code': 'str',\n 'exchange_rate_rule': 'str',\n 'face_value': 'float',\n 'fee': 'CostAdjustmentEntity',\n 'is_whole_amount_value_required': 'bool',\n 'last_update_date': 'str',\n 'max_value': 'float',\n 'min_value': 'float',\n 'redemption_instructions': 'str',\n 'reward_name': 'str',\n 'reward_type': 'str',\n 'status': 'str',\n 'utid': 'str',\n 'value_type': 'str'\n }\n\n attribute_map = {\n 'countries': 'countries',\n 'created_date': 'createdDate',\n 'credential_types': 'credentialTypes',\n 'currency_code': 'currencyCode',\n 'exchange_rate_rule': 'exchangeRateRule',\n 'face_value': 'faceValue',\n 'fee': 'fee',\n 'is_whole_amount_value_required': 'isWholeAmountValueRequired',\n 'last_update_date': 'lastUpdateDate',\n 'max_value': 'maxValue',\n 'min_value': 'minValue',\n 'redemption_instructions': 'redemptionInstructions',\n 'reward_name': 'rewardName',\n 'reward_type': 'rewardType',\n 'status': 'status',\n 'utid': 'utid',\n 'value_type': 'valueType'\n }\n\n def __init__(self, countries=None, created_date=None, credential_types=None, currency_code=None, exchange_rate_rule=None, face_value=None, fee=None, is_whole_amount_value_required=None, last_update_date=None, max_value=None, min_value=None, redemption_instructions=None, reward_name=None, reward_type=None, status=None, utid=None, value_type=None, _configuration=None): # noqa: E501\n \"\"\"ItemViewVerbose - a model defined in Swagger\"\"\" # noqa: E501\n if _configuration is None:\n _configuration = Configuration()\n self._configuration = _configuration\n\n self._countries = None\n self._created_date = None\n self._credential_types = None\n self._currency_code = None\n self._exchange_rate_rule = None\n self._face_value = None\n self._fee = None\n self._is_whole_amount_value_required = None\n self._last_update_date = None\n self._max_value = None\n self._min_value = None\n self._redemption_instructions = None\n self._reward_name = None\n self._reward_type = None\n self._status = None\n self._utid = None\n self._value_type = None\n self.discriminator = None\n\n self.countries = countries\n self.created_date = created_date\n self.credential_types = credential_types\n self.currency_code = currency_code\n if exchange_rate_rule is not None:\n self.exchange_rate_rule = exchange_rate_rule\n if face_value is not None:\n self.face_value = face_value\n self.fee = fee\n self.is_whole_amount_value_required = is_whole_amount_value_required\n self.last_update_date = last_update_date\n if max_value is not None:\n self.max_value = max_value\n if min_value is not None:\n self.min_value = min_value\n self.redemption_instructions = redemption_instructions\n self.reward_name = reward_name\n 
self.reward_type = reward_type\n self.status = status\n self.utid = utid\n self.value_type = value_type\n\n @property\n def countries(self):\n \"\"\"Gets the countries of this ItemViewVerbose. # noqa: E501\n\n Countries # noqa: E501\n\n :return: The countries of this ItemViewVerbose. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._countries\n\n @countries.setter\n def countries(self, countries):\n \"\"\"Sets the countries of this ItemViewVerbose.\n\n Countries # noqa: E501\n\n :param countries: The countries of this ItemViewVerbose. # noqa: E501\n :type: list[str]\n \"\"\"\n if self._configuration.client_side_validation and countries is None:\n raise ValueError(\"Invalid value for `countries`, must not be `None`\") # noqa: E501\n\n self._countries = countries\n\n @property\n def created_date(self):\n \"\"\"Gets the created_date of this ItemViewVerbose. # noqa: E501\n\n Created Date # noqa: E501\n\n :return: The created_date of this ItemViewVerbose. # noqa: E501\n :rtype: str\n \"\"\"\n return self._created_date\n\n @created_date.setter\n def created_date(self, created_date):\n \"\"\"Sets the created_date of this ItemViewVerbose.\n\n Created Date # noqa: E501\n\n :param created_date: The created_date of this ItemViewVerbose. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and created_date is None:\n raise ValueError(\"Invalid value for `created_date`, must not be `None`\") # noqa: E501\n\n self._created_date = created_date\n\n @property\n def credential_types(self):\n \"\"\"Gets the credential_types of this ItemViewVerbose. # noqa: E501\n\n Credential Types # noqa: E501\n\n :return: The credential_types of this ItemViewVerbose. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._credential_types\n\n @credential_types.setter\n def credential_types(self, credential_types):\n \"\"\"Sets the credential_types of this ItemViewVerbose.\n\n Credential Types # noqa: E501\n\n :param credential_types: The credential_types of this ItemViewVerbose. # noqa: E501\n :type: list[str]\n \"\"\"\n if self._configuration.client_side_validation and credential_types is None:\n raise ValueError(\"Invalid value for `credential_types`, must not be `None`\") # noqa: E501\n\n self._credential_types = credential_types\n\n @property\n def currency_code(self):\n \"\"\"Gets the currency_code of this ItemViewVerbose. # noqa: E501\n\n Currency Code # noqa: E501\n\n :return: The currency_code of this ItemViewVerbose. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency_code\n\n @currency_code.setter\n def currency_code(self, currency_code):\n \"\"\"Sets the currency_code of this ItemViewVerbose.\n\n Currency Code # noqa: E501\n\n :param currency_code: The currency_code of this ItemViewVerbose. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and currency_code is None:\n raise ValueError(\"Invalid value for `currency_code`, must not be `None`\") # noqa: E501\n\n self._currency_code = currency_code\n\n @property\n def exchange_rate_rule(self):\n \"\"\"Gets the exchange_rate_rule of this ItemViewVerbose. # noqa: E501\n\n Exchange Rate Rule # noqa: E501\n\n :return: The exchange_rate_rule of this ItemViewVerbose. # noqa: E501\n :rtype: str\n \"\"\"\n return self._exchange_rate_rule\n\n @exchange_rate_rule.setter\n def exchange_rate_rule(self, exchange_rate_rule):\n \"\"\"Sets the exchange_rate_rule of this ItemViewVerbose.\n\n Exchange Rate Rule # noqa: E501\n\n :param exchange_rate_rule: The exchange_rate_rule of this ItemViewVerbose. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._exchange_rate_rule = exchange_rate_rule\n\n @property\n def face_value(self):\n \"\"\"Gets the face_value of this ItemViewVerbose. # noqa: E501\n\n Face Value # noqa: E501\n\n :return: The face_value of this ItemViewVerbose. # noqa: E501\n :rtype: float\n \"\"\"\n return self._face_value\n\n @face_value.setter\n def face_value(self, face_value):\n \"\"\"Sets the face_value of this ItemViewVerbose.\n\n Face Value # noqa: E501\n\n :param face_value: The face_value of this ItemViewVerbose. # noqa: E501\n :type: float\n \"\"\"\n\n self._face_value = face_value\n\n @property\n def fee(self):\n \"\"\"Gets the fee of this ItemViewVerbose. # noqa: E501\n\n Fee # noqa: E501\n\n :return: The fee of this ItemViewVerbose. # noqa: E501\n :rtype: CostAdjustmentEntity\n \"\"\"\n return self._fee\n\n @fee.setter\n def fee(self, fee):\n \"\"\"Sets the fee of this ItemViewVerbose.\n\n Fee # noqa: E501\n\n :param fee: The fee of this ItemViewVerbose. # noqa: E501\n :type: CostAdjustmentEntity\n \"\"\"\n # MANUAL MODIFICATION: Official Tango Cards Swagger has a (potential) mistake, \n # calling fee a required field whilst the actual returned API responses are in many cases\n # missing the fee specification\n # WORKAROUND: Disabling the validation for fee\n #\n #if self._configuration.client_side_validation and fee is None:\n # raise ValueError(\"Invalid value for `fee`, must not be `None`\") # noqa: E501\n\n self._fee = fee\n\n @property\n def is_whole_amount_value_required(self):\n \"\"\"Gets the is_whole_amount_value_required of this ItemViewVerbose. # noqa: E501\n\n Whole Value Required # noqa: E501\n\n :return: The is_whole_amount_value_required of this ItemViewVerbose. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._is_whole_amount_value_required\n\n @is_whole_amount_value_required.setter\n def is_whole_amount_value_required(self, is_whole_amount_value_required):\n \"\"\"Sets the is_whole_amount_value_required of this ItemViewVerbose.\n\n Whole Value Required # noqa: E501\n\n :param is_whole_amount_value_required: The is_whole_amount_value_required of this ItemViewVerbose. # noqa: E501\n :type: bool\n \"\"\"\n if self._configuration.client_side_validation and is_whole_amount_value_required is None:\n raise ValueError(\"Invalid value for `is_whole_amount_value_required`, must not be `None`\") # noqa: E501\n\n self._is_whole_amount_value_required = is_whole_amount_value_required\n\n @property\n def last_update_date(self):\n \"\"\"Gets the last_update_date of this ItemViewVerbose. # noqa: E501\n\n Last Updated Date # noqa: E501\n\n :return: The last_update_date of this ItemViewVerbose. # noqa: E501\n :rtype: str\n \"\"\"\n return self._last_update_date\n\n @last_update_date.setter\n def last_update_date(self, last_update_date):\n \"\"\"Sets the last_update_date of this ItemViewVerbose.\n\n Last Updated Date # noqa: E501\n\n :param last_update_date: The last_update_date of this ItemViewVerbose. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and last_update_date is None:\n raise ValueError(\"Invalid value for `last_update_date`, must not be `None`\") # noqa: E501\n\n self._last_update_date = last_update_date\n\n @property\n def max_value(self):\n \"\"\"Gets the max_value of this ItemViewVerbose. # noqa: E501\n\n Max Value # noqa: E501\n\n :return: The max_value of this ItemViewVerbose. 
# noqa: E501\n :rtype: float\n \"\"\"\n return self._max_value\n\n @max_value.setter\n def max_value(self, max_value):\n \"\"\"Sets the max_value of this ItemViewVerbose.\n\n Max Value # noqa: E501\n\n :param max_value: The max_value of this ItemViewVerbose. # noqa: E501\n :type: float\n \"\"\"\n\n self._max_value = max_value\n\n @property\n def min_value(self):\n \"\"\"Gets the min_value of this ItemViewVerbose. # noqa: E501\n\n Min Value # noqa: E501\n\n :return: The min_value of this ItemViewVerbose. # noqa: E501\n :rtype: float\n \"\"\"\n return self._min_value\n\n @min_value.setter\n def min_value(self, min_value):\n \"\"\"Sets the min_value of this ItemViewVerbose.\n\n Min Value # noqa: E501\n\n :param min_value: The min_value of this ItemViewVerbose. # noqa: E501\n :type: float\n \"\"\"\n\n self._min_value = min_value\n\n @property\n def redemption_instructions(self):\n \"\"\"Gets the redemption_instructions of this ItemViewVerbose. # noqa: E501\n\n Redemption Instructions # noqa: E501\n\n :return: The redemption_instructions of this ItemViewVerbose. # noqa: E501\n :rtype: str\n \"\"\"\n return self._redemption_instructions\n\n @redemption_instructions.setter\n def redemption_instructions(self, redemption_instructions):\n \"\"\"Sets the redemption_instructions of this ItemViewVerbose.\n\n Redemption Instructions # noqa: E501\n\n :param redemption_instructions: The redemption_instructions of this ItemViewVerbose. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and redemption_instructions is None:\n raise ValueError(\"Invalid value for `redemption_instructions`, must not be `None`\") # noqa: E501\n\n self._redemption_instructions = redemption_instructions\n\n @property\n def reward_name(self):\n \"\"\"Gets the reward_name of this ItemViewVerbose. # noqa: E501\n\n Reward Name # noqa: E501\n\n :return: The reward_name of this ItemViewVerbose. # noqa: E501\n :rtype: str\n \"\"\"\n return self._reward_name\n\n @reward_name.setter\n def reward_name(self, reward_name):\n \"\"\"Sets the reward_name of this ItemViewVerbose.\n\n Reward Name # noqa: E501\n\n :param reward_name: The reward_name of this ItemViewVerbose. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and reward_name is None:\n raise ValueError(\"Invalid value for `reward_name`, must not be `None`\") # noqa: E501\n\n self._reward_name = reward_name\n\n @property\n def reward_type(self):\n \"\"\"Gets the reward_type of this ItemViewVerbose. # noqa: E501\n\n Reward Type # noqa: E501\n\n :return: The reward_type of this ItemViewVerbose. # noqa: E501\n :rtype: str\n \"\"\"\n return self._reward_type\n\n @reward_type.setter\n def reward_type(self, reward_type):\n \"\"\"Sets the reward_type of this ItemViewVerbose.\n\n Reward Type # noqa: E501\n\n :param reward_type: The reward_type of this ItemViewVerbose. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and reward_type is None:\n raise ValueError(\"Invalid value for `reward_type`, must not be `None`\") # noqa: E501\n\n self._reward_type = reward_type\n\n @property\n def status(self):\n \"\"\"Gets the status of this ItemViewVerbose. # noqa: E501\n\n Status # noqa: E501\n\n :return: The status of this ItemViewVerbose. # noqa: E501\n :rtype: str\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"Sets the status of this ItemViewVerbose.\n\n Status # noqa: E501\n\n :param status: The status of this ItemViewVerbose. 
# noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n\n self._status = status\n\n @property\n def utid(self):\n \"\"\"Gets the utid of this ItemViewVerbose. # noqa: E501\n\n Utid - Unique Tango Card ID. # noqa: E501\n\n :return: The utid of this ItemViewVerbose. # noqa: E501\n :rtype: str\n \"\"\"\n return self._utid\n\n @utid.setter\n def utid(self, utid):\n \"\"\"Sets the utid of this ItemViewVerbose.\n\n Utid - Unique Tango Card ID. # noqa: E501\n\n :param utid: The utid of this ItemViewVerbose. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and utid is None:\n raise ValueError(\"Invalid value for `utid`, must not be `None`\") # noqa: E501\n\n self._utid = utid\n\n @property\n def value_type(self):\n \"\"\"Gets the value_type of this ItemViewVerbose. # noqa: E501\n\n Value Type # noqa: E501\n\n :return: The value_type of this ItemViewVerbose. # noqa: E501\n :rtype: str\n \"\"\"\n return self._value_type\n\n @value_type.setter\n def value_type(self, value_type):\n \"\"\"Sets the value_type of this ItemViewVerbose.\n\n Value Type # noqa: E501\n\n :param value_type: The value_type of this ItemViewVerbose. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and value_type is None:\n raise ValueError(\"Invalid value for `value_type`, must not be `None`\") # noqa: E501\n\n self._value_type = value_type\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(ItemViewVerbose, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ItemViewVerbose):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, ItemViewVerbose):\n return True\n\n return self.to_dict() != other.to_dict()\n","repo_name":"Crewscope/tango-cards-python-client-generated","sub_path":"tango_client/models/item_view_verbose.py","file_name":"item_view_verbose.py","file_ext":"py","file_size_in_byte":19278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"46992462017","text":"'''\nProblem statement:\n\nMerge Two Sorted Arrays\n'''\n\nl1 = [1, 2, 3, 4, 5, 6]\nl2 = [2, 3, 5, 7, 9]\n\n# Naive (extra space) - O(N log N), where N = m + n\nl3 = [i for i in l1]\nfor i in l2:\n l3.append(i)\nl3.sort()\nfor i in range(len(l1)):\n l1[i] = l3[i]\nfor i in range(len(l1), len(l1)+len(l2)):\n l2[i-len(l1)] = l3[i]\nprint(l1)\nprint(l2)\n# Naive (no extra space) - O(n*m)\nl1 = [1, 2, 3, 4, 5, 6]\nl2 = [2, 3, 5, 7, 9]\ni = 0\nwhile i 
< len(l1):\n if l1[i] <= l2[0]:\n i += 1\n else:\n l1[i], l2[0] = l2[0], l1[i]\n j = 1; ele = l2[0]\n while j < len(l2):\n if l2[j] <= ele:\n l2[j-1] = l2[j]\n else:\n break\n j += 1\n l2[j-1] = ele\nprint(l1)\nprint(l2)\n \n#Shell Sort","repo_name":"Shrikar-Kota/DSA","sub_path":"Arrays/merge_two_sorted_arrays.py","file_name":"merge_two_sorted_arrays.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25259583621","text":"print(5+6)\nresult = 5 + 6\nresult = (5).__add__(6)\nprint((5).__add__(6))\nmystring = [5,2,3,4]\nprint(len(mystring))\nprint(mystring.__len__())\n\nclass Point2D:\n def __init__(self,x,y):\n self.x = x \n self.y = y \n\n def __str__(self):\n return f\"({self.x},{self.y})\"\n\nmypoint = Point2D(56,60)\nprint(mypoint)\n\nclass Backpack:\n def __init__(self):\n self.items = []\n\n def add_item(self,item):\n self.items.append(item)\n def remove_item(self,item):\n if item in self.items:\n self.items.remove(item)\n else:\n print(\"This item is not in the backpack.\")\n def __len__(self):\n return len(self.items)\n\nmy_backpack = Backpack()\nmy_backpack.add_item(\"Water bottle\")\nmy_backpack.add_item(\"Losfre\")\nmy_backpack.add_item(\"Sleeping bag\")\nprint(len(my_backpack))","repo_name":"ACSE-lk622/Data-Strucure-and-Algorithm-","sub_path":"OOP/dunder method.py","file_name":"dunder method.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13282544234","text":"\"\"\"\nURL of problem:\nhttps://leetcode.com/problems/hamming-distance/description/\n\"\"\"\n\n\ndef main(x, y):\n xor_result = x ^ y\n # converting result to binary\n # and getting rid of the '0b'\n # at the beginning of the number\n xor_result = bin(xor_result)[2:]\n hamming_dist = 0\n for digit in xor_result:\n if digit == '1':\n hamming_dist += 1\n\n # print(\"Hamming distance:\", hamming_dist)\n return hamming_dist\n\n\nmain(int(input(\"Give first number: \")), int(input(\"Give second number: \")))\n","repo_name":"atg-abhijay/LeetCode_problems","sub_path":"hamming_distance_461.py","file_name":"hamming_distance_461.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"39591771814","text":"import six\n\nfrom . import config\nfrom . import ConfigBase\nfrom . 
import URLBase\nfrom .AppriseAsset import AppriseAsset\n\nfrom .utils import GET_SCHEMA_RE\nfrom .utils import parse_list\nfrom .utils import is_exclusive_match\nfrom .logger import logger\n\n\nclass AppriseConfig(object):\n \"\"\"\n Our Apprise Configuration File Manager\n\n - Supports a list of URLs defined one after another (text format)\n - Supports a distinct YAML configuration format\n\n \"\"\"\n\n def __init__(self, paths=None, asset=None, cache=True, **kwargs):\n \"\"\"\n Loads all of the paths specified (if any).\n\n The path can either be a single string identifying one explicit\n location, otherwise you can pass in a series of locations to scan\n via a list.\n\n If no path is specified then a default list is used.\n\n If cache is set to True, then after the data is loaded, it's cached\n within this object so it isn't retrieved again later.\n \"\"\"\n\n # Initialize a server list of URLs\n self.configs = list()\n\n # Prepare our Asset Object\n self.asset = \\\n asset if isinstance(asset, AppriseAsset) else AppriseAsset()\n\n if paths is not None:\n # Store our path(s)\n self.add(paths)\n\n return\n\n def add(self, configs, asset=None, tag=None):\n \"\"\"\n Adds one or more config URLs into our list.\n\n You can override the global asset if you wish by including it with the\n config(s) that you add.\n\n \"\"\"\n\n # Initialize our return status\n return_status = True\n\n if not isinstance(asset, AppriseAsset):\n # prepare default asset\n asset = self.asset\n\n if isinstance(configs, ConfigBase):\n # Go ahead and just add our configuration into our list\n self.configs.append(configs)\n return True\n\n elif isinstance(configs, six.string_types):\n # Save our path\n configs = (configs, )\n\n elif not isinstance(configs, (tuple, set, list)):\n logger.error(\n 'An invalid configuration path (type={}) was '\n 'specified.'.format(type(configs)))\n return False\n\n # Iterate over our configurations\n for _config in configs:\n\n if isinstance(_config, ConfigBase):\n # Go ahead and just add our configuration into our list\n self.configs.append(_config)\n continue\n\n elif not isinstance(_config, six.string_types):\n logger.warning(\n \"An invalid configuration (type={}) was specified.\".format(\n type(_config)))\n return_status = False\n continue\n\n logger.debug(\"Loading configuration: {}\".format(_config))\n\n # Instantiate ourselves an object, this function throws or\n # returns None if it fails\n instance = AppriseConfig.instantiate(_config, asset=asset, tag=tag)\n if not isinstance(instance, ConfigBase):\n return_status = False\n continue\n\n # Add our initialized plugin to our server listings\n self.configs.append(instance)\n\n # Return our status\n return return_status\n\n def servers(self, tag=None, cache=True):\n \"\"\"\n Returns all of our servers dynamically built based on parsed\n configuration.\n\n If a tag is specified, it applies to the configuration sources\n themselves and not the notification services inside them.\n\n This is for filtering the configuration files polled for\n results.\n\n \"\"\"\n # Build our tag setup\n # - top level entries are treated as an 'or'\n # - second level (or more) entries are treated as 'and'\n #\n # examples:\n # tag=\"tagA, tagB\" = tagA or tagB\n # tag=['tagA', 'tagB'] = tagA or tagB\n # tag=[('tagA', 'tagC'), 'tagB'] = (tagA and tagC) or tagB\n # tag=[('tagB', 'tagC')] = tagB and tagC\n\n response = list()\n\n for entry in self.configs:\n\n # Apply our tag matching based on our defined logic\n if tag is not None and not is_exclusive_match(\n logic=tag, 
data=entry.tags):\n continue\n\n # Build ourselves a list of services dynamically and return them\n # as a list\n response.extend(entry.servers(cache=cache))\n\n return response\n\n @staticmethod\n def instantiate(url, asset=None, tag=None, suppress_exceptions=True):\n \"\"\"\n Returns the instance of an instantiated configuration plugin based on\n the provided Server URL. If the url fails to be parsed, then None\n is returned.\n\n \"\"\"\n # Attempt to acquire the schema at the very least to allow our\n # configuration based urls.\n schema = GET_SCHEMA_RE.match(url)\n if schema is None:\n # Plan B is to assume we're dealing with a file\n schema = config.ConfigFile.protocol\n url = '{}://{}'.format(schema, URLBase.quote(url))\n\n else:\n # Ensure our schema is always in lower case\n schema = schema.group('schema').lower()\n\n # Some basic validation\n if schema not in config.SCHEMA_MAP:\n logger.warning('Unsupported schema {}.'.format(schema))\n return None\n\n # Parse our url details of the server object as dictionary containing\n # all of the information parsed from our URL\n results = config.SCHEMA_MAP[schema].parse_url(url)\n\n if not results:\n # Failed to parse the server URL\n logger.warning('Unparseable URL {}.'.format(url))\n return None\n\n # Build a list of tags to associate with the newly added notifications\n results['tag'] = set(parse_list(tag))\n\n # Prepare our Asset Object\n results['asset'] = \\\n asset if isinstance(asset, AppriseAsset) else AppriseAsset()\n\n if suppress_exceptions:\n try:\n # Attempt to create an instance of our plugin using the parsed\n # URL information\n cfg_plugin = config.SCHEMA_MAP[results['schema']](**results)\n\n except Exception:\n # the arguments are invalid or can not be used.\n logger.warning('Could not load URL: %s' % url)\n return None\n\n else:\n # Attempt to create an instance of our plugin using the parsed\n # URL information but don't wrap it in a try catch\n cfg_plugin = config.SCHEMA_MAP[results['schema']](**results)\n\n return cfg_plugin\n\n def clear(self):\n \"\"\"\n Empties our configuration list\n\n \"\"\"\n self.configs[:] = []\n\n def server_pop(self, index):\n \"\"\"\n Removes an indexed Apprise Notification from the servers\n \"\"\"\n\n # Tracking variables\n prev_offset = -1\n offset = prev_offset\n\n for entry in self.configs:\n servers = entry.servers(cache=True)\n if len(servers) > 0:\n # Acquire a new maximum offset to work with\n offset = prev_offset + len(servers)\n\n if offset >= index:\n # we can pop an notification from our config stack\n return entry.pop(index if prev_offset == -1\n else (index - prev_offset - 1))\n\n # Update our old offset\n prev_offset = offset\n\n # If we reach here, then we indexed out of range\n raise IndexError('list index out of range')\n\n def pop(self, index):\n \"\"\"\n Removes an indexed Apprise Configuration from the stack and\n returns it.\n \"\"\"\n # Remove our entry\n return self.configs.pop(index)\n\n def __getitem__(self, index):\n \"\"\"\n Returns the indexed config entry of a loaded apprise configuration\n \"\"\"\n return self.configs[index]\n\n def __iter__(self):\n \"\"\"\n Returns an iterator to our config list\n \"\"\"\n return iter(self.configs)\n\n def __len__(self):\n \"\"\"\n Returns the number of config entries loaded\n \"\"\"\n return 
len(self.configs)\n","repo_name":"webflo-dev/bazarr","sub_path":"bazarr/libs/apprise/AppriseConfig.py","file_name":"AppriseConfig.py","file_ext":"py","file_size_in_byte":8425,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"30006333282","text":"import sys, os, pathlib\n\nOS = 'WIN' if sys.platform.startswith('win') else \\\n 'LINUX' if sys.platform.startswith('linux') else \\\n 'OTHER'\nCONFIG_PATH = pathlib.Path(os.getenv('appdata')) if OS == 'WIN' else \\\n pathlib.Path.home() if OS == 'LINUX' else \\\n pathlib.Path.home()\nCONFIG_FOLDER = 'XiNOS' if OS == 'WIN' else \\\n '.xinos' if OS == 'LINUX' else \\\n '.xinos'\n#DATA_PATH = '/media/yauhsien/TOSHIBA EXT/data'\nDATA_PATH = 'E:\\\\data'\nOK = 'OK'\nINTERRUPT = 'INTERRUPT'\nLOG_FMT = '%(asctime)-15s [%(levelname)s] %(name)s: %(message)s'\n","repo_name":"YauHsien/some_ETL","sub_path":"scripts/wic/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23560434361","text":"import numpy as np\r\nimport sys\r\nimport numpy.linalg as nplg\r\n\r\n'''MIT License\r\nCopyright (c) Shahrouz Ryan Alimo 2017\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\nModified: Feb. 2017 '''\r\ndef init_DOGS(N, lattice):\r\n # calculate the cloud of nearest neighbors, the basis matrix, and the plane\r\n # that the lattice lies on, if applicable. Options for the variable lattice\r\n # are 'An', 'An*', 'Dn', 'Dn*', 'E8 '\r\n print (\"initializing lattice .... \" + lattice + \" ....\")\r\n\r\n # build the lattice basis matrix\r\n matrix, v, lenn = make_matrix(lattice,N)\r\n iter = 2\r\n\r\n # find neighbors in matrix space\r\n [nr, nc] = np.shape(matrix)\r\n indx = -iter * np.ones(nc)\r\n indxorg = np.copy(indx)\r\n neigh = np.empty((0,nr))\r\n plane = np.empty((0, nr))\r\n\r\n nindx = len(indx)\r\n indx[nindx-1] = indx[nindx-1] - 1.0\r\n\r\n nloop = (2*iter+1)**N\r\n for j in range(0, nloop):\r\n indx[nindx - 1] = indx[nindx - 1] + 1.0\r\n for i in range(nindx-1, 0, -1):\r\n if (indx[i] > -indxorg[i]):\r\n indx[i] = indxorg[i]\r\n indx[i - 1] = indx[i - 1] + 1.0\r\n\r\n pt = np.dot(matrix, indx.transpose())\r\n v = nplg.norm(pt)\r\n if(v>0 and v <1.001*lenn):\r\n neigh = np.append(neigh, np.array([pt]), axis=0)\r\n\r\n # done with neighbors in matrix space\r\n [nr, nc] = np.shape(neigh)\r\n if(nr>nc):\r\n nn = nr\r\n else:\r\n nn = nc\r\n\r\n neigh2 = np.empty((0, N))\r\n if (lattice == \"An \"):\r\n plane, A = QRHouseholder(np.ones((N+1,1)))\r\n plane = plane[1:,:]\r\n\r\n # QRHouseholder returns 2:end orthogonal vectors - plane basis\r\n for i in range(0, nn):\r\n b = neigh[i, :]\r\n c = nplg.lstsq(plane.transpose(), b)[0]\r\n neigh2 = np.append(neigh2, np.array([c]), axis=0)\r\n\r\n neigh = np.copy(neigh2)\r\n elif (lattice == \"An*\"):\r\n v = np.ones((N+1,1))\r\n v[len(v)-1] = -N\r\n plane, A = QRHouseholder(v)\r\n plane = plane[1:,:]\r\n for i in range(0, nn):\r\n b = neigh[i, :]\r\n c = nplg.lstsq(plane.transpose(), b)[0]\r\n neigh2 = np.append(neigh2, np.array([c]), axis=0)\r\n neigh = np.copy(neigh2)\r\n print (\"initialization complete, DELTA DOGS Lambda starting...\")\r\n return(neigh, matrix, plane)\r\n\r\ndef QRHouseholder(A):\r\n # Compute a QR decomposition A=QR by applying a sequence of Householder reflections\r\n # to any MxN matrix A to reduce it to upper triangular form.\r\n [M, N] = np.shape(A)\r\n Q = np.eye(M, M)\r\n\r\n for i in range(0, min(N, M - 1)):\r\n A[i:M, i:N], sigma, w = Reflect(A[i:M, i:N])\r\n wdot = w.transpose()\r\n a = np.dot(Q[:, i:M], w)\r\n a = np.reshape(a, (len(a), 1))\r\n b = sigma * wdot\r\n Q[:, i:M] = Q[:,i:M] - a * b\r\n return(Q,A)\r\n\r\ndef Reflect(X):\r\n # Apply a Householder reflector matrix H to an MxN matrix X (i.e., calculate H*X),\r\n # with [sigma,w] arranged to give zeros in the (2:end,1) locations of the result.\r\n x = X[:, 0]\r\n if (np.real(x[1]) < 0):\r\n s = -1\r\n else:\r\n s = 1\r\n\r\n nu = s * nplg.norm(x) # Eqn (1.7b)\r\n\r\n if (nu == 0):\r\n sig = 0\r\n w = 0\r\n else:\r\n sig = (x[0] + nu) / nu\r\n w = np.append(x[0]+nu, x[1:]) / (x[0] + nu)\r\n\r\n X[0, 0] = -nu\r\n X[1:, 0] = 0 # Eqn (1.8)\r\n\r\n wdot = w.transpose()\r\n tmp = X[:, 1:len(X)]\r\n if(tmp.size>0): # prevent getting null matrix\r\n X[:, 1:] = X[:, 1:] - (np.conj(sig) * w) * (wdot * X[:, 1:]) #Eqn (1.9a)\r\n\r\n return (X,sig, w)\r\n\r\n# %%%%%% Make_Matrix %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\ndef make_matrix(lattice, N):\r\n if(lattice==\"Zn \"):\r\n matrix = np.eye(N)\r\n lenn = 1\r\n v = np.empty(shape=(0, 0))\r\n elif (lattice ==\"An \"):\r\n v = np.ones(N)\r\n M = np.zeros((N,N+1))\r\n M[:, 0:N] = np.diag(v)\r\n P = np.diag(v,1)\r\n P = P[0:N, :]\r\n matrix = (-M+P).transpose()\r\n lenn = 1\r\n matrix = matrix / np.sqrt(2.0)\r\n elif (lattice == \"Dn \"):\r\n v = -1 * np.ones(N)\r\n matrix = np.diag(v)\r\n P = np.diag(-v,1)\r\n [nr,nc] = np.shape(P)\r\n P = P[0:nr-1, 0:nc-1]\r\n matrix = matrix+P\r\n matrix[1,0] = -1\r\n v = np.empty(shape=(0, 0))\r\n lenn = np.sqrt(2.0)\r\n 
elif (lattice == \"Dn*\"):\r\n v = np.ones(N)\r\n matrix = np.diag(v)\r\n matrix[:,N-1] = 0.5*np.ones(N)\r\n v = np.empty(shape=(0, 0))\r\n len1 = nplg.norm(matrix[:,0])\r\n len2 = nplg.norm(matrix[:,N-1])\r\n lenn = min(len1,len2)\r\n elif (lattice == \"An*\"):\r\n P = np.ones((N+1,N))\r\n P0 = np.diag(-np.ones(N),-1)\r\n P[1:N+1, :] = P0[1:N+1,0:N]\r\n P[:,N-1] = (1.0/(N+1.0)) * np.ones(N+1)\r\n P[0,N-1] = -N/(N+1.0)\r\n matrix = P\r\n v = np.ones((N+1,1))\r\n lenn = nplg.norm(matrix[:,N-1])\r\n elif (lattice == \"E8 \"):\r\n N = 8\r\n matrix = np.diag(np.ones(8))\r\n matrix = matrix + np.diag(-1.0 * np.ones(7), 1)\r\n matrix[0:4, N-1] = 0.5\r\n matrix[3:N, N-1] = -0.5\r\n matrix[0,0] = 2.0\r\n v = np.empty(shape=(0, 0))\r\n lenn = np.sqrt(2.0)\r\n else:\r\n print (\"no viable lattice entered\")\r\n sys.exit()\r\n\r\n return(matrix, v, lenn)\r\n\r\n# neigh,matrix,plane = init_DOGS(2, \"An \")\r\n# print neigh\r\n# print matrix\r\n# print plane","repo_name":"salimoha/rootlattices","sub_path":"init_DOGS.py","file_name":"init_DOGS.py","file_ext":"py","file_size_in_byte":6410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6785354955","text":"from os import getcwd, walk\nfrom os.path import join, split\nimport sys\nimport traceback\n\nfrom tracer import Tracer\n\nimport matplotlib.pyplot as plt\nfrom skimage.io import imshow, show\n\nfrom property_loader import load_properties\nfrom ruler import Ruler, MeasurementError\nfrom species_sort import Species\n\n\"\"\"\nmain2.py: The main file for Deciduous Tree Leaf Identification with Artificial Neural Networks.\n\nVersion 2 because of a major reworking in how images and properties are loaded.\nIgnore the __version__ docstring as of now\n\"\"\"\n\n__author__ = \"Patrick Thomas\"\n__credits__ = [\"Patrick Thomas\", \"Rick Fisher\"]\n__version__ = \"1.0.0\"\n__date__ = \"8/14/16\"\n__maintainer__ = \"Patrick Thomas\"\n__email__ = \"pthomas@mail.swvgs.us\"\n__status__ = \"Development\"\n\n# global variables\nIMAGE_DIR = join(getcwd(), 'input-images') # master image directory\nDEFAULT_RULER = Ruler()\n\n# create tracer\n_f = __file__\ntc = Tracer(mode=5)\n\n\n\"\"\"\nClasses for all species known to the program are added here.\n\nTemplate species here:\n# Test_leaf = Species('Test', 'leaf', properties=[\n# 'simple',\n# 'broad',\n# 'ovate',\n# ])\n\nDirectory structure:\ninput-images\n--->Leaf Collection %date%\n------->%genus% %species%\n----------->%image name%.jpg\n\nProperties of leaves are included to hopefully provide some insights\nas to why the ANN (will inevitably) confuse species.\n\nThey are loaded from a properties.txt file in the project folder.\n\nLoad locations (images known to be the aforementioned species)\nare added for each of the species. 
The images in the directories given\nto the species are used in training the ANN after the images are\nmeasured.\n\"\"\"\n\n\ndef load_species_structured(master_dir):\n \"\"\"\n load all species found by walking through the image_dir\n :return: dictionary of bin_nom:species class\n \"\"\"\n\n # find all leaves by walking through the leaf collections\n tc.print('Finding all leaves by walking through the leaf collections', 4, _f)\n species_list = []\n for root, paths, filenames in walk(master_dir):\n if root.find('Leaf Collection') > -1: # looks for leaf collections, if str is present\n # it will have an index\n # it must be determined whether we are in the collection only or in a species sub folder\n if not filenames: # in collection\n species_list.extend(paths)\n # elif not paths: # in species\n # print(filenames)\n\n tc.print(species_list, 3, _f)\n\n # only get the species present, no repeats\n # if a species is repeated or called 'unsorted', do not load it\n all_s_species = list(set(species_list))\n all_s_species.remove('unsorted') # removes the unsort images\n species_dict = {}\n\n # load the species properties for the given g, s\n for species_str in all_s_species:\n gen, spe = species_str.split(' ')\n species_dict[species_str] = Species(genus=gen,\n species=spe,\n properties=load_properties(species_str))\n\n # recursively walk through the image-dir and record load locations to the species in the dict\n for root, paths, filenames in walk(master_dir):\n if 'Leaf Collection' in split(root)[1]:\n for species_str in paths:\n if species_str != 'unsorted':\n full_p = join(root, species_str)\n\n tc.print('added {0} as load location for {1}'.format(split(split(root)[0])[1], species_str), 4, _f)\n \n species_dict[species_str].add_load_location(full_p, split(split(root)[0])[1])\n\n # print all species present\n tc.print('\\nSpecies loaded: ', 4, _f)\n for k in sorted(species_dict.keys()):\n tc.print(str(species_dict[k]), 4, _f)\n\n # append all values of dict to list\n species_output = [species_dict[k] for k in species_dict]\n\n # return a list of values from species_dict\n return species_output\n\n\ndef use_ruler(species, ruler=Ruler()):\n \"\"\"\n THIS IS A DEMONSTRATION/VISUALIZATION OF THE RULER IN USE\n\n Loads species specified and measures the species with\n Ruler(). 
Plots and displays the different leaf images.\n :param species:\n :param ruler:\n :return:\n \"\"\"\n\n # state current leaf species and localize path\n tc.print((species.bin_nom, species.load_locations, species.get_leaf_paths()[0]), 4, _f)\n leaf_path = species.get_leaf_paths()[0]\n\n # load the leaf with the ruler\n ruler.load_new_image(leaf_path)\n\n # img, lines, lines2, lines3, length, center_range\n img = ruler.leaf\n hough_center = ruler.vein_measure['hough center']\n hough_above = ruler.vein_measure['hough above']\n hough_below = ruler.vein_measure['hough below']\n hough_range = ruler.vein_measure['center range']\n midrib_line = ruler.vein_measure['midrib lin approx']\n length = ruler.length\n\n print(hough_above)\n\n # displaying data with pyplot and matplotlib\n fig, axes = plt.subplots(2, 2, figsize=(5, 2))\n ax = axes.ravel()\n ax[0].imshow(img, cmap=plt.cm.gray)\n ax[0].set_title('{0}, {1}cm'.format(species.bin_nom, length))\n row, col = img.shape\n ax[1].axis((0, col, row, 0))\n ax[1].imshow(-img, cmap=plt.cm.gray)\n for line in hough_center:\n p0, p1 = line\n ax[1].plot((p0[0], p1[0]), (p0[1], p1[1]), 'b')\n for line in hough_above:\n p0, p1 = line\n ax[1].plot((p0[0], p1[0]), (p0[1], p1[1]), 'g')\n for line in hough_below:\n p0, p1 = line\n ax[1].plot((p0[0], p1[0]), (p0[1], p1[1]), 'r')\n ax[1].plot((0, img.shape[1]), (hough_range[0], hough_range[0]), 'b--')\n ax[1].plot((0, img.shape[1]), (hough_range[1], hough_range[1]), 'g--')\n ax[1].plot((0, img.shape[1]), (midrib_line(0), midrib_line(img.shape[1])))\n ax[2].imshow(ruler.leaf_bin, cmap=plt.cm.gray)\n ax[2].set_title('{0} binary'.format(species.bin_nom))\n ax[3].imshow(ruler.scale_bin, cmap=plt.cm.gray)\n ax[3].set_title('{0} scale'.format(species.bin_nom))\n\n plt.show()\n\n\n# load all of the leaves found\ndef measure_leaves_list(species_list, r=DEFAULT_RULER, no_scale=False):\n \"\"\"\n Measure all leaves from list of species, given from load_species_structured.\n\n Should theoretically be able to accept any list of leaves, given that it is structured\n in a list (hence the name).\n\n :param no_scale:\n :param r: ruler\n :param species_list: list of all of the species loaded by load_species_structured\n :return: number of successes and failures from measuring leaves\n \"\"\"\n\n # counter for successful and unsuccessful measurements\n successes = 0\n fails = 0\n\n for species in species_list:\n for leaf_path in species.get_leaf_paths():\n try:\n print('')\n tc.print('Loading and measuring {0} \\n@ {1}'.format(species.bin_nom, leaf_path), 5, _f)\n # noinspection PyBroadException\n try:\n r.load_new_image(leaf_path, no_scale=no_scale)\n tc.print(\"Loaded. 
Saving data to ruler...\", 5, _f)\n\n r.save_data(species.bin_nom)\n tc.print('Saved.', 5, _f)\n\n successes += 1\n\n except MeasurementError as e:\n print(e)\n tc.print(\"Error: Failed to measure {0}\".format(species.bin_nom), 1, _f)\n tb = sys.exc_info()[2]\n traceback.print_tb(tb)\n\n fails += 1\n\n except OSError as e:\n print(e)\n tb = sys.exc_info()[2]\n traceback.print_tb(tb)\n\n fails += 1\n\n except TypeError as e:\n tc.print('Error encountered at leaf path {0}:'.format(leaf_path), 1, _f)\n tb = sys.exc_info()[2]\n traceback.print_tb(tb)\n\n fails += 1\n\n return successes, fails\n\n\n# start main\nif __name__ == '__main__':\n # create ruler\n r = Ruler()\n\n # use_ruler(Liriodendron_tulipifera, r)\n # print(\"saving data to ruler\")\n # r.save_data(Liriodendron_tulipifera.bin_nom)\n\n # load the leaves from the hard drive\n leaves_list = load_species_structured(IMAGE_DIR)\n\n # measure the found leaves\n s, f = measure_leaves_list(leaves_list)\n\n tc.print('\\n\\nFINISHED\\n{0} leaves measured successfully and {1} failed attempts.'.format(s, f), 4, _f)\n","repo_name":"patthomasrick/DTLIwANNy2","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":8451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"39362798885","text":"\n\n\nfrom bs4 import BeautifulSoup\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\nfrom DbManager import DatabaseManager\nimport json\nimport re\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium import webdriver\nfrom datetime import datetime\nimport calendar\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nclass Scraper():\n\n def __init__(self, initialize_db):\n\n self.browser = webdriver.Chrome(\"/Users/susysk/Downloads/odds-portal-scraper-master/chromedriver/chromedriver\")\n #self.league = self.parse_json(league_json)\n self.db_manager = DatabaseManager(initialize_db)\n\n\n def scrape_all_urls(self, do_verbose_output=False):\n\n if do_verbose_output is True:\n output_str = \"Start scraping \" + self.league[\"league\"] + \" of \"\n output_str += self.league[\"area\"] + \"...\"\n print(output_str)\n\n for url in self.league[\"urls\"]:\n try:\n self.scrape_url(url)\n except: pass\n self.browser.close()\n\n if do_verbose_output is True:\n print(\"Done scraping this league.\")\n\n def getLeague(self,url):\n league = url.split('/')[5]\n if ('-2' in league):\n league = league[0:league.find('-2')]\n return league\n\n def getSeason(self,url):\n league = url.split('/')[5]\n if ('-2' in league):\n season = league[league.find('-2')+1:]\n else: season = \"2017-2018\"\n return season\n\n def getScore(self,tag):\n score = tag.find(class_=\"result\").findChildren()[1].string\n return score\n\n def getCountry(self,url):\n return url.split('/')[4]\n\n\n def getBookmakers(self,tag):\n numOfBookmakers = len(tag.find_all(class_=\"name2\"))\n bookmakers= tag.find_all(class_=\"name\")[1:numOfBookmakers+1]\n return list(map(lambda x:x.text,bookmakers))\n\n def getDataQuotePerSegnoDC(self,bookmaker,quota):\n if(quota=='1X'):\n data = self.browser.find_elements_by_xpath(\"//*[contains(text(),'\"+bookmaker+\"')]\")[0].find_elements_by_xpath('../../../td[2]')[0]\n elif (quota=='X2'):\n data = self.browser.find_elements_by_xpath(\"//*[contains(text(),'\"+bookmaker+\"')]\")[0].find_elements_by_xpath('../../../td[3]')[0]\n else: data = 
self.browser.find_elements_by_xpath(\"//*[contains(text(),'\"+bookmaker+\"')]\")[0].find_elements_by_xpath('../../../td[4]')[0]\n hov = ActionChains(self.browser).move_to_element(data)\n hov.perform()\n quoteData = list(filter(lambda x: x!='' and x!='Opening odds:',self.browser.find_elements_by_class_name(\"spc-nowrap\")[0].get_attribute(\"innerText\").split('\\n')))\n return quoteData\n\n\n def getDataQuotePerSegno1X2(self,bookmaker,quota):\n if(quota=='1'):\n data = self.browser.find_elements_by_xpath(\"//*[contains(text(),'\"+bookmaker+\"')]\")[0].find_elements_by_xpath('../../../td[2]')[0]\n elif (quota=='X'):\n data = self.browser.find_elements_by_xpath(\"//*[contains(text(),'\"+bookmaker+\"')]\")[0].find_elements_by_xpath('../../../td[3]')[0]\n else: data = self.browser.find_elements_by_xpath(\"//*[contains(text(),'\"+bookmaker+\"')]\")[0].find_elements_by_xpath('../../../td[4]')[0]\n hov = ActionChains(self.browser).move_to_element(data)\n hov.perform()\n quoteData = list(filter(lambda x: x!='' and x!='Opening odds:',self.browser.find_elements_by_class_name(\"spc-nowrap\")[0].get_attribute(\"innerText\").split('\\n')))\n return quoteData\n\n def findDropOdds(self, book, country, league, season, match_date, current_date_str, current_date_unix, score, participants,url):\n dataAndQuote1=self.getDataQuotePerSegno1X2(book,'1')\n dataAndQuoteX=self.getDataQuotePerSegno1X2(book,'X')\n dataAndQuote2=self.getDataQuotePerSegno1X2(book,'2')\n\n quotaFinaleIniziale1 =(dataAndQuote1[0:1][0],dataAndQuote1[-1:][0])\n quotaFinaleInizialeX =(dataAndQuoteX[0:1][0],dataAndQuoteX[-1:][0])\n quotaFinaleIniziale2 =(dataAndQuote2[0:1][0],dataAndQuote2[-1:][0])\n data = quotaFinaleIniziale1[0][:quotaFinaleIniziale1[0].find(':')+4]\n quotaFinaleIniziale1=(float(quotaFinaleIniziale1[0][len(data):len(data)+4]),float(quotaFinaleIniziale1[1][len(data):len(data)+4]))\n quotaFinaleInizialeX=(float(quotaFinaleInizialeX[0][len(data):len(data)+4]),float(quotaFinaleInizialeX[1][len(data):len(data)+4]))\n quotaFinaleIniziale2=(float(quotaFinaleIniziale2[0][len(data):len(data)+4]),float(quotaFinaleIniziale2[1][len(data):len(data)+4]))\n #print(country+\" \"+league+\" \"+season+\" \"+current_date_str+\" \"+ str(current_date_unix)+\" \"+score +\" \"+ str(participants)+\" \"+ book+\" 1 \"+dataWithYear+\" \"+str(dataUnix)+\" \"+str(quota),\" \",url)\n drop1 = (quotaFinaleIniziale1[0]-quotaFinaleIniziale1[1])/max(quotaFinaleIniziale1[0],quotaFinaleIniziale1[1]) * 100\n dropX = (quotaFinaleInizialeX[0]-quotaFinaleInizialeX[1])/max(quotaFinaleInizialeX[0],quotaFinaleInizialeX[1]) * 100\n drop2 = (quotaFinaleIniziale2[0]-quotaFinaleIniziale2[1])/max(quotaFinaleIniziale2[0],quotaFinaleIniziale2[1]) * 100\n self.db_manager.add_dropodds(country, league, season, current_date_str,\n str(current_date_unix), score,participants[0],participants[1],book,\n str(quotaFinaleIniziale1[1]),str(quotaFinaleInizialeX[1]),str(quotaFinaleIniziale2[1]),\n str(quotaFinaleIniziale1[0]),str(quotaFinaleInizialeX[0]),str(quotaFinaleIniziale2[0]),\n str(drop1),str(dropX),str(drop2),\n url)\n\n\n def findAndSaveQuota(self, book, segno, country, league, season, match_date, current_date_str, current_date_unix, score, participants,url):\n dataAndQuote=None\n if (segno=='1' or segno == '2' or segno =='X'):\n dataAndQuote=self.getDataQuotePerSegno1X2(book,segno)\n if (segno =='1X' or segno == 'X2' or segno == '12'):\n dataAndQuote = self.getDataQuotePerSegnoDC(book,segno)\n if (dataAndQuote != None):\n for q in dataAndQuote:\n data = q[:q.find(':')+4]\n 
dataWithYear = (data+str(match_date.today().year)).replace(',','')\n dataUnix = calendar.timegm(datetime.strptime(dataWithYear, \"%d %b %H:%M %Y\").utctimetuple())\n quota = float(q[len(data):len(data)+4])\n #print(country+\" \"+league+\" \"+season+\" \"+current_date_str+\" \"+ str(current_date_unix)+\" \"+score +\" \"+ str(participants)+\" \"+ book+\" 1 \"+dataWithYear+\" \"+str(dataUnix)+\" \"+str(quota),\" \",url)\n self.db_manager.add_surebet(country, league, season, current_date_str, str(current_date_unix), score,participants[0],participants[1],book,segno,str(quota),dataWithYear,str(dataUnix), url)\n\n\n def saveQuote1X2(self, url, country, league, season, current_date_str, date, current_date_unix, participants, score, book):\n self.findAndSaveQuota(book, \"1\", country, league, season, date, current_date_str, current_date_unix, score, participants, url)\n self.findAndSaveQuota(book, \"X\", country, league, season, date, current_date_str, current_date_unix, score, participants, url)\n self.findAndSaveQuota(book, \"2\", country, league, season, date, current_date_str, current_date_unix, score, participants, url)\n\n\n def saveQuoteDC(self, url, country, league, season, current_date_str, date, current_date_unix, participants, score, book):\n self.findAndSaveQuota(book, \"1X\", country, league, season, date, current_date_str, current_date_unix, score, participants, url)\n self.findAndSaveQuota(book, \"X2\", country, league, season, date, current_date_str, current_date_unix, score, participants, url)\n self.findAndSaveQuota(book, \"12\", country, league, season, date, current_date_str, current_date_unix, score, participants, url)\n\n\n def scrape_url(self, url):\n \"\"\"\n Scrape the data for every match on a given URL and insert each into the\n database.\n\n Args:\n url (str): URL to scrape data from.\n \"\"\"\n try:\n self.browser.get(url)\n tbl_html = self.browser.find_element_by_id(\"col-content\").get_attribute(\"innerHTML\")\n tbl_match = BeautifulSoup(tbl_html, \"html.parser\")\n\n country= self.getCountry(url)\n league = self.getLeague(url)\n season = self.getSeason(url)\n\n current_date_str = self.get_date(tbl_match).replace(' ',' ')\n date = datetime.strptime(current_date_str[current_date_str.find(',')+1:], \" %d %b %Y, %H:%M\")\n current_date_unix = calendar.timegm(date.utctimetuple())\n\n participants = self.get_participants(tbl_match)\n score=self.getScore(tbl_match)\n\n bookmakers = self.getBookmakers(tbl_match)\n for book in bookmakers:\n #Surebet\n self.saveQuote1X2(url, country, league, season, current_date_str, date, current_date_unix, participants, score, book)\n self.findDropOdds(book, country, league, season, date, current_date_str, current_date_unix, score, participants,url)\n try:\n self.browser.find_element_by_id('tab-sport-others').click()\n data = self.browser.find_elements_by_xpath(\"//*[contains(text(),'Double Chance')]\")[0]\n except:\n try:\n data = self.browser.find_elements_by_xpath('//*[@title=\"Double Chance\"]')[0]\n except:\n data = None\n finally:\n if(data!=None):\n data.click()\n # It is important to wait until the page has finished loading\n # and to look up the table again: otherwise it finds the bookmakers for the 1, X and 2 odds\n # and not the ones for the double chances\n WebDriverWait(self.browser, 20).until(\n EC.element_to_be_clickable((By.XPATH, \"//*[@class='name2'][1]\")))\n tbl_html = self.browser.find_element_by_id(\"col-content\").get_attribute(\"innerHTML\")\n tbl = BeautifulSoup(tbl_html, \"html.parser\")\n bookmakers = 
self.getBookmakers(tbl)\n for book in bookmakers:\n #Surebet\n self.saveQuoteDC(url, country, league, season, current_date_str, date, current_date_unix, participants, score, book)\n else:\n print('[DEBUG] Unable to find double chances...')\n\n except Exception as e:\n print(e)\n finally:\n self.browser.close()\n\n\n def get_date(self, tag):\n \"\"\"\n Extract the date from an HTML tag for a date row.\n\n Args:\n tag (obj): HTML tag object from BeautifulSoup.\n\n Returns:\n (str) Extracted date string.\n \"\"\"\n\n this_date = tag.find(class_=\"datet\").string\n if \"Today\" in this_date:\n return \"Today\"\n elif this_date.endswith(\" - Play Offs\"):\n this_date = this_date[:-12]\n return this_date\n\n def get_time(self, tag):\n \"\"\"\n Extract the time from an HTML tag for a soccer match row.\n\n Args:\n tag (obj): HTML tag object from BeautifulSoup.\n\n Returns:\n (str) Extracted time.\n \"\"\"\n\n return tag.find(class_=\"datet\").string\n\n def get_participants(self, tag):\n \"\"\"\n Extract the match's participants from an HTML tag for a soccer match\n row.\n\n Args:\n tag (obj): HTML tag object from BeautifulSoup.\n\n Returns:\n (list of str) Extracted match participants.\n \"\"\"\n parsed_strings = tag.findChildren()[0].text.split(\" - \")\n participants = []\n participants.append(parsed_strings[0])\n participants.append(parsed_strings[-1])\n return participants\n\n def get_urlMatch(self, tag):\n \"\"\"\n return match url\n \"\"\"\n\n with open('matches/urls', 'a') as outfile:\n outfile.write('http://www.oddsportal.com'+tag.find_all(\"a\")[0]['href']+'\\n')\n\n\nscraper = Scraper(True)\nscraper.scrape_url(\"http://www.oddsportal.com/soccer/ecuador/serie-a/independiente-del-valle-dep-cuenca-KW7lI2H9/\")\n","repo_name":"Susysk/ML2018","sub_path":"Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":12400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43794732722","text":"from __future__ import print_function\nfrom argparse import ArgumentParser\nfrom contextlib import contextmanager\nfrom datetime import timedelta\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\nimport os\nfrom os import makedirs\nfrom os.path import exists\nimport pathlib\nimport requests\nimport shutil\nimport sys\nfrom sys import stdout\nimport tempfile\nfrom time import monotonic\nfrom urllib.error import URLError, HTTPError\nfrom urllib.parse import urlparse\nimport urllib.request\n\nfrom eiq import config\nfrom eiq.helper.google_drive_downloader import GoogleDriveDownloader\n\ntry:\n import progressbar\n found = True\nexcept ImportError:\n found = False\n\n\nclass ProgressBar:\n def __init__(self):\n self.pbar = None\n\n def __call__(self, block_num, block_size, total_size):\n if not self.pbar:\n self.pbar = progressbar.ProgressBar(maxval=total_size)\n self.pbar.start()\n\n downloaded = block_num * block_size\n if downloaded < total_size:\n self.pbar.update(downloaded)\n else:\n self.pbar.finish()\n\n\ndef log(*args):\n logging.info(\" \".join(\"%s\" % a for a in args))\n\n\nclass InferenceTimer:\n def __init__(self):\n self.time = 0\n\n @contextmanager\n def timeit(self, message: str = None):\n begin = monotonic()\n try:\n yield\n finally:\n end = monotonic()\n self.convert(end-begin)\n print(\"{0}: {1}\".format(message, self.time))\n\n def convert(self, elapsed):\n self.time = str(timedelta(seconds=elapsed))\n\n\ndef get_temporary_path(*path):\n return os.path.join(tempfile.gettempdir(), *path)\n\n\ndef 
download_url(file_path: str = None, filename: str = None,\n url: str = None, netloc: str = None):\n\n if not check_connection(url):\n sys.exit(\"'{0}' could not be reached, \" \\\n \" please check your internet connection.\".format(netloc))\n\n try:\n log(\"Downloading '{0}'\".format(filename))\n log(\"From '{0}' ...\".format(netloc))\n\n with InferenceTimer().timeit(\"Download time\"):\n if found is True:\n urllib.request.urlretrieve(url, file_path, ProgressBar())\n else:\n urllib.request.urlretrieve(url, file_path)\n except URLError as e:\n sys.exit(\"Something went wrong with URLError: %s\" % e)\n except HTTPError as e:\n sys.exit(\"Something went wrong with HTTPError: %s\" % e)\n finally:\n return file_path\n\n\ndef retrieve_from_id(gd_id_url: str=None, pathname: str = None,\n filename: str=None, unzip_flag: bool=False):\n dirpath = os.path.join(config.TMP_FILE_PATH, pathname)\n tmpdir = get_temporary_path(dirpath)\n if not os.path.exists(dirpath):\n try:\n pathlib.Path(tmpdir).mkdir(parents=True, exist_ok=True)\n except OSError:\n sys.exit(\"os.mkdir() function has failed: %s\" % tmpdir)\n\n fp = os.path.join(tmpdir)\n if (os.path.isfile(fp)):\n return fp\n else:\n dst = os.path.join(tmpdir, filename)\n GoogleDriveDownloader.download_file_from_google_drive(\n file_id=gd_id_url, dest_path=dst, unzip=unzip_flag)\n return fp\n\n\ndef retrieve_from_url(url: str = None, name: str = None,\n filename: str = None, unzip: bool=False):\n dirpath = os.path.join(config.TMP_FILE_PATH, name)\n filename_parsed = urlparse(url)\n if filename is None:\n filename = os.path.basename(filename_parsed.path)\n\n tmpdir = get_temporary_path(dirpath)\n if not os.path.exists(dirpath):\n try:\n pathlib.Path(tmpdir).mkdir(parents=True, exist_ok=True)\n except OSError:\n sys.exit(\"os.mkdir() function has failed: %s\" % tmpdir)\n\n fp = os.path.join(tmpdir, filename)\n if (os.path.isfile(fp)):\n return fp\n else:\n file = download_url(fp, filename, url, filename_parsed.netloc)\n\n if unzip:\n path = os.path.dirname(file)\n shutil.unpack_archive(file, path)\n return path\n\n return file\n\n\ndef check_connection(url: str = None):\n try:\n urllib.request.urlopen(url)\n return True\n except:\n return False\n\n\ndef copy(target_dir, src_dir):\n if not os.path.exists(target_dir):\n try:\n pathlib.Path(target_dir).mkdir(parents=True, exist_ok=True)\n except OSError:\n sys.exit(\"os.mkdir() function has failed: %s\" % target_dir)\n\n for file in os.listdir(src_dir):\n file_path = os.path.join(src_dir, file)\n\n if os.path.isdir(file_path):\n copy(os.path.join(target_dir, file), file_path)\n else:\n if file != config.INIT_MODULE_FILE:\n shutil.copy(file_path, target_dir)\n\n\ndef args_parser(camera: bool = False, webcam: bool = False,\n image: bool = False, model: bool = False,\n label: bool = False, epochs: bool = False,\n videopath: bool = False, camera_inference: bool = False):\n parser = ArgumentParser()\n if camera:\n parser.add_argument(\n '-c', '--camera', type=int, default=0,\n help=\"set the number your camera is identified at /dev/video.\")\n if camera_inference:\n parser.add_argument(\n '-ci', '--camera_inference', type=bool, default=False,\n help=\"set to True if you want to run inference on your camera, \" \\\n \"otherwise it is going to run inference on a single image.\")\n if webcam:\n parser.add_argument(\n '-w', '--webcam', type=int, default=-1,\n help=\"if you are using a webcam, set the number your \" \\\n \"webcam is identified at /dev/video.\")\n if image:\n parser.add_argument(\n '-i', '--image', default=None,\n 
help=\"path of the image to be classified\")\n if model:\n parser.add_argument(\n '-m', '--model', default=None,\n help=\"path of the .tflite model to be executed\")\n if label:\n parser.add_argument(\n '-l', '--label', default=None,\n help=\"path of the file containing labels\")\n if epochs:\n parser.add_argument(\n '-e', '--epochs', type=int, default=50,\n help=\"number of epochs for the training\")\n if videopath:\n parser.add_argument(\n '-v', '--videopath', type=None, default=0,\n help=\"path of the video file\")\n\n return parser.parse_args()\n","repo_name":"gitter-badger/pyeiq","sub_path":"eiq/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37726985802","text":"\"\"\"\nTest of neo.io.asciiimageio\n\"\"\"\nimport os\nimport unittest\nimport quantities as pq\nfrom neo.io import AsciiImageIO\nimport numpy as np\n\n\nclass TestAsciiImageIO(unittest.TestCase):\n\n def test_read_txt(self):\n img = ''\n img_list = []\n for frame in range(20):\n img_list.append([])\n for y in range(50):\n img_list[frame].append([])\n for x in range(50):\n img += str(x)\n img += '\\t'\n img_list[frame][y].append(x)\n img_list = np.array(img_list)\n file_name = \"txt_test_file.txt\"\n file = open(file_name, mode=\"w\")\n file.write(str(img))\n file.close()\n\n object = AsciiImageIO(file_name='txt_test_file.txt',\n nb_frame=20, nb_row=50, nb_column=50, units='V',\n sampling_rate=1 * pq.Hz, spatial_scale=1 * pq.micrometer)\n block = object.read_block()\n self.assertEqual(len(block.segments), 1)\n self.assertEqual(len(block.segments[0].imagesequences), 1)\n self.assertEqual(block.segments[0].imagesequences[0].shape, (20, 50, 50))\n self.assertEqual(block.segments[0].imagesequences[0].any(), img_list.any())\n\n file.close()\n os.remove(file_name)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"NeuralEnsemble/python-neo","sub_path":"neo/test/iotest/test_asciiimageio.py","file_name":"test_asciiimageio.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":275,"dataset":"github-code","pt":"78"} +{"seq_id":"10277098627","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 26 10:41:51 2021\n\n@author: janssens\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom netCDF4 import Dataset\nfrom dataloader import DataLoaderDALES, DataLoaderMicroHH\n\nrp = '/home/hp200321/data'\n\nlps = [rp+'/botany-6-768/runs/Run_40',\n rp+'/botany-6-1536-50/runs/Run_40'\n ]\nlabs = ['dx = 100m',\n 'dx = 200m'\n ]\nsp = '/home/hp200321/data/pp/figs'\nmods = ['dales',\n 'dales']\n\n#rp = '/scratch-shared/janssens'\n\n#lps = [rp+'/bomex200_from100_12hr',\n# rp+'/bomex100a5_from100_12hr',\n# rp+'/bomex200_fiso_from100_12hr',\n# rp+'/bomex100_e12',\n# rp+'/bomex50',\n# rp+'/tmp.bomex/bomex_200m',\n# rp+'/tmp.bomex/bomex_100m',\n # '/scratch-shared/janssens/tmp.bomex/bomex_50m/ppagg',\n# ]\n#sp = '/scratch-shared/janssens/bomex_comparisons'\n\n#labs = [\n# r'D1: $\\Delta x = 200m$',\n# r'D2: $\\Delta x = 200m$, a5',\n# r'D3: $\\Delta x = 200m$, fiso',\n# r'D4: $\\Delta x = 100m$',\n# r'D5: $\\Delta x = 50m$',\n# r'M1: $\\Delta x = 200m$',\n# r'M2: $\\Delta x = 100m$',\n #r'M3 - $\\Delta x = 50m$',\n# ]\n#mods = [\n# 'dales',\n# 'dales',\n# 'dales',\n# 'dales',\n# 'dales',\n# 'microhh',\n# 'microhh',\n # 'microhh',\n# ]\n\nls = ['-',\n '-.',\n (0, (3, 2, 1, 2, 1, 2)),\n '--',\n ':',\n '-',\n 
'--',\n # ':'\n ]\n\ndef mav(x, w):\n return np.convolve(x, np.ones(w), 'valid') / w\n#%% Plot vertical profiles at a single time\n\nvar = 'qt'\nxlab = r'$q_t$'\ntplt = 2\ntav = 2\nzmin = 100\nzmax = 3000\n\nfig=plt.figure(); ax=plt.gca()\nfor i in range(len(lps)):\n lp = lps[i]\n \n if mods[i] == 'dales':\n dl = DataLoaderDALES(lp)\n col = plt.cm.RdYlBu(0.99)\n elif mods[i] == 'microhh':\n dl = DataLoaderMicroHH(lp)\n col = plt.cm.RdYlBu(0.7)\n \n time = dl.time1d/3600\n zt = dl.zf\n \n itmin = np.argmin(abs(tplt-time))\n itmax = np.argmin(abs(tplt+tav-time))\n \n izs = np.argmin(abs(zmin-zt))\n ize = np.argmin(abs(zmax-zt))\n \n load_func = getattr(dl, 'load_'+var+'av')\n pltvar = np.mean(load_func(izs,ize)[itmin:itmax,:],axis=0)\n\n ax.plot(pltvar,zt[izs:ize],label=labs[i],color=col,linestyle=ls[i])\n \nax.set_ylabel('z [m]')\nax.set_xlabel(xlab if len(xlab)>0 else var.split('/')[-1])\n# ax.set_title('t = %.2f'%(tplt) +' hr')\nax.legend(loc='best',bbox_to_anchor=(1,1))\nax.ticklabel_format(style='sci',axis='x',scilimits=(0,0))\nplt.savefig(sp+'/prof_'+var+'_t'+str(tplt)+'.pdf',bbox_inches='tight')\nplt.show()\n\n","repo_name":"martinjanssens/ppagg","sub_path":"compare_1d.py","file_name":"compare_1d.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35804222158","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport keras\nkeras.__version__\n\n\n# # 5.2 - Using convnets with small datasets\n# \n# This notebook contains the code sample found in Chapter 5, Section 2 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.\n# \n# ## Training a convnet from scratch on a small dataset\n# \n# Having to train an image classification model using only very little data is a common situation, which you likely encounter yourself in \n# practice if you ever do computer vision in a professional context.\n# \n# Having \"few\" samples can mean anywhere from a few hundreds to a few tens of thousands of images. As a practical example, we will focus on \n# classifying images as \"dogs\" or \"cats\", in a dataset containing 4000 pictures of cats and dogs (2000 cats, 2000 dogs). We will use 2000 \n# pictures for training, 1000 for validation, and finally 1000 for testing.\n# \n# In this section, we will review one basic strategy to tackle this problem: training a new model from scratch on what little data we have. We \n# will start by naively training a small convnet on our 2000 training samples, without any regularization, to set a baseline for what can be \n# achieved. This will get us to a classification accuracy of 71%. At that point, our main issue will be overfitting. Then we will introduce \n# *data augmentation*, a powerful technique for mitigating overfitting in computer vision. By leveraging data augmentation, we will improve \n# our network to reach an accuracy of 82%.\n# \n# In the next section, we will review two more essential techniques for applying deep learning to small datasets: *doing feature extraction \n# with a pre-trained network* (this will get us to an accuracy of 90% to 93%), and *fine-tuning a pre-trained network* (this will get us to \n# our final accuracy of 95%). 
Together, these three strategies -- training a small model from scratch, doing feature extracting using a \n# pre-trained model, and fine-tuning a pre-trained model -- will constitute your future toolbox for tackling the problem of doing computer \n# vision with small datasets.\n\n# ## The relevance of deep learning for small-data problems\n# \n# You will sometimes hear that deep learning only works when lots of data is available. This is in part a valid point: one fundamental \n# characteristic of deep learning is that it is able to find interesting features in the training data on its own, without any need for manual \n# feature engineering, and this can only be achieved when lots of training examples are available. This is especially true for problems where \n# the input samples are very high-dimensional, like images.\n# \n# However, what constitutes \"lots\" of samples is relative -- relative to the size and depth of the network you are trying to train, for \n# starters. It isn't possible to train a convnet to solve a complex problem with just a few tens of samples, but a few hundreds can \n# potentially suffice if the model is small and well-regularized and if the task is simple. \n# Because convnets learn local, translation-invariant features, they are very \n# data-efficient on perceptual problems. Training a convnet from scratch on a very small image dataset will still yield reasonable results \n# despite a relative lack of data, without the need for any custom feature engineering. You will see this in action in this section.\n# \n# But what's more, deep learning models are by nature highly repurposable: you can take, say, an image classification or speech-to-text model \n# trained on a large-scale dataset then reuse it on a significantly different problem with only minor changes. Specifically, in the case of \n# computer vision, many pre-trained models (usually trained on the ImageNet dataset) are now publicly available for download and can be used \n# to bootstrap powerful vision models out of very little data. That's what we will do in the next section.\n# \n# For now, let's get started by getting our hands on the data.\n\n# ## Downloading the data\n# \n# The cats vs. dogs dataset that we will use isn't packaged with Keras. It was made available by Kaggle.com as part of a computer vision \n# competition in late 2013, back when convnets weren't quite mainstream. You can download the original dataset at: \n# `https://www.kaggle.com/c/dogs-vs-cats/data` (you will need to create a Kaggle account if you don't already have one -- don't worry, the \n# process is painless).\n# \n# The pictures are medium-resolution color JPEGs. They look like this:\n# \n# ![cats_vs_dogs_samples](https://s3.amazonaws.com/book.keras.io/img/ch5/cats_vs_dogs_samples.jpg)\n\n# Unsurprisingly, the cats vs. dogs Kaggle competition in 2013 was won by entrants who used convnets. The best entries could achieve up to \n# 95% accuracy. In our own example, we will get fairly close to this accuracy (in the next section), even though we will be training our \n# models on less than 10% of the data that was available to the competitors.\n# This original dataset contains 25,000 images of dogs and cats (12,500 from each class) and is 543MB large (compressed). 
After downloading \n# and uncompressing it, we will create a new dataset containing three subsets: a training set with 1000 samples of each class, a validation \n# set with 500 samples of each class, and finally a test set with 500 samples of each class.\n# \n# Here are a few lines of code to do this:\n\n# In[2]:\n\n\nimport os, shutil\n\n\n# In[ ]:\n\n\n# The path to the directory where the original\n# dataset was uncompressed\noriginal_dataset_dir = '/Users/fchollet/Downloads/kaggle_original_data'\n\n# The directory where we will\n# store our smaller dataset\nbase_dir = '/Users/fchollet/Downloads/cats_and_dogs_small'\nos.mkdir(base_dir)\n\n# Directories for our training,\n# validation and test splits\ntrain_dir = os.path.join(base_dir, 'train')\nos.mkdir(train_dir)\nvalidation_dir = os.path.join(base_dir, 'validation')\nos.mkdir(validation_dir)\ntest_dir = os.path.join(base_dir, 'test')\nos.mkdir(test_dir)\n\n# Directory with our training cat pictures\ntrain_cats_dir = os.path.join(train_dir, 'cats')\nos.mkdir(train_cats_dir)\n\n# Directory with our training dog pictures\ntrain_dogs_dir = os.path.join(train_dir, 'dogs')\nos.mkdir(train_dogs_dir)\n\n# Directory with our validation cat pictures\nvalidation_cats_dir = os.path.join(validation_dir, 'cats')\nos.mkdir(validation_cats_dir)\n\n# Directory with our validation dog pictures\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs')\nos.mkdir(validation_dogs_dir)\n\n# Directory with our validation cat pictures\ntest_cats_dir = os.path.join(test_dir, 'cats')\nos.mkdir(test_cats_dir)\n\n# Directory with our validation dog pictures\ntest_dogs_dir = os.path.join(test_dir, 'dogs')\nos.mkdir(test_dogs_dir)\n\n# Copy first 1000 cat images to train_cats_dir\nfnames = ['cat.{}.jpg'.format(i) for i in range(1000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_cats_dir, fname)\n shutil.copyfile(src, dst)\n\n# Copy next 500 cat images to validation_cats_dir\nfnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_cats_dir, fname)\n shutil.copyfile(src, dst)\n \n# Copy next 500 cat images to test_cats_dir\nfnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_cats_dir, fname)\n shutil.copyfile(src, dst)\n \n# Copy first 1000 dog images to train_dogs_dir\nfnames = ['dog.{}.jpg'.format(i) for i in range(1000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_dogs_dir, fname)\n shutil.copyfile(src, dst)\n \n# Copy next 500 dog images to validation_dogs_dir\nfnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_dogs_dir, fname)\n shutil.copyfile(src, dst)\n \n# Copy next 500 dog images to test_dogs_dir\nfnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_dogs_dir, fname)\n shutil.copyfile(src, dst)\n\n\n# As a sanity check, let's count how many pictures we have in each training split (train/validation/test):\n\n# In[4]:\n\n\nprint('total training cat images:', len(os.listdir(train_cats_dir)))\n\n\n# In[5]:\n\n\nprint('total training dog images:', len(os.listdir(train_dogs_dir)))\n\n\n# In[6]:\n\n\nprint('total validation cat 
images:', len(os.listdir(validation_cats_dir)))\n\n\n# In[7]:\n\n\nprint('total validation dog images:', len(os.listdir(validation_dogs_dir)))\n\n\n# In[8]:\n\n\nprint('total test cat images:', len(os.listdir(test_cats_dir)))\n\n\n# In[9]:\n\n\nprint('total test dog images:', len(os.listdir(test_dogs_dir)))\n\n\n# \n# So we have indeed 2000 training images, and then 1000 validation images and 1000 test images. In each split, there is the same number of \n# samples from each class: this is a balanced binary classification problem, which means that classification accuracy will be an appropriate \n# measure of success.\n\n# ## Building our network\n# \n# We've already built a small convnet for MNIST in the previous example, so you should be familiar with them. We will reuse the same \n# general structure: our convnet will be a stack of alternated `Conv2D` (with `relu` activation) and `MaxPooling2D` layers.\n# \n# However, since we are dealing with bigger images and a more complex problem, we will make our network accordingly larger: it will have one \n# more `Conv2D` + `MaxPooling2D` stage. This serves both to augment the capacity of the network, and to further reduce the size of the \n# feature maps, so that they aren't overly large when we reach the `Flatten` layer. Here, since we start from inputs of size 150x150 (a \n# somewhat arbitrary choice), we end up with feature maps of size 7x7 right before the `Flatten` layer.\n# \n# Note that the depth of the feature maps is progressively increasing in the network (from 32 to 128), while the size of the feature maps is \n# decreasing (from 148x148 to 7x7). This is a pattern that you will see in almost all convnets.\n# \n# Since we are attacking a binary classification problem, we are ending the network with a single unit (a `Dense` layer of size 1) and a \n# `sigmoid` activation. This unit will encode the probability that the network is looking at one class or the other.\n\n# In[10]:\n\n\nfrom keras import layers\nfrom keras import models\n\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu',\n input_shape=(150, 150, 3)))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(512, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\n\n# Let's take a look at how the dimensions of the feature maps change with every successive layer:\n\n# In[11]:\n\n\nmodel.summary()\n\n\n# For our compilation step, we'll go with the `RMSprop` optimizer as usual. Since we ended our network with a single sigmoid unit, we will \n# use binary crossentropy as our loss (as a reminder, check out the table in Chapter 4, section 5 for a cheatsheet on what loss function to \n# use in various situations).\n\n# In[12]:\n\n\nfrom keras import optimizers\n\nmodel.compile(loss='binary_crossentropy',\n optimizer=optimizers.RMSprop(lr=1e-4),\n metrics=['acc'])\n\n\n# ## Data preprocessing\n# \n# As you already know by now, data should be formatted into appropriately pre-processed floating point tensors before being fed into our \n# network. 
Currently, our data sits on a drive as JPEG files, so the steps for getting it into our network are roughly:\n# \n# * Read the picture files.\n# * Decode the JPEG content to RGB grids of pixels.\n# * Convert these into floating point tensors.\n# * Rescale the pixel values (between 0 and 255) to the [0, 1] interval (as you know, neural networks prefer to deal with small input values).\n# \n# It may seem a bit daunting, but thankfully Keras has utilities to take care of these steps automatically. Keras has a module with image \n# processing helper tools, located at `keras.preprocessing.image`. In particular, it contains the class `ImageDataGenerator` which allows to \n# quickly set up Python generators that can automatically turn image files on disk into batches of pre-processed tensors. This is what we \n# will use here.\n\n# In[13]:\n\n\nfrom keras.preprocessing.image import ImageDataGenerator\n\n# All images will be rescaled by 1./255\ntrain_datagen = ImageDataGenerator(rescale=1./255)\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntrain_generator = train_datagen.flow_from_directory(\n # This is the target directory\n train_dir,\n # All images will be resized to 150x150\n target_size=(150, 150),\n batch_size=20,\n # Since we use binary_crossentropy loss, we need binary labels\n class_mode='binary')\n\nvalidation_generator = test_datagen.flow_from_directory(\n validation_dir,\n target_size=(150, 150),\n batch_size=20,\n class_mode='binary')\n\n\n# Let's take a look at the output of one of these generators: it yields batches of 150x150 RGB images (shape `(20, 150, 150, 3)`) and binary \n# labels (shape `(20,)`). 20 is the number of samples in each batch (the batch size). Note that the generator yields these batches \n# indefinitely: it just loops endlessly over the images present in the target folder. For this reason, we need to `break` the iteration loop \n# at some point.\n\n# In[14]:\n\n\nfor data_batch, labels_batch in train_generator:\n print('data batch shape:', data_batch.shape)\n print('labels batch shape:', labels_batch.shape)\n break\n\n\n# Let's fit our model to the data using the generator. We do it using the `fit_generator` method, the equivalent of `fit` for data generators \n# like ours. It expects as first argument a Python generator that will yield batches of inputs and targets indefinitely, like ours does. \n# Because the data is being generated endlessly, the generator needs to know how many samples to draw from the generator before \n# declaring an epoch over. This is the role of the `steps_per_epoch` argument: after having drawn `steps_per_epoch` batches from the \n# generator, i.e. after having run for `steps_per_epoch` gradient descent steps, the fitting process will go to the next epoch. In our case, \n# batches are 20-sample large, so it will take 100 batches until we see our target of 2000 samples.\n# \n# When using `fit_generator`, one may pass a `validation_data` argument, much like with the `fit` method. Importantly, this argument is \n# allowed to be a data generator itself, but it could be a tuple of Numpy arrays as well. 
If you pass a generator as `validation_data`, then \n# this generator is expected to yield batches of validation data endlessly, and thus you should also specify the `validation_steps` argument, \n# which tells the process how many batches to draw from the validation generator for evaluation.\n\n# In[15]:\n\n\nhistory = model.fit_generator(\n train_generator,\n steps_per_epoch=100,\n epochs=30,\n validation_data=validation_generator,\n validation_steps=50)\n\n\n# It is good practice to always save your models after training:\n\n# In[16]:\n\n\nmodel.save('cats_and_dogs_small_1.h5')\n\n\n# Let's plot the loss and accuracy of the model over the training and validation data during training:\n\n# In[34]:\n\n\nimport matplotlib.pyplot as plt\n\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(acc))\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.legend()\n\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()\n\n\n# These plots are characteristic of overfitting. Our training accuracy increases linearly over time, until it reaches nearly 100%, while our \n# validation accuracy stalls at 70-72%. Our validation loss reaches its minimum after only five epochs then stalls, while the training loss \n# keeps decreasing linearly until it reaches nearly 0.\n# \n# Because we only have relatively few training samples (2000), overfitting is going to be our number one concern. You already know about a \n# number of techniques that can help mitigate overfitting, such as dropout and weight decay (L2 regularization). We are now going to \n# introduce a new one, specific to computer vision, and used almost universally when processing images with deep learning models: *data \n# augmentation*.\n\n# ## Using data augmentation\n# \n# Overfitting is caused by having too few samples to learn from, rendering us unable to train a model able to generalize to new data. \n# Given infinite data, our model would be exposed to every possible aspect of the data distribution at hand: we would never overfit. Data \n# augmentation takes the approach of generating more training data from existing training samples, by \"augmenting\" the samples via a number \n# of random transformations that yield believable-looking images. The goal is that at training time, our model would never see the exact same \n# picture twice. This helps the model get exposed to more aspects of the data and generalize better.\n# \n# In Keras, this can be done by configuring a number of random transformations to be performed on the images read by our `ImageDataGenerator` \n# instance. Let's get started with an example:\n\n# In[35]:\n\n\ndatagen = ImageDataGenerator(\n rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')\n\n\n# These are just a few of the options available (for more, see the Keras documentation). 
Let's quickly go over what we just wrote:\n# \n# * `rotation_range` is a value in degrees (0-180), a range within which to randomly rotate pictures.\n# * `width_shift` and `height_shift` are ranges (as a fraction of total width or height) within which to randomly translate pictures \n# vertically or horizontally.\n# * `shear_range` is for randomly applying shearing transformations.\n# * `zoom_range` is for randomly zooming inside pictures.\n# * `horizontal_flip` is for randomly flipping half of the images horizontally -- relevant when there are no assumptions of horizontal \n# asymmetry (e.g. real-world pictures).\n# * `fill_mode` is the strategy used for filling in newly created pixels, which can appear after a rotation or a width/height shift.\n# \n# Let's take a look at our augmented images:\n\n# In[36]:\n\n\n# This is module with image preprocessing utilities\nfrom keras.preprocessing import image\n\nfnames = [os.path.join(train_cats_dir, fname) for fname in os.listdir(train_cats_dir)]\n\n# We pick one image to \"augment\"\nimg_path = fnames[3]\n\n# Read the image and resize it\nimg = image.load_img(img_path, target_size=(150, 150))\n\n# Convert it to a Numpy array with shape (150, 150, 3)\nx = image.img_to_array(img)\n\n# Reshape it to (1, 150, 150, 3)\nx = x.reshape((1,) + x.shape)\n\n# The .flow() command below generates batches of randomly transformed images.\n# It will loop indefinitely, so we need to `break` the loop at some point!\ni = 0\nfor batch in datagen.flow(x, batch_size=1):\n plt.figure(i)\n imgplot = plt.imshow(image.array_to_img(batch[0]))\n i += 1\n if i % 4 == 0:\n break\n\nplt.show()\n\n\n# If we train a new network using this data augmentation configuration, our network will never see twice the same input. However, the inputs \n# that it sees are still heavily intercorrelated, since they come from a small number of original images -- we cannot produce new information, \n# we can only remix existing information. As such, this might not be quite enough to completely get rid of overfitting. 
To further fight \n# overfitting, we will also add a Dropout layer to our model, right before the densely-connected classifier:\n\n# In[37]:\n\n\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu',\n input_shape=(150, 150, 3)))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dropout(0.5))\nmodel.add(layers.Dense(512, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.compile(loss='binary_crossentropy',\n optimizer=optimizers.RMSprop(lr=1e-4),\n metrics=['acc'])\n\n\n# Let's train our network using data augmentation and dropout:\n\n# In[38]:\n\n\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,)\n\n# Note that the validation data should not be augmented!\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntrain_generator = train_datagen.flow_from_directory(\n # This is the target directory\n train_dir,\n # All images will be resized to 150x150\n target_size=(150, 150),\n batch_size=32,\n # Since we use binary_crossentropy loss, we need binary labels\n class_mode='binary')\n\nvalidation_generator = test_datagen.flow_from_directory(\n validation_dir,\n target_size=(150, 150),\n batch_size=32,\n class_mode='binary')\n\nhistory = model.fit_generator(\n train_generator,\n steps_per_epoch=100,\n epochs=100,\n validation_data=validation_generator,\n validation_steps=50)\n\n\n# Let's save our model -- we will be using it in the section on convnet visualization.\n\n# In[41]:\n\n\nmodel.save('cats_and_dogs_small_2.h5')\n\n\n# Let's plot our results again:\n\n# In[43]:\n\n\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(acc))\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.legend()\n\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()\n\n\n# Thanks to data augmentation and dropout, we are no longer overfitting: the training curves are rather closely tracking the validation \n# curves. We are now able to reach an accuracy of 82%, a 15% relative improvement over the non-regularized model.\n# \n# By leveraging regularization techniques even further and by tuning the network's parameters (such as the number of filters per convolution \n# layer, or the number of layers in the network), we may be able to get an even better accuracy, likely up to 86-87%. However, it would prove \n# very difficult to go any higher just by training our own convnet from scratch, simply because we have so little data to work with. 
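One practical aside: since the 100-epoch run above is slow, it can be worth dumping the raw history next to the saved model so that the \n# curves can be re-plotted later without retraining -- a minimal sketch (the file name is just an example; the float() cast guards against \n# non-serializable NumPy scalar types):\n\n# In[ ]:\n\n\nimport json\n\nwith open('cats_and_dogs_small_2_history.json', 'w') as f:\n    json.dump({k: [float(v) for v in vals]\n               for k, vals in history.history.items()}, f)\n\n\n# 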
As a \n# next step to improve our accuracy on this problem, we will have to leverage a pre-trained model, which will be the focus of the next two \n# sections.\n","repo_name":"liuxinfengabc/cultivate","sub_path":"5.机器学习/src/deep-learning-with-pyton/5.2-using-convnets-with-small-datasets.py","file_name":"5.2-using-convnets-with-small-datasets.py","file_ext":"py","file_size_in_byte":23992,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"78"} +{"seq_id":"26315911655","text":"from django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render, redirect\n\nfrom APP.forms import userregister, StudentForm, Parentform, complaint_form, payment_form, \\\n review_form, roombooking_form\nfrom APP.models import Hostel, Food, Complaint, Payment, Notification, Attendance, Review, Staff, Room_booking, \\\n Student_register, Parent_register, Fee\n\n\n# Create your views here.\n\ndef mainpage(request):\n if request.method == \"POST\":\n username = request.POST.get('uname')\n password = request.POST.get('pass')\n user = authenticate(request,username = username,password = password)\n if user is not None:\n login(request,user)\n if user.is_staff:\n return redirect('admin1')\n elif user is not None and user.is_student:\n if user.student.approval_status == True:\n login(request,user)\n return redirect('student_profileview')\n elif user is not None and user.is_parent:\n if user.parent.approval_status == True:\n login(request,user)\n\n return redirect('parent_profileview')\n else:\n messages.info(request,\"invalid credentials\")\n return render(request,'index.html')\n\n# student registration form\n\ndef student(request):\n u_form = userregister()\n s_form = StudentForm()\n if request.method == \"POST\":\n u_form = userregister(request.POST)\n s_form = StudentForm(request.POST,request.FILES)\n if u_form.is_valid() and s_form.is_valid():\n user = u_form.save(commit=False)\n user.is_student = True\n user.save()\n student = s_form.save(commit=False)\n student.user = user\n student.save()\n messages.info(request,\"student registration completed\")\n\n return redirect('mainpage')\n\n return render(request,'signupstudent.html',{'u_form':u_form,'s_form':s_form})\n\n# parent registration from\n\ndef parent(request):\n u_form = userregister()\n p_form = Parentform()\n if request.method == \"POST\":\n u_form = userregister(request.POST)\n p_form = Parentform(request.POST)\n if u_form.is_valid() and p_form.is_valid():\n user = u_form.save(commit=False)\n user.is_parent = True\n user.save()\n parent = p_form.save(commit=False)\n parent.user = user\n parent.save()\n messages.info(request,\"parent registration completed\")\n\n return redirect('mainpage')\n return render(request,'signupparent.html',{'u_form':u_form,'p_form':p_form})\n\n@login_required(login_url='mainpage')\ndef admin1(request):\n return render(request,'admin.html')\n\n@login_required(login_url='mainpage')\ndef studentview(request):\n return render(request,'student_dashboard.html')\n\n@login_required(login_url='mainpage')\ndef parentview(request):\n return render(request,'parent_dashboard.html')\n\n@login_required(login_url='mainpage')\ndef student_view_hostel(request):\n data = Hostel.objects.all()\n return render(request,'hostel_view.html',{'data':data})\n\n@login_required(login_url='mainpage')\ndef student_view_food(request):\n data = 
Food.objects.all()\n return render(request, 'food_view.html', {'data': data})\n\n\n@login_required(login_url='mainpage')\ndef add_complaint(request):\n form = complaint_form()\n u = request.user\n if request.method == \"POST\":\n form = complaint_form(request.POST,request.FILES)\n if form.is_valid():\n obj = form.save(commit=False)\n obj.user = u\n form.save()\n return redirect('view_complaint')\n return render(request,'add_a_complaint.html',{'form':form})\n\n\n@login_required(login_url='mainpage')\ndef view_complaint(request):\n u = Student_register.objects.get(user=request.user)\n data = Complaint.objects.filter(student_name=u)\n return render(request,'view_complaint.html',{'data':data})\n\n\n@login_required(login_url='mainpage')\ndef complaint_update(request,id):\n comp1 = Complaint.objects.get(id=id)\n form = complaint_form(instance=comp1)\n if request.method == 'POST':\n form = complaint_form(request.POST,instance=comp1)\n if form.is_valid():\n form.save()\n return redirect('view_complaint')\n return render(request,'update_complaint.html',{'form':form})\n\n@login_required(login_url='mainpage')\ndef complaint_delete(request,id):\n Complaint.objects.get(id=id).delete()\n return redirect('view_complaint')\n\n@login_required(login_url='mainpage')\ndef fee_view(request):\n data = Fee.objects.all()\n return render(request,'fee_view_details.html',{'data':data})\n\n# @login_required(login_url='mainpage')\n# def add_payment(request):\n# form = payment_form()\n# if request.method == \"POST\":\n# form = payment_form(request.POST,request.FILES)\n# if form.is_valid():\n# form.save()\n# if request.GET.get('button') == 'a':\n# print('submit')\n# return redirect('fee_view')\n# return render(request,'add_payment_details.html',{'form':form})\n\n@login_required(login_url='mainpage')\ndef view_student_payment(request):\n u = Student_register.objects.get(user=request.user)\n data = Payment.objects.filter(name=u)\n return render(request,'student_view_payment.html',{'data':data})\n\n@login_required(login_url='mainpage')\ndef approve_payment(request,id):\n pay1 = Payment.objects.get(id=id)\n pay1.status = 1\n pay1.save()\n messages.info(request, 'Student fee paid successfully')\n return redirect('view_student_payment')\n\ndef reject_payment(request,id):\n pay1 = Payment.objects.get(id=id)\n pay1.status = 2\n pay1.save()\n messages.info(request, 'Student fee not paid')\n return redirect('view_student_payment')\n\n\n@login_required(login_url='mainpage')\ndef notification_view(request):\n data = Notification.objects.all()\n return render(request, 'view_notification_details.html', {'data': data})\n\n@login_required(login_url='mainpage')\ndef attendance_view(request):\n u = Student_register.objects.get(user=request.user)\n data = Attendance.objects.filter(name=u)\n return render(request, 'view_attendance.html', {'data': data})\n\n@login_required(login_url='mainpage')\ndef add_review(request):\n form = review_form()\n if request.method == \"POST\":\n form = review_form(request.POST,request.FILES)\n if form.is_valid():\n form.save()\n return redirect('view_review')\n return render(request,'add_review.html',{'form':form})\n\n@login_required(login_url='mainpage')\ndef view_review(request):\n data = Review.objects.filter(user=request.user)\n return render(request,'view_review.html',{'data':data})\n\n\n@login_required(login_url='mainpage')\ndef update_review(request,id):\n rev1 = Review.objects.get(id=id)\n form = review_form(instance=rev1)\n if request.method == 'POST':\n form = 
review_form(request.POST,instance=rev1)\n if form.is_valid():\n form.save()\n return redirect('view_review')\n return render(request,'update_review_details.html',{'form':form})\n\n\n@login_required(login_url='mainpage')\ndef delete_review(request,id):\n Review.objects.get(id=id).delete()\n return redirect('view_review')\n\n\n@login_required(login_url='mainpage')\ndef student_view_staff(request):\n data = Staff.objects.all()\n return render(request, 'student_view_staff.html', {'data': data})\n\n\n@login_required(login_url='mainpage')\ndef add_book_room(request):\n form = roombooking_form()\n if request.method == \"POST\":\n form = roombooking_form(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('view_room_booking')\n return render(request, 'book_room.html', {'form': form})\n\n\n@login_required(login_url='mainpage')\ndef view_room_booking(request):\n u = Student_register.objects.get(user=request.user)\n data = Room_booking.objects.filter(student_name=u)\n return render(request, 'student_book_room.html', {'data': data})\n\n\n@login_required(login_url='mainpage')\ndef update_roombooking(request,id):\n room1 = Room_booking.objects.get(id=id)\n form = roombooking_form(instance=room1)\n if request.method == 'POST':\n form = roombooking_form(request.POST,instance=room1)\n if form.is_valid():\n form.save()\n return redirect('view_room_booking')\n return render(request,'update_roombooking.html',{'form':form})\n\n\n@login_required(login_url='mainpage')\ndef delete_roombooking(request,id):\n Room_booking.objects.get(id=id).delete()\n return redirect('view_room_booking')\n\n\n@login_required(login_url='mainpage')\ndef student_viewstudents(request):\n data = Student_register.objects.all()\n return render(request,'student_view_student.html',{'data':data})\n\n# parent views\n\n@login_required(login_url='mainpage')\ndef parent_view_hostel(request):\n data = Hostel.objects.all()\n return render(request,'parent_hostel_view.html',{'data':data})\n\n\n@login_required(login_url='mainpage')\ndef parent_view_staff(request):\n data = Staff.objects.all()\n return render(request, 'parent_view_staff.html', {'data': data})\n\n\n@login_required(login_url='mainpage')\ndef parent_attendance_view(request):\n u = Parent_register.objects.get(user=request.user)\n data = Attendance.objects.filter(name=u.student_name)\n return render(request, 'parent_view_attendance.html', {'data': data})\n\n\n@login_required(login_url='mainpage')\ndef parent_add_payment(request):\n form = payment_form()\n if request.method == \"POST\":\n form = payment_form(request.POST,request.FILES)\n if form.is_valid():\n form.save()\n return redirect('parent_view_payment')\n return render(request,'parent_add_payment_details.html',{'form':form})\n\n\n@login_required(login_url='mainpage')\ndef parent_view_payment(request):\n u = Parent_register.objects.get(user=request.user)\n data = Payment.objects.filter(name=u.student_name)\n return render(request,'parent_view_payment.html',{'data':data})\n\n\n@login_required(login_url='mainpage')\ndef parent_view_fee(request):\n data = Fee.objects.all()\n return render(request, 'parent_view_fee_details.html', {'data': data})\n\n\n@login_required(login_url='mainpage')\ndef student_profileview(request):\n student = Student_register.objects.get(user=request.user)\n return render(request,'student_dashboard.html',{'student':student})\n\n\n@login_required(login_url='mainpage')\ndef student_updateprofile(request):\n student1 = Student_register.objects.get(user=request.user)\n form = 
StudentForm(instance=student1)\n if request.method == 'POST':\n form = StudentForm(request.POST,request.FILES, instance=student1)\n if form.is_valid():\n form.save()\n return redirect('student_profileview')\n return render(request, 'student_update_profile.html', {'form': form})\n\n@login_required(login_url='mainpage')\ndef delete_profile_student(request):\n user = request.user\n print(user)\n if request.method == \"POST\":\n user.delete()\n messages.info(request, 'Your account deleted successfully')\n return redirect('mainpage')\n return render(request,'delete_account.html')\n\n\n@login_required(login_url='mainpage')\ndef logout_view(request):\n logout(request)\n return redirect('mainpage')\n\n\n@login_required(login_url='mainpage')\ndef parent_profileview(request):\n parent = Parent_register.objects.get(user=request.user)\n return render(request,'parent_dashboard.html',{'parent':parent})\n\n\n@login_required(login_url='mainpage')\ndef parent_updateprofile(request):\n parent1 = Parent_register.objects.get(user=request.user)\n form = Parentform(instance=parent1)\n if request.method == 'POST':\n form = Parentform(request.POST,request.FILES, instance=parent1)\n if form.is_valid():\n form.save()\n return redirect('parent_profileview')\n return render(request, 'parent_updateprofile.html', {'form': form})\n\n@login_required(login_url='mainpage')\ndef delete_profile_parent(request):\n user = request.user\n print(user)\n if request.method == \"POST\":\n user.delete()\n messages.info(request, 'Your account deleted successfully')\n return redirect('mainpage')\n return render(request,'delete_account_parent.html')\n\n\n","repo_name":"Mubashira98/DJANGO_hostel_management","sub_path":"APP/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34872032182","text":"\"\"\"\n1-10 2,3,5,7\n11-20 11,13.17, 19\n21-30 23,29\n31-40 31,37\n2,3,5,7\n2,3,5,7,11\n2,3,5,7,11,13\n\n50\n\n\n\"\"\"\ndef isPrime(num):\n if(num < 1):\n return 1\n if(num == 1):\n return 0 \n i = 2 \n while(i <= num/2):\n if(num % i == 0):\n return 0\n i+=1 \n return 1 \n\n\ndef countPrime(num):\n count = 0\n arrPrime = []\n for i in range(1,num+1):\n if(isPrime(i)):\n arrPrime.append(i)\n return arrPrime\n\nnum = int(input())\ntraverseCount = 0\narr = countPrime(num)\nprint(arr)\ncurrCount = num\nprint(len(arr))\n# print(len(arr) < 2)\nwhile(len(arr) > 3):\n print(\"len\", len(arr))\n traverseCount += 1\n currCount -= len(arr)\n val = 0\n for i in arr:\n if(i < currCount):\n arr = arr[:i] \n break\n \n print(\n \"arr\",arr)\n print(val) \n \n\nprint(traverseCount + 1) \n","repo_name":"premkhandelwal/Python-Practice","sub_path":"codeVita2603.py","file_name":"codeVita2603.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9270766936","text":"from tkinter import *\r\nimport sqlite3\r\nwindow = Tk()\r\nwindow.geometry(\"700x500\")\r\ndef db():\r\n con=sqlite3.connect('details3.db')\r\n con.execute('''CREATE TABLE IF NOT EXISTS DETAILS(name varchar(10),age varchar(4))''')\r\n #print(name,date)\r\n con.execute('insert into details (name,age) values (?,?)',(name.get(),date.get()))\r\n cur=con.execute('''SELECT * FROM DETAILS''')\r\n con.commit()\r\n for i in cur:\r\n print(i)\r\n\r\ndef f():\r\n p=open(\"test.txt\",\"a+\")\r\n p.write(\"\\nName:\"+str(name.get()))\r\n p.close()\r\ndef fun():\r\n Label(text=\"Your 
age:\"+str(date.get())).grid(row=10,column=10)\r\n f=open(\"test.txt\",\"a+\")\r\n f.write(str(date.get()))\r\n f.close()\r\n\r\nnameLable = Label(text=\"Name:\").grid(row=0,column=0,padx=10,pady=10)\r\nphoneLable = Label(text=\"Phone:\").grid(row=1,column=0,padx=10,pady=10)\r\ndateLable = Label(text=\"Bday:\").grid(row=2,column=0,padx=10,pady=10)\r\nname = StringVar()\r\nphone = StringVar()\r\ndate = StringVar()\r\nnameEntry = Entry(window, textvariable=name).grid(row=0,column=1,padx=10,pady=10)\r\nphoneEntry = Entry(window, textvariable=phone).grid(row=1,column=1,padx=10,pady=10)\r\ndateEntry = Entry(window, textvariable=date).grid(row=2,column=1,padx=10,pady=10)\r\nb = Button(text=\"age calculator\",command=fun).grid(row=5,column=6)\r\nb1=Button(text=\"write\",command=f).grid(row=6,column=6)\r\nb2=Button(text=\"write to db\",command=db).grid(row=7,column=6)\r\n# con = sqlite3.connect('details.db')\r\n# cur =con.execute('select * from details')\r\n# for i in cur:\r\n# print(i)\r\n# print('Hello')\r\nwindow.mainloop()","repo_name":"abhilash1004/PythonPractice","sub_path":"AP LAB/Week8/tkinterpractice.py","file_name":"tkinterpractice.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5637464573","text":"# Making a habit tracker and uploading the data in google sheets in real time.\n\nimport requests\nfrom datetime import datetime\nimport os\n\n# --------------------------------- Defined passwords and id in environment ----------------\n\nAPP_ID = os.environ[\"APP_ID\"]\nAPI_KEY = os.environ[\"API_KEY\"]\nAPI_USERNAME = os.environ[\"API_USERNAME\"]\nAPI_PASSWORD = os.environ[\"API_PASSWORD\"]\n\n# Or\n'''\nAPP_ID = \"Your ID\"\nAPI_KEY = \"Your Key\"\nAPI_USERNAME = \"Your Username\"\nAPI_PASSWORD = \"Your Password\"\n'''\n\nheaders = {\n \"x-app-id\":APP_ID,\n \"x-app-key\":API_KEY,\n}\n\n# --------------------------------- Extracting data from nutritionix ----------------------\n\nEndpoint = \"https://trackapi.nutritionix.com/v2/natural/exercise\"\n\nGENDER = \"male\"\nWEIGHT_KG = 59\nHEIGHT_CM = 174\nAGE = 17\n\nexercise_text = input(\"Tell me which exercises you did: \")\n\nparameters = {\n \"query\": exercise_text,\n \"gender\": GENDER,\n \"weight_kg\": WEIGHT_KG,\n \"height_cm\": HEIGHT_CM,\n \"age\": AGE\n}\n\nresponse = requests.post(Endpoint, json=parameters, headers=headers)\nresult = response.json()\n\n# --------------------------------- Uploading data in google sheete ----------------\n\ngoogle_sheet_url = \"https://api.sheety.co/34631ec83d7e0fb140930af1051a3fc1/myWorkout/workouts\"\n\ntoday_date = datetime.now().strftime(\"%d/%m/%Y\")\nnow_time = datetime.now().strftime(\"%X\")\n\nfor items in range(len(result[\"exercises\"])):\n activity = result['exercises'][items][\"user_input\"]\n duration = result['exercises'][items][\"duration_min\"]\n calories = result['exercises'][items][\"nf_calories\"]\n print(activity,duration,calories)\n\n values = {\n \"workout\":{\n \"date\":today_date,\n \"time\":now_time,\n \"exercise\":activity,\n \"duration\":duration,\n \"calories\":calories\n }\n }\n\n sheet_response = requests.post(google_sheet_url,json=values,auth=(API_USERNAME,API_PASSWORD,))\n print(sheet_response.text)\n\n# --------------------------------- The End ----------------------------------------------------","repo_name":"Aadityakharkia/Python-Course","sub_path":"Day 38-Habit Tracker- Upgraded 
Version/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"42514424399","text":"from __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n \"metadata_version\": \"1.1\",\n \"status\": [\"preview\"],\n \"supported_by\": \"community\",\n}\n\nDOCUMENTATION = \"\"\"\n---\nmodule: oci_compute_capacity_reservation_instance_shape_facts\nshort_description: Fetches details about one or multiple ComputeCapacityReservationInstanceShape resources in Oracle Cloud Infrastructure\ndescription:\n - Fetches details about one or multiple ComputeCapacityReservationInstanceShape resources in Oracle Cloud Infrastructure\n - Lists the shapes that can be reserved within the specified compartment.\nversion_added: \"2.9.0\"\nauthor: Oracle (@oracle)\noptions:\n compartment_id:\n description:\n - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.\n type: str\n required: true\n availability_domain:\n description:\n - The name of the availability domain.\n - \"Example: `Uocm:PHX-AD-1`\"\n type: str\n display_name:\n description:\n - A filter to return only resources that match the given display name exactly.\n type: str\n aliases: [\"name\"]\n sort_by:\n description:\n - The field to sort by. You can provide one sort order (`sortOrder`). Default order for\n TIMECREATED is descending. Default order for DISPLAYNAME is ascending. The DISPLAYNAME\n sort order is case sensitive.\n - \"**Note:** In general, some \\\\\"List\\\\\" operations (for example, `ListInstances`) let you\n optionally filter by availability domain if the scope of the resource type is within a\n single availability domain. If you call one of these \\\\\"List\\\\\" operations without specifying\n an availability domain, the resources are grouped by availability domain, then sorted.\"\n type: str\n choices:\n - \"TIMECREATED\"\n - \"DISPLAYNAME\"\n sort_order:\n description:\n - The sort order to use, either ascending (`ASC`) or descending (`DESC`). 
The DISPLAYNAME sort order\n is case sensitive.\n type: str\n choices:\n - \"ASC\"\n - \"DESC\"\nextends_documentation_fragment: [ oracle.oci.oracle ]\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: List compute_capacity_reservation_instance_shapes\n oci_compute_capacity_reservation_instance_shape_facts:\n # required\n compartment_id: \"ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx\"\n\n # optional\n availability_domain: Uocm:PHX-AD-1\n display_name: display_name_example\n sort_by: TIMECREATED\n sort_order: ASC\n\n\"\"\"\n\nRETURN = \"\"\"\ncompute_capacity_reservation_instance_shapes:\n description:\n - List of ComputeCapacityReservationInstanceShape resources\n returned: on success\n type: complex\n contains:\n availability_domain:\n description:\n - The shape's availability domain.\n returned: on success\n type: str\n sample: Uocm:PHX-AD-1\n instance_shape:\n description:\n - The name of the available shape used to launch instances in a compute capacity reservation.\n returned: on success\n type: str\n sample: instance_shape_example\n sample: [{\n \"availability_domain\": \"Uocm:PHX-AD-1\",\n \"instance_shape\": \"instance_shape_example\"\n }]\n\"\"\"\n\nfrom ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils\nfrom ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (\n OCIResourceFactsHelperBase,\n get_custom_class,\n OCIAnsibleModule,\n)\n\ntry:\n from oci.core import ComputeClient\n\n HAS_OCI_PY_SDK = True\nexcept ImportError:\n HAS_OCI_PY_SDK = False\n\n\nclass ComputeCapacityReservationInstanceShapeFactsHelperGen(OCIResourceFactsHelperBase):\n \"\"\"Supported operations: list\"\"\"\n\n def get_required_params_for_list(self):\n return [\n \"compartment_id\",\n ]\n\n def list_resources(self):\n optional_list_method_params = [\n \"availability_domain\",\n \"display_name\",\n \"sort_by\",\n \"sort_order\",\n ]\n optional_kwargs = dict(\n (param, self.module.params[param])\n for param in optional_list_method_params\n if self.module.params.get(param) is not None\n )\n return oci_common_utils.list_all_resources(\n self.client.list_compute_capacity_reservation_instance_shapes,\n compartment_id=self.module.params.get(\"compartment_id\"),\n **optional_kwargs\n )\n\n\nComputeCapacityReservationInstanceShapeFactsHelperCustom = get_custom_class(\n \"ComputeCapacityReservationInstanceShapeFactsHelperCustom\"\n)\n\n\nclass ResourceFactsHelper(\n ComputeCapacityReservationInstanceShapeFactsHelperCustom,\n ComputeCapacityReservationInstanceShapeFactsHelperGen,\n):\n pass\n\n\ndef main():\n module_args = oci_common_utils.get_common_arg_spec()\n module_args.update(\n dict(\n compartment_id=dict(type=\"str\", required=True),\n availability_domain=dict(type=\"str\"),\n display_name=dict(aliases=[\"name\"], type=\"str\"),\n sort_by=dict(type=\"str\", choices=[\"TIMECREATED\", \"DISPLAYNAME\"]),\n sort_order=dict(type=\"str\", choices=[\"ASC\", \"DESC\"]),\n )\n )\n\n module = OCIAnsibleModule(argument_spec=module_args)\n\n if not HAS_OCI_PY_SDK:\n module.fail_json(msg=\"oci python sdk required for this module.\")\n\n resource_facts_helper = ResourceFactsHelper(\n module=module,\n resource_type=\"compute_capacity_reservation_instance_shape\",\n service_client_class=ComputeClient,\n namespace=\"core\",\n )\n\n result = []\n\n if resource_facts_helper.is_list():\n result = resource_facts_helper.list()\n else:\n resource_facts_helper.fail()\n\n module.exit_json(compute_capacity_reservation_instance_shapes=result)\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"oracle/oci-ansible-collection","sub_path":"plugins/modules/oci_compute_capacity_reservation_instance_shape_facts.py","file_name":"oci_compute_capacity_reservation_instance_shape_facts.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"78"} +{"seq_id":"18803306423","text":"# **************************************************************************** #\n# #\n# ::: :::::::: #\n# analyze_url.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: Zhenkun +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2020/01/21 09:47:55 by Kay Zhou #+# #+# #\n# Updated: 2020/12/17 14:18:02 by Zhenkun ### ########.fr #\n# #\n# **************************************************************************** #\n\nfrom my_weapon import *\nfrom pathlib import Path\nfrom tqdm import tqdm\nfrom collections import Counter\nfrom file_read_backwards import FileReadBackwards\n\n\nelection_files = set([\n \"biden OR joebiden\",\n \"trump OR donaldtrump OR realdonaldtrump\",\n])\n\ndef read_tweets(start, end):\n months = set([\n # \"202006\",\n # \"202007\",\n \"202008\",\n ])\n set_tweets = set()\n file_names = sorted(Path(\"raw_data\").rglob(\"*.txt\"), reverse=True)\n\n for in_name in file_names:\n query = in_name.stem.split(\"-\")[-1]\n if (\"biden\" in query or \"trump\" in query) and in_name.parts[1] in months:\n print(in_name)\n cnt = 0\n with FileReadBackwards(in_name) as f:\n while True:\n line = f.readline()\n if not line:\n print(cnt, \"end of the file!\")\n print(\"-\" * 50)\n break\n try:\n d = json.loads(line.strip())\n except:\n print('json.loads Error:', line)\n continue\n \n tweet_id = d[\"id\"]\n if tweet_id in set_tweets:\n continue\n set_tweets.add(tweet_id)\n\n dt = pendulum.from_format(\n d[\"created_at\"], 'ddd MMM DD HH:mm:ss ZZ YYYY')\n if dt < start:\n print(\"sum:\", cnt, d[\"created_at\"], \"end!\")\n break\n if dt >= end:\n continue\n\n if cnt % 50000 == 0:\n print(\"New data ->\", cnt)\n cnt += 1\n yield d, dt\n\n\ndef read_tweets_json_day(dt):\n print(\"read_tweets_json_day:\", dt.to_date_string())\n set_tweets = set()\n dt_str = dt.format('YYYYMMDD')\n file_names = Path(f\"raw_data/{dt.format('YYYYMM')}\").rglob(f\"*.txt\")\n\n for in_name in file_names:\n if not in_name.parts[-1].startswith(dt_str):\n continue\n\n print(in_name)\n cnt = 0\n for line in open(in_name):\n try:\n d = json.loads(line.strip())\n except Exception:\n print(\"ERROR: json.loads()\")\n continue\n tweet_id = d[\"id\"]\n if tweet_id in set_tweets:\n continue\n set_tweets.add(tweet_id)\n yield d\n\n\ndef write_top_trump_biden_url(start, end):\n url_counter = Counter()\n for dt in pendulum.period(start, end):\n for d in read_tweets_json_day(dt):\n if d[\"urls\"]:\n for url in d[\"urls\"]:\n url = url[\"expanded_url\"]\n if url.startswith(\"https://twitter.com\"):\n continue\n url_counter[url] += 1\n out_name = f'data/url-{start.format(\"MMDD\")}-{end.format(\"MMDD\")}.txt'\n with open(out_name, \"w\") as f:\n for ht, cnt in url_counter.most_common():\n print(ht, cnt, file=f)\n\n\nif __name__ == \"__main__\":\n \n start = pendulum.datetime(2020, 11, 23, tz=\"UTC\")\n end = pendulum.datetime(2020, 11, 30, tz=\"UTC\")\n write_top_trump_biden_url(start, end)\n\n start = pendulum.datetime(2020, 11, 16, tz=\"UTC\")\n end = pendulum.datetime(2020, 11, 23, tz=\"UTC\")\n write_top_trump_biden_url(start, end)\n\n start = pendulum.datetime(2020, 11, 9, tz=\"UTC\")\n end = pendulum.datetime(2020, 11, 16, tz=\"UTC\")\n 
write_top_trump_biden_url(start, end)\n\n    start = pendulum.datetime(2020, 11, 2, tz=\"UTC\")\n    end = pendulum.datetime(2020, 11, 9, tz=\"UTC\")\n    write_top_trump_biden_url(start, end)\n\n    start = pendulum.datetime(2020, 10, 26, tz=\"UTC\")\n    end = pendulum.datetime(2020, 11, 2, tz=\"UTC\")\n    write_top_trump_biden_url(start, end)\n\n    start = pendulum.datetime(2020, 10, 19, tz=\"UTC\")\n    end = pendulum.datetime(2020, 10, 26, tz=\"UTC\")\n    write_top_trump_biden_url(start, end)\n\n    start = pendulum.datetime(2020, 10, 12, tz=\"UTC\")\n    end = pendulum.datetime(2020, 10, 19, tz=\"UTC\")\n    write_top_trump_biden_url(start, end)","repo_name":"kayzhou/US_election","sub_path":"analyze_url.py","file_name":"analyze_url.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"27079715564","text":"# screen initialization, frame-rate setup, library imports\nimport numpy\nimport pygame.font\nfrom pygame import K_1\nfrom pygame import K_2\nfrom pygame import K_3\nfrom pygame import K_4\nfrom pygame import K_0\nfrom pygame import K_SPACE\nfrom logging import getLogger\n\nimport calculation\nfrom drawing import draw_resist, draw_blackbox, draw_battery, draw_description, draw_voltmeter, update_nodes, \\\n    draw_conductor\nfrom utils import order, is_mouse_in_grid\nfrom add_item import add_item\n\nlog = getLogger(\"main\")\n\npygame.init()\npygame.font.init()\n# fonts used\nmy_font = pygame.font.SysFont('calibri', 30)\nsecond_font = pygame.font.SysFont('calibri', 40)\n\nFPS = 25\nscreen = pygame.display.set_mode((1000, 600))\n\n# declare the state variables and set their initial conditions: the voltage is zero, the voltmeter leads are not connected\nvolts = 0\nexitA = -1\nexitB = -1\n# Draw the circuit nodes, the voltmeter display and the hint table\nupdate_nodes(screen)\ndraw_voltmeter(screen)\ndraw_description(screen)\n# Create the adjacency matrix and fill it with initial values\nadjacency_matrix = numpy.zeros((25, 25))\n\nclock = pygame.time.Clock()\nclock.tick(FPS)\nfinished = False\n\n# Main loop\nwhile not finished:\n    # Update the voltmeter reading\n    draw_voltmeter(screen, volts)\n    for event in pygame.event.get():\n        if event.type == pygame.MOUSEBUTTONDOWN:\n            # On a mouse click, connect the voltmeter to the circuit and highlight the connected nodes\n            # Determine the coordinates of the nearest grid node\n            x1 = round((pygame.mouse.get_pos()[0] - 600) / 90)\n            y1 = round((pygame.mouse.get_pos()[1] - 120) / 90)\n            if exitA == -1:\n                exitA = order(x1, y1) + 1\n                log.debug(exitA)\n                pygame.draw.circle(screen, (255, 0, 0), (600 + x1 * 90, 90 * y1 + 120), 7, 0)\n                surf = my_font.render(\"A\", True, (255, 0, 0))\n                screen.blit(surf, (600 + x1 * 90 - 20, 90 * y1 + 120 + 3))\n            elif exitB == -1:\n                exitB = order(x1, y1) + 1\n                log.debug(exitB)\n                pygame.draw.circle(screen, (255, 0, 0), (600 + x1 * 90, 90 * y1 + 120), 7, 0)\n                surf = my_font.render(\"B\", True, (255, 0, 0))\n                screen.blit(surf, (600 + x1 * 90 - 20, 90 * y1 + 120 + 3))\n\n        if event.type == pygame.KEYDOWN:\n            if pygame.key.get_pressed()[K_SPACE]:\n                # Start the circuit calculation\n                # Update the voltage to the value between the nodes the voltmeter is connected to\n                grid = calculation.Grid(adjacency_matrix)\n                # Grid is a subclass of nx.Graph with a circuit-calculation method\n                volts = round(grid.get_voltage(exitA, exitB), 2)\n            if pygame.key.get_pressed()[K_0]:\n                # Reset the circuit state variables and refresh the right-hand screen\n                update_nodes(screen)\n                volts = 0\n                exitA = -1\n                exitB = -1\n                for i in range(25):\n                    for j in range(25):\n                        adjacency_matrix[i, j] = 0\n\n        if is_mouse_in_grid(pygame.mouse.get_pos()):\n            # If the cursor is over the circuit and a key from the command list is pressed, draw the corresponding element\n            # Raw (non-integer) form of the click coordinates\n            x = (pygame.mouse.get_pos()[0] - 600) / 90\n            y = (pygame.mouse.get_pos()[1] - 120) / 90\n            mouse_pos = x, y\n            if pygame.key.get_pressed()[K_1]:\n                add_item(draw_conductor, 1, mouse_pos, screen, adjacency_matrix)\n            if pygame.key.get_pressed()[K_2]:\n                add_item(draw_resist, 2, mouse_pos, screen, adjacency_matrix)\n            if pygame.key.get_pressed()[K_3]:\n                add_item(draw_battery, 3, mouse_pos, screen, adjacency_matrix)\n            if pygame.key.get_pressed()[K_4]:\n                add_item(draw_blackbox, 4, mouse_pos, screen, adjacency_matrix)\n        if event.type == pygame.QUIT:\n            finished = True\n    print(adjacency_matrix)\n\n    pygame.display.update()\n\npygame.quit()\n","repo_name":"ilgridnev/infaproj","sub_path":"maincode.py","file_name":"maincode.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39327274875","text":"#!/usr/bin/env python3\n\nfrom path import Path  # type: ignore\n\nif __name__ == \"__main__\":\n    temp = Path(\".\") / \"temp\"\n    if not temp.isdir():\n        temp.mkdir()\n    f = temp / \"test.txt\"\n    if not f.isfile():\n        f.touch()\n    f.write_text(\"pypy\")\n    for l in f.lines():\n        print(l)\n","repo_name":"42cursus-youkim/Python-Piscine-Django","sub_path":"Day03/ex01/my_program.py","file_name":"my_program.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"26001862156","text":"import random \nclass Coin:\n\tdef __init__(self, rare=False, clean=True, heads=True,**kargs):\n\t\tfor key,value in kargs.items():\n\t\t\tsetattr(self,key,value)\n\t\t\n\t\tself.is_rare=rare\n\t\tself.is_clean=clean\n\t\tself.is_heads=heads\n\t\tif self.is_rare:\n\t\t\tself.value=self.original_value*1.25\n\t\telse:\n\t\t\tself.value=self.original_value\n\t\t\n\t\tif self.is_clean:\n\t\t\tself.color=self.clean_color\n\t\telse:\n\t\t\tself.color=self.rusty_color\n\n\tdef clean(self):\n\t\tself.color=self.clean_color\n\n\tdef rusty(self):\n\t\tself.color=self.rusty_color\n\n\n\tdef flip(self):\n\t\thead_options=[True,False]\n\t\tchoice=random.choice(head_options)\n\t\tself.is_heads=choice\n\n\tdef __del__(self):\n\t\tprint(\"Coin Spent!\")\n\n\nclass Pound(Coin):\n\n\tdef __init__(self):\n\t\tdata={\n\t\t\"original_value\":1.00,\n\t\t\"clean_color\":\"gold\",\n\t\t\"rusty_color\":\"greenish\",\n\t\t\"dia\":22.5,\n\t\t\"thickness\":3.5,\n\t\t\"num_edges\":1,\n\t\t\"mass\":9.5}\n\t\tsuper().__init__(**data)\n\t\ncoin1=Pound()\nprint(coin1.color)\nprint(coin1.is_rare)\nprint(type(coin1))\nprint(coin1.value)\ncoin2=Pound()\nprint(coin2.value)\nprint(coin2.is_rare)\n","repo_name":"garima0106/Python-basics","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29999159053","text":"# r10142008 周昕妤 \n# stream cloud link: https://amy011872-nlp-web-amy-assignment-bonus-1-z6ldel.streamlitapp.com/\n\nimport streamlit as st\nfrom textblob import TextBlob\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nfrom snownlp import SnowNLP\nfrom datetime import datetime\n\n\ndef 
vader_analyze(rawText):\n    analyzer = SentimentIntensityAnalyzer()\n    compound = analyzer.polarity_scores(rawText)['compound']\n    pos_list, neg_list, neu_list = [], [], []\n    for i in rawText.split():\n        senti = analyzer.polarity_scores(i)['compound']\n        if senti > 0:\n            pos_list.append((i, senti))\n        elif senti < 0:\n            neg_list.append((i, senti))\n        else:\n            neu_list.append((i, senti))\n\n    return pos_list, neg_list, neu_list, compound\n\ndef snow_analyze(rawText):\n    res = SnowNLP(rawText)\n    senti_score = res.sentiments\n\n    return senti_score\n\nst.markdown('# Welcome to my sentiment analysis app!')\nst.caption('Streamlit project r10142008 周昕妤')\nmenu = ['English version', 'Chinese version']\nchoice = st.sidebar.selectbox(\"Language\", menu)\n\nif choice == 'English version':\n    col1, col2 = st.columns(2)\n\n    with col1.form(key='sentiForm'):\n        raw_text = st.text_area('Please enter a sentence:')\n        btn = st.form_submit_button(label='Analyze')\n\n    if btn:\n        with col2:\n            st.success(\"Finished analyzing!\")\n            st.write(datetime.now())\n            st.markdown('### Results from different models:')\n\n            polarity = TextBlob(raw_text).sentiment.polarity\n            if polarity > 0:\n                st.markdown('Textblob: Positive 🥰')\n                st.balloons()\n            elif polarity < 0:\n                st.markdown('Textblob: Negative 😭')\n                st.snow()\n            else:\n                st.markdown('Textblob: Neutral 😶') \n\n            pos_list, neg_list, neu_list, compound = vader_analyze(raw_text)\n            if compound > 0:\n                st.markdown('vaderSentiment: Positive 🥰')\n                st.balloons()\n            elif compound < 0:\n                st.markdown('vaderSentiment: Negative 😭')\n                st.snow()\n            else:\n                st.markdown('vaderSentiment: Neutral 😶') \n\n    \n    if len(raw_text) != 0:\n        with st.expander('Click here to have more details of the analysis.'):\n            st.markdown('#### Results from textblob:')\n            polarity = TextBlob(raw_text).sentiment.polarity\n            subjectivity = TextBlob(raw_text).sentiment.subjectivity\n            st.write('Polarity:', polarity)\n            st.write('Subjectivity:', subjectivity)\n            if polarity > 0:\n                st.markdown('Sentiment: Positive 🥰')\n            elif polarity < 0:\n                st.markdown('Sentiment: Negative 😭')\n            else:\n                st.markdown('Sentiment: Neutral 😶') \n            \n            st.markdown('#### Results from vaderSentiment:')\n            pos_list, neg_list, neu_list, compound = vader_analyze(raw_text)\n            if compound > 0:\n                st.markdown('Sentiment: Positive 🥰')\n            elif compound < 0:\n                st.markdown('Sentiment: Negative 😭')\n            else:\n                st.markdown('Sentiment: Neutral 😶') \n            pos_col, neu_col, neg_col = st.columns(3)\n            pos_col.write('Positive tokens:')\n            for pos in pos_list:\n                pos_col.write(pos)\n            neu_col.write('Neutral tokens:')\n            for neu in neu_list:\n                neu_col.write(neu)\n            neg_col.write('Negative tokens:')\n            for neg in neg_list:\n                neg_col.write(neg)\n\nif choice == 'Chinese version':\n\n    st.markdown('#### 歡迎使用中文版!')\n    \n    col1, col2 = st.columns(2)\n\n    with col1.form(key='sentiForm'):\n        raw_text = st.text_area('請輸入一個句子')\n        btn = st.form_submit_button(label='分析')\n\n    if btn:\n        with col2:\n            st.success(\"Finished analyzing!\")\n            st.write(datetime.now())\n            senti_score = snow_analyze(raw_text)\n            st.write('分數:', senti_score)\n            if senti_score > 0.5:\n                st.write('Sentiment: 正向!🥰')\n                st.balloons()\n            elif senti_score < 0.5:\n                st.write('Sentiment: 負向 😭😭😭')\n                st.snow()\n            else:\n                st.write('Sentiment: 中性 😶😶')\n    ","repo_name":"amy011872/nlp_web_amy","sub_path":"bonus/assignment-bonus-1.py","file_name":"assignment-bonus-1.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2990033818","text":"import json\n \n# NOTE: This script is 
used to parse the JSON file from the ESPN API. It is not\n# to be used without care, and is untested. It only serves as a simple tool to make\n# data cleaning easier\n \n# Function to parse JSON file and make a new JSON file\n# To clean output, use find+replace with following commands:\n# 1. \"name\" to name\n# 2. \"iconUrl\" to iconUrl\n# 3. \"sport\": \"Sport.XXX\" to sport: Sport.XXX\n\n# Takes the file paths as arguments\ndef make_json(inputFilePath, outputFilePath):\n    \n    fullData = []\n    \n    with open(inputFilePath, 'r', encoding='utf-8') as f:\n        my_data = json.load(f)\n\n        teams_list = my_data['sports'][0]['leagues'][0]\n\n\n        for i in teams_list['teams']:\n            displayName = i['team']['displayName']\n            icon = i['team']['logos'][2]['href']\n            newTeam = {\"name\": displayName, \"iconUrl\": icon, \"sport\": \"Sport.MLB\"}\n            fullData.append(newTeam)\n\n        f.close()\n\n    with open(outputFilePath, 'w', encoding='utf-8') as jsonf:\n        jsonf.write(json.dumps(fullData, indent=4))\n\n    \n# Driver Code\n \n# Decide the two file paths according to your\n# computer system\ninputFilePath = r'frontend/tools/io/input.json'\noutputFilePath = r'frontend/tools/io/output.txt'\n \n# Call the make_json function\nmake_json(inputFilePath, outputFilePath)\n\n\n\n","repo_name":"wilkyrlx/on-deck","sub_path":"frontend/tools/teamJsonGenerator.py","file_name":"teamJsonGenerator.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3961368747","text":"import requests\r\nimport json\r\nimport hashlib\r\nse = requests.session()\r\n\r\ndef api(name, args):\r\n    res = ''\r\n    if name == '历史上的今天':\r\n        # api doc https://www.free-api.com/doc/533\r\n        url = 'https://api.oick.cn/lishi/api.php'\r\n        r = se.get(url)\r\n        dict = r.json()\r\n        res += '今天是' + dict['day']\r\n        for item in dict['result']:\r\n            res += '\\n'\r\n            res += item['date']\r\n            res += ' '\r\n            res += item['title']\r\n    elif name == '天气':\r\n        # api doc https://www.free-api.com/doc/518\r\n        url = 'http://aider.meizu.com/app/weather/listWeather'\r\n        cityName = args[0]\r\n        if cityName == '':\r\n            res = '未获取到城市名,请以例如\"天气 上海\" 格式回复'\r\n        else:\r\n            with open('cityId.json','r',encoding='utf-8') as fp:\r\n                json_data = json.load(fp)\r\n            cityIdCheck = [item for item in json_data if item['countyname'] == cityName]\r\n            if len(cityIdCheck) == 0:\r\n                res = '未查询到城市'\r\n            else:\r\n                cityId = [item for item in json_data if item['countyname'] == cityName][0]['areaid']\r\n                r = se.get(url,params={'cityIds' : cityId})\r\n                data = r.json()\r\n                value = data['value'][0]\r\n                if data['code'] == '200':\r\n                    res ='今日'+ cityName +'天气:' + value['weathers'][0]['weather'] + '\\n'\r\n                    res += '日间温度:' + value['weathers'][0]['temp_day_c'] + '℃ \\n' + '夜间温度:' + value['weathers'][0]['temp_night_c'] + '℃ \\n'\r\n                    res += '实时温度:' + value['realtime']['temp'] + '\\n风力:' + value['realtime']['wD'] + value['realtime']['wS'] + '\\n'\r\n                    for item in value['indexes']:\r\n                        res += '\\n' + item['name'] + ':' + item['content']\r\n                    res += '\\n更新时间:' + value['weatherDetailsInfo']['publishTime']\r\n                else:\r\n                    res = '网络错误,查询失败'\r\n    elif name == '翻译':\r\n        # Baidu Translate free API; you must apply for your own appid and secret\r\n        url = 'https://fanyi-api.baidu.com/api/trans/vip/translate'\r\n        appid = ''\r\n        sec = ''\r\n        salt = '118'\r\n        q = args[0]\r\n        s = appid + q + salt + sec\r\n        md5 = hashlib.md5(bytes(s, encoding='utf8')).hexdigest()\r\n        r = se.get(url, params={'q':q,'from':'auto','to':'en','appid':appid,'salt':salt,'sign':md5})\r\n        data = r.json()\r\n        res = 
data['trans_result'][0]['dst']\r\n    # fall back to an auto-reply chatbot\r\n    else:\r\n        resp = requests.get(\"http://api.qingyunke.com/api.php\", {'key': 'free', 'appid': 0, 'msg': name})\r\n        resp.encoding = 'utf8'\r\n        resp = resp.json()\r\n        res = resp['content']\r\n    \r\n    return res\r\n","repo_name":"Rubick-Svito/wechatAuto","sub_path":"myapi.py","file_name":"myapi.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"38811262868","text":"#! /usr/bin/python\n\n\"\"\"This Module Contains Msg Base Class\n    This is an abstract class whose\n    methods will be overridden by\n    derived classes customized to\n    different msg formats.\n    This module contains the following functions and classes:\n    1) _get_line_separator - default line separator\n    2) get_line_separator - returns the configured line separator\n    3) _get_request_line - default request line\n    4) get_request_line - returns the configured request line\n    5) _get_response_line - default response line\n    6) get_response_line - returns the configured response line\n    7) _wrap_func - internal function to customize above functions\n    8) init - inits the msg module (customizes the\n       request_line, response_line, lineseparator).\n       installs the message validation handler\n    9) splitmsg - splits the message into parts (tuple) using\n       the lineseparator as the delimiter\n    10) Msg - Base class of all message formats\n    11) MsgReq - Base class of all message request formats\n    12) MsgResp - Base class of all message response formats\n\"\"\"\n\nfrom msgtype import MsgType\nfrom msgfac import register\nfrom com.goffersoft.utils.uid import getuid\n\n\ndef _get_line_separator():\n    \"\"\" default line separator\"\"\"\n    return '\\r\\n'\n\n\ndef _get_request_line():\n    \"\"\" default request line\"\"\"\n    return 'Generic Request Message : Version 1.0'\n\n\ndef _get_response_line():\n    \"\"\" default response line\"\"\"\n    return 'Generic Response Message : Version 1.0'\n\n\ndef get_line_separator():\n    \"\"\"line separator\"\"\"\n    return _get_line_separator()\n\n\ndef get_request_line():\n    \"\"\"request line\"\"\"\n    return _get_request_line()\n\n\ndef get_response_line():\n    \"\"\"response line\"\"\"\n    return _get_response_line()\n\n\ndef _wrap_func(str):\n    \"\"\"wraps 'str' and returns a new function\"\"\"\n    def func():\n        return str\n    return func\n\n\ndef init(validate_handler,\n         request_line=None,\n         response_line=None,\n         line_separator=None):\n    \"\"\"inits the msg module. 
customizes the\n    1) request line\n    2) response line\n    3) line separator\n    4) registers the message validation\n    handler with the msgfac module using the\n    register function\"\"\"\n    global get_request_line\n    global get_response_line\n    global get_line_separator\n\n    if(request_line is None):\n        get_request_line = _get_request_line\n    else:\n        get_request_line = _wrap_func(request_line)\n        get_request_line.__name__ = 'get_request_line'\n\n    if(response_line is None):\n        get_response_line = _get_response_line\n    else:\n        get_response_line = _wrap_func(response_line)\n        get_response_line.__name__ = 'get_response_line'\n\n    if(line_separator is None):\n        get_line_separator = _get_line_separator\n    else:\n        get_line_separator = _wrap_func(line_separator)\n        get_line_separator.__name__ = 'get_line_separator'\n\n    register(get_request_line(),\n             get_response_line(),\n             validate_handler)\n\n\ndef splitmsg(msg,\n             lineseparator=None):\n    \"\"\"helper function to split a message into parts\n       (tuple) using the line separator as the\n       delimiter\"\"\"\n    if msg is None:\n        return ''\n\n    if lineseparator is None:\n        lineseparator = get_line_separator()\n\n    return msg.split(lineseparator)\n\n\nclass Msg(object):\n    \"\"\" The Base Class For all\n        Message Formats\n        Assumption:\n        Messages are broken into lines;\n        lines are delineated by lineseparator.\n        The first line is a request or a response line;\n        the rest of the message is typically grouped\n        into headers and a body but need not be. The\n        actual format is determined by the derived\n        classes.\n    \"\"\"\n    def __init__(self,\n                 msg=None,\n                 msgparts=None,\n                 type=None,\n                 lineseparator=None,\n                 hdrs_dict=None,\n                 body=None,\n                 reqline=None,\n                 reqid=None,\n                 respcode=None,\n                 reason=None,\n                 respline=None,\n                 respid=None):\n        \"\"\" Initializes an instance of the base class\n            arguments:\n            1) msg -> message as a string\n            2) msgparts -> msgparts as a tuple\n               (parts separated by lineseparator)\n            3) type -> msgtype.MsgType (REQ or RESP)\n            4) lineseparator -> string to break message into lines\n            5) hdrs_dict -> list of hdrs as a dictionary\n            6) body -> message body as a string\n            7) reqline -> request line (MsgType.REQ only) as a string\n            8) reqid -> as a uuid (typically as a uuid.uuid4()\n               but can be anything)\n            9) respcode -> response code as a string (MsgType.RESP only)\n            10) reason -> descriptive text indicating reason\n                for success/failure\n            11) respline -> response line (MsgType.RESP only) as a string\n            12) respid -> as a uuid (typically as a uuid.uuid4()\n                but can be anything --> MsgType.RESP only)\n        \"\"\"\n        self.__reset(lineseparator=lineseparator,\n                     msg=msg,\n                     msgparts=msgparts,\n                     type=type,\n                     respcode=respcode,\n                     hdrs_dict=hdrs_dict,\n                     body=body,\n                     reqline=reqline,\n                     respline=respline,\n                     reqid=reqid,\n                     respid=respid,\n                     reason=reason)\n\n    def __update_msg(self):\n        \"\"\" internal function to update 'msg' \"\"\"\n        if(self.__linesep is not None and\n           self.__msg is None and\n           self.__msgparts is not None):\n            self.__msg = ''\n            for m in self.__msgparts:\n                self.__msg += str(m) + self.__linesep\n        return self.__msg\n\n    def __update_msgparts(self):\n        \"\"\" internal function to update 'msgparts' \"\"\"\n        if(self.__linesep is not None and\n           self.__msgparts is None and\n           self.__msg is not None):\n            self.__msgparts = self.__msg.split(self.__linesep)\n        return self.__msgparts\n\n    def __reset(self,\n                msg=None,\n                msgparts=None,\n                type=None,\n                lineseparator=None,\n                hdrs_dict=None,\n                body=None,\n                reqline=None,\n                reqid=None,\n                respcode=None,\n                reason=None,\n                respline=None,\n                respid=None):\n        \"\"\" internal function to 
reset the object to its initial state \"\"\"\n self.__linesep = lineseparator\n self.__msg = msg\n self.__msgparts = msgparts\n self.__type = type\n self.__respcode = respcode\n self.__hdrs = hdrs_dict\n self.__body = body\n self.__reqline = reqline\n self.__respline = respline\n self.__reqid = reqid\n self.__respid = respid\n self.__reason = reason\n\n if(hdrs_dict is None):\n self.__hdrs = {}\n\n if(lineseparator is None):\n self.__linesep = get_line_separator()\n\n self.update_msg()\n self.update_msgparts()\n\n def reset(self,\n msg=None,\n msgparts=None,\n type=None,\n lineseparator=None,\n hdrs_dict=None,\n body=None,\n reqline=None,\n reqid=None,\n respcode=None,\n reason=None,\n respline=None,\n respid=None):\n \"\"\" resets the object to its initial state \"\"\"\n self.__reset(lineseparator=lineseparator,\n msg=msg,\n msgparts=msgparts,\n type=type,\n respcode=respcode,\n hdrs_dict=hdrs_dict,\n body=body,\n reqline=reqline,\n respline=respline,\n reqid=reqid,\n respid=respid)\n\n def update_msg(self):\n \"\"\"---> Maybe override in derived class <---\"\"\"\n return self.__update_msg()\n\n def update_msgparts(self):\n \"\"\" ---> Must override in derived class <---\"\"\"\n return self.__update_msgparts()\n\n @property\n def lineseparator(self):\n \"\"\" lineseparator getter method \"\"\"\n return self.__linesep\n\n @lineseparator.setter\n def lineseparator(self, linesep):\n \"\"\" lineseparator setter method \"\"\"\n if(self.__linesep != linesep):\n self.__linesep = linesep\n self.__msg = None\n\n @property\n def msg(self):\n \"\"\" msg getter method \"\"\"\n if(self.__msg is None):\n if(self.__msgparts is None):\n self.__msgparts = self.update_msgparts()\n self.__msg = self.update_msg()\n return self.__msg\n\n @msg.setter\n def msg(self, msg):\n \"\"\" msg setter method \"\"\"\n if(msg != self.__msg):\n self.__msg = msg\n self.__msgparts = None\n self.update_msgparts()\n\n @property\n def msgparts(self):\n \"\"\" msgparts getter method \"\"\"\n if(self.__msgparts is None):\n self.__msgparts = self.update_msgparts()\n return self.__msgparts\n\n @msgparts.setter\n def msgparts(self, msgparts):\n \"\"\" msgparts setter method \"\"\"\n if(msgparts != self.__msgparts):\n self.__msgparts = msgparts\n self.__msg = None\n self.update_msg()\n\n @property\n def type(self):\n \"\"\" msgtype getter method \"\"\"\n return self.__type\n\n @type.setter\n def type(self, type):\n \"\"\" msgtype setter method \"\"\"\n if(isinstance(type, MsgType) is False):\n raise(TypeError,\n 'Expected argument to be of type MsgType')\n\n if(self.__type != type):\n self.msg = None\n self.msgparts = None\n self.__type = type\n\n @property\n def respcode(self):\n \"\"\" response code getter method \"\"\"\n return self.__respcode\n\n @respcode.setter\n def respcode(self, respcode):\n \"\"\" response code setter method \"\"\"\n if((respcode is not None) and\n (self.__respcode != respcode)):\n self.msg = None\n self.msgparts = None\n self.__respcode = respcode\n\n @property\n def reason(self):\n \"\"\" reason code getter method \"\"\"\n return self.__reason\n\n @reason.setter\n def reason(self, reason):\n \"\"\" reason code setter method \"\"\"\n if((reason is not None) and\n (self.__reason != reason)):\n self.msg = None\n self.msgparts = None\n self.__reason = reason\n\n @property\n def hdrs(self):\n \"\"\" hdrs getter method \"\"\"\n return self.__hdrs\n\n def addhdr(self, key, value):\n \"\"\" add a hdr (key, value) to hdrs\"\"\"\n if(key is not None):\n if(self.__hdrs is None):\n self.__hdrs = {}\n self.__hdrs[key] 
= value\n self.msg = None\n self.msgparts = None\n\n def delhdr(self, key):\n \"\"\" delete a hdr (key) from hdrs\"\"\"\n if((key is not None) and\n (key in self.__hdrs)):\n del self.__hdrs[key]\n self.msg = None\n self.msgparts = None\n\n @hdrs.setter\n def hdrs(self, hdrs):\n \"\"\" hdrs setter method \"\"\"\n if((hdrs is not None) and\n (self.__hdrs != hdrs)):\n self.msg = None\n self.msgparts = None\n self.__hdrs = hdrs\n\n @property\n def body(self):\n \"\"\" body getter method \"\"\"\n return self.__body\n\n @body.setter\n def body(self, body):\n \"\"\" body setter method \"\"\"\n if((body is not None) and\n (self.__body != body)):\n self.msg = None\n self.msgparts = None\n self.__body = body\n\n @property\n def reqline(self):\n \"\"\" reqline getter method \"\"\"\n return self.__reqline\n\n @reqline.setter\n def reqline(self, reqline):\n \"\"\" reqline setter method \"\"\"\n if((reqline is not None) and\n (self.__reqline != reqline)):\n self.msg = None\n self.msgparts = None\n self.__reqline = reqline\n\n @property\n def respline(self):\n \"\"\" response line getter method \"\"\"\n return self.__respline\n\n @respline.setter\n def respline(self, respline):\n \"\"\" response line setter method \"\"\"\n if((respline is not None) and\n (self.__respline != respline)):\n self.msg = None\n self.msgparts = None\n self.__respline = respline\n\n @property\n def reqid(self):\n \"\"\" request id getter method \"\"\"\n return self.__reqid\n\n @reqid.setter\n def reqid(self, id):\n \"\"\" request id setter method \"\"\"\n if(id is None):\n id = str(getuid())\n\n if(self.__reqid != id):\n self.msg = None\n self.msgparts = None\n self.__reqid = id\n\n @property\n def respid(self):\n \"\"\" response id getter method \"\"\"\n return self.__respid\n\n @respid.setter\n def respid(self, id):\n \"\"\" response id setter method \"\"\"\n if(id is None):\n id = str(getuid())\n\n if(self.__respid != id):\n self.msg = None\n self.msgparts = None\n self.__respid = id\n\n def tostr(self):\n \"\"\" return msg as a string \"\"\"\n return self.msg\n\n def tobytes(self):\n \"\"\" return msg as bytes \"\"\"\n if(self.msg is not None):\n return self.msg.encode()\n return None\n\n def __repr__(self):\n return self.tostr()\n\n def debug(self):\n \"\"\" return instance data as a string \"\"\"\n return ('type=%s\\nmsg=%r\\nmsgparts=%s\\n'\n 'hdrs=%s\\nbody=%r\\n'\n 'lineseparator=%r\\n'\n 'reqline=%s\\n'\n 'reqid=%s\\n'\n 'respcode=%s\\n'\n 'reason=%s\\n'\n 'respline=%s\\n'\n 'respid=%s\\n'\n % (self.type,\n self.msg,\n self.msgparts,\n self.hdrs,\n self.body,\n self.lineseparator,\n self.reqline,\n self.reqid,\n self.respcode,\n self.reason,\n self.respline,\n self.respid))\n\n\nclass MsgReq(Msg):\n \"\"\" The Base Class For all\n Message Request Formats\n \"\"\"\n def __init__(self,\n msg=None,\n msgparts=None,\n lineseparator='\\r\\n',\n hdrs_dict=None,\n body=None,\n reqline=None,\n reqid=None):\n \"\"\" inits the object - see base class for more comments\"\"\"\n reqid, reqline =\\\n self.__reset(msg=msg,\n msgparts=msgparts,\n lineseparator=lineseparator,\n hdrs_dict=hdrs_dict,\n body=body,\n reqline=reqline,\n reqid=reqid)\n\n super(MsgReq, self).__init__(type=MsgType.REQ,\n msg=msg,\n msgparts=msgparts,\n lineseparator=lineseparator,\n hdrs_dict=hdrs_dict,\n body=body,\n reqline=reqline,\n reqid=reqid)\n\n def __reset(self,\n msg=None,\n msgparts=None,\n lineseparator='\\r\\n',\n hdrs_dict=None,\n body=None,\n reqline=None,\n reqid=None):\n \"\"\" internal function to reset the object to its initial state\"\"\"\n 
if(reqid is None):\n reqid = str(getuid())\n\n if(reqline is None):\n reqline = get_request_line()\n\n return reqid, reqline\n\n def reset(self,\n msg=None,\n msgparts=None,\n lineseparator='\\r\\n',\n hdrs_dict=None,\n body=None,\n reqline=None,\n reqid=None):\n \"\"\" resets the object to its initial state\"\"\"\n reqid, reqline =\\\n self.__reset(msg=msg,\n msgparts=msgparts,\n lineseparator=lineseparator,\n hdrs_dict=hdrs_dict,\n body=body,\n reqline=reqline,\n reqid=reqid)\n\n super(MsgReq, self).reset(type=MsgType.REQ,\n msg=msg,\n msgparts=msgparts,\n lineseparator=lineseparator,\n hdrs_dict=hdrs_dict,\n body=body,\n reqline=reqline,\n reqid=reqid)\n\n def debug(self):\n \"\"\" return instance data as a string \"\"\"\n return ('type=%s\\nmsg=%r\\nmsgparts=%s\\n'\n 'hdrs=%s\\nbody=%r\\n'\n 'lineseparator=%r\\n'\n 'reqline=%r\\n'\n 'reqid=%s\\n'\n % (self.type,\n self.msg,\n self.msgparts,\n self.hdrs,\n self.body,\n self.lineseparator,\n self.reqline,\n self.reqid))\n\n\nclass MsgResp(Msg):\n \"\"\" The Base Class For all\n Message Response Formats\n \"\"\"\n def __init__(self,\n msg=None,\n msgparts=None,\n lineseparator='\\r\\n',\n respcode=None,\n reason=None,\n hdrs_dict=None,\n body=None,\n respline=None,\n respid=None):\n \"\"\" inits the object - see base class for more comments\"\"\"\n respid, respline = \\\n self.__reset(msg=msg,\n msgparts=msgparts,\n lineseparator=lineseparator,\n hdrs_dict=hdrs_dict,\n body=body,\n respcode=respcode,\n reason=reason,\n respline=respline,\n respid=respid)\n\n super(MsgResp, self).__init__(type=MsgType.RESP,\n msg=msg,\n msgparts=msgparts,\n lineseparator=lineseparator,\n hdrs_dict=hdrs_dict,\n body=body,\n respcode=respcode,\n reason=reason,\n respline=respline,\n respid=respid)\n\n def __reset(self,\n msg=None,\n msgparts=None,\n lineseparator=None,\n respcode=None,\n reason=None,\n hdrs_dict=None,\n body=None,\n respline=None,\n respid=None):\n \"\"\" internal function to reset the object to its initial state\"\"\"\n if(respid is None):\n respid = str(getuid())\n\n if(respline is None):\n respline = get_response_line()\n\n return respid, respline\n\n def reset(self,\n msg=None,\n msgparts=None,\n lineseparator=None,\n respcode=None,\n reason=None,\n hdrs_dict=None,\n body=None,\n respline=None,\n respid=None):\n \"\"\" resets the object to its initial state\"\"\"\n respid, respline = \\\n self.__reset(msg=msg,\n msgparts=msgparts,\n lineseparator=lineseparator,\n hdrs_dict=hdrs_dict,\n body=body,\n respcode=respcode,\n reason=reason,\n respline=respline,\n respid=respid)\n\n super(MsgResp, self).reset(type=MsgType.RESP,\n msg=msg,\n msgparts=msgparts,\n lineseparator=lineseparator,\n hdrs_dict=hdrs_dict,\n body=body,\n respcode=respcode,\n reason=reason,\n respline=respline,\n respid=respid)\n\n def debug(self):\n \"\"\" return instance data as a string \"\"\"\n return ('type=%s\\nmsg=%r\\nmsgparts=%s\\n'\n 'hdrs=%s\\nbody=%r\\n'\n 'lineseparator=%r\\n'\n 'respcode=%s\\n'\n 'reason=%s\\n'\n 'respline=%r\\n'\n 'respid=%s\\n'\n % (self.type,\n self.msg,\n self.msgparts,\n self.hdrs,\n self.body,\n self.lineseparator,\n self.respcode,\n self.reason,\n self.respline,\n self.respid))\n\n\nif __name__ == '__main__':\n mreq = MsgReq(msg='Hello')\n mresp = MsgResp(msg='World')\n msg = Msg(msg='Hello World')\n\n print(mreq.debug())\n print(mresp.debug())\n print(msg.debug())\n print(mreq.debug())\n print(mresp.debug())\n print(msg.debug())\n\n mreq.reset()\n mresp.reset()\n 
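The Msg hierarchy above clears its cached serialization (self.msg and self.msgparts) inside every setter whenever a component such as the headers, body, or request line actually changes, then rebuilds lazily. A minimal sketch of that invalidate-on-write property pattern, using a hypothetical Cached class rather than the author's exact code:

class Cached:
    def __init__(self):
        self._body = None
        self._rendered = None  # cached serialization; None means "stale"

    @property
    def body(self):
        return self._body

    @body.setter
    def body(self, value):
        if value is not None and value != self._body:
            self._rendered = None  # invalidate the cache only on real change
            self._body = value

    def render(self):
        if self._rendered is None:
            self._rendered = 'body=%r' % (self._body,)  # rebuild lazily
        return self._rendered

The guard on "value != self._body" matters: setting the same value twice leaves the cache intact, which is the same behavior the setters above implement.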
msg.reset()\n","repo_name":"goffersoft/common-utils-python","sub_path":"experimental/com/goffersoft/msg/msg.py","file_name":"msg.py","file_ext":"py","file_size_in_byte":22006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8652327931","text":"import asyncio\nfrom typing import Generic, List, TypeVar\n\nfrom fastapi import HTTPException, Query\nfrom pydantic.generics import GenericModel\nfrom tortoise import QuerySet\nfrom tortoise.exceptions import FieldError\n\ndefault_offset = 0\nmax_offset = None\n\ndefault_limit = 10\nmax_limit = 1000\n\nDataT = TypeVar(\"DataT\")\n\n\nclass PaginationResult(GenericModel, Generic[DataT]):\n count: int\n results: List[DataT]\n\n\nclass Pagination:\n def __init__(\n self,\n limit: int = Query(default=default_limit, ge=1, le=max_limit),\n offset: int = Query(default=default_offset, ge=0, le=max_offset),\n order_by: List[str] = Query(None),\n ):\n self.limit = limit\n self.offset = offset\n self.order_by = order_by\n\n async def paginate(self, qs: QuerySet):\n if self.order_by:\n try:\n qs = qs.order_by(*self.order_by)\n except FieldError as e:\n raise HTTPException(status_code=400, detail=str(e))\n count, results = await asyncio.gather(qs.count(), qs.limit(self.limit).offset(self.offset))\n\n return PaginationResult(count=count, results=results)\n","repo_name":"neo-f/galaxy","sub_path":"app/utils/paginator.py","file_name":"paginator.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24855381531","text":"from flask import Flask, redirect, url_for, render_template, request\r\n\r\natt = ['acousticness', 'danceability', 'energy', 'explicit',\r\n 'instrumentalness', 'key', 'liveness', 'loudness', 'mode',\r\n 'speechiness', 'tempo', 'valence', 'year']\r\n\r\ndict = {}\r\nuserAtt = []\r\ngraph = \"\"\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\", methods=[\"POST\",\"GET\"])\r\ndef home():\r\n if request.method == \"POST\":\r\n for item in att:\r\n dict[item] = request.form[item]\r\n\r\n file = open('output.txt', 'w')\r\n for item in dict.keys():\r\n file.write(item + \":\" + dict[item] + \"\\n\")\r\n userAtt.append(dict[item])\r\n file.close()\r\n \r\n graph = request.form[\"graphChoice\"]\r\n\r\n print(graph)\r\n print(dict)\r\n print(userAtt)\r\n return render_template(\"index.html\", data = dict)\r\n else:\r\n return render_template(\"index.html\")\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n","repo_name":"Western-AI/spotify","sub_path":"FinalModel/app_backend.py","file_name":"app_backend.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"72847075133","text":"from django.shortcuts import render, redirect\nfrom url.forms import URL_Form\nfrom url.models import URL\n\n# Create your views here.\ndef index(request):\n context = {}\n if request.method == 'POST':\n form = URL_Form(request.POST)\n if form.is_valid():\n new_url = form.save()\n request.session['new_object_id'] = new_url.id\n return redirect('index')\n if 'new_object_id' in request.session:\n session_object = URL.objects.get(pk=request.session['new_object_id'])\n context.update({'session_object': session_object})\n form = URL_Form\n context.update({'form': form})\n return render(request, 'url/index.html', context)\n\ndef page_redirect(request, number):\n redirect_url = URL.objects.get(pk=number).user_url\n return 
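The paginator record above issues its COUNT query and its page query concurrently with asyncio.gather instead of awaiting them one after the other, so total latency is the slower of the two round-trips rather than their sum. A standalone sketch of that idea with stand-in coroutines in place of the Tortoise QuerySet calls (fetch_count and fetch_page are placeholders, not part of the original module):

import asyncio

async def fetch_count():
    await asyncio.sleep(0.1)  # stands in for qs.count()
    return 42

async def fetch_page(limit, offset):
    await asyncio.sleep(0.1)  # stands in for qs.limit(...).offset(...)
    return list(range(offset, offset + limit))

async def paginate(limit=10, offset=0):
    # Both awaits overlap; gather returns results in argument order.
    count, results = await asyncio.gather(fetch_count(), fetch_page(limit, offset))
    return {"count": count, "results": results}

print(asyncio.run(paginate()))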
redirect(redirect_url)","repo_name":"blarmon/url_shortener","sub_path":"url/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25676017145","text":"'''\napi doesnt support calling a list of symbols because filters use implicit logical AND\nalso cant just request all the data and then post-process because db server runs out of memory\nso instead call one symbol at a time; yawn\n'''\nfrom constants import TSDB, hosts, URL_KWARGS, PARAMS_KWARGS, DEV, path, PROD\nfrom mosaic_api_templates import api_config_dict\nfrom mosaic_wapi import build_url\nimport os\nimport requests\nimport pandas as pd\n\nSOURCE_KEY = 'sourcekey'\nFILTERS = 'filters'\nSINGLE_QUOTE = \"'\"\nEQUALS_SIGN = '='\n\neia_weekly_dict = {\n URL_KWARGS:\n {'stage': 'raw',\n 'source': 'eia_weekly'},\n PARAMS_KWARGS:\n {FILTERS: {SOURCE_KEY:\n ['WCRSTUS1',\n 'WGTSTUS1',\n 'W_EPOOXE_SAE_NUS_MBBL',\n 'WKJSTUS1',\n 'WDISTUS1',\n 'WRESTUS1',\n 'WPRSTUS1',\n 'W_EPPO6_SAE_NUS_MBBL'\n ]}}\n}\n\n\ndef build_kwargs_for_url():\n url_kwargs = eia_weekly_dict[URL_KWARGS]\n url_kwargs['host'] = host\n url_kwargs['api_name'] = api_name\n return url_kwargs\n\n\ndef build_params_list():\n source_keys = eia_weekly_dict[PARAMS_KWARGS][FILTERS][SOURCE_KEY]\n source_keys_as_filter = [f\"{SOURCE_KEY}='{source_key}'\" for source_key in source_keys]\n return [{FILTERS: source_key} for source_key in source_keys_as_filter]\n\n\ndef call_api_for_many_and_build_df(url, params_list):\n df = pd.DataFrame()\n for params in params_list:\n response = requests.get(url=url, params=params)\n new_df = pd.DataFrame(response.json())\n df = pd.concat([df, new_df], axis='rows')\n return df\n\n\nif __name__ == '__main__':\n env = DEV\n api_name = TSDB\n host_name = TSDB\n host = hosts[host_name][env]\n\n # build url\n kwargs = build_kwargs_for_url()\n template_url = api_config_dict[api_name]['url_template']\n url = build_url(template_url, kwargs)\n\n # build params\n params_list = build_params_list()\n\n # build df for list of symbols\n df = call_api_for_many_and_build_df(url=url, params_list=params_list)\n\n # save as xls\n xlsx_for_fundamental_timeseries = 'fundamental_timeseries.xlsx'\n pathfile = os.path.join(path, xlsx_for_fundamental_timeseries)\n with pd.ExcelWriter(pathfile) as writer:\n df.to_excel(writer, sheet_name='timeseries')\n\n pivot_df = df.pivot(index='date', columns=SOURCE_KEY, values='value')\n print(pivot_df)\n","repo_name":"pnorton-hartreepartners/sandbox","sub_path":"fundamental_timeseries.py","file_name":"fundamental_timeseries.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23834582955","text":"class Solution(object):\n def isPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: bool\n \"\"\"\n #Regex solution\n s = re.sub(r'[^a-zA-Z0-9]', '', s)\n s = s.lower()\n print(s)\n i = 0\n j = len(s)-1\n while (not(i > j)):\n if (s[i] != s[j]):\n return False\n i += 1\n j -= 1\n return True\n","repo_name":"matchasaur/NeetCode-150","sub_path":"Easy: Valid Palindrome.py","file_name":"Easy: Valid Palindrome.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35694532031","text":"def stock_availability(input_list, *args):\n inv_list = list(input_list)\n command = args[0]\n number = 0\n\n if command == \"delivery\":\n 
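The Valid Palindrome solution above calls re.sub, but the snippet as captured never imports re, and it strips punctuation with a regex before scanning. A self-contained two-pointer variant that needs no regex at all, filtering with str.isalnum (a sketch, not the submitter's exact code):

def is_palindrome(s):
    # Keep only alphanumerics, lowercased, then compare from both ends.
    cleaned = [c.lower() for c in s if c.isalnum()]
    i, j = 0, len(cleaned) - 1
    while i < j:
        if cleaned[i] != cleaned[j]:
            return False
        i += 1
        j -= 1
    return True

assert is_palindrome("A man, a plan, a canal: Panama")
assert not is_palindrome("race a car")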
for i in range(1, len(args)):\n inv_list.append(args[i])\n\n elif command == \"sell\":\n for i in range(1, len(args)):\n if args[i].isdigit():\n number = int(args[i])\n\n for i in range(number):\n inv_list.pop(0)\n\n\n","repo_name":"printfabric/SoftUni","sub_path":"python/02. python_advanced/06_exam_prep/14_February_2021/03_problem.py","file_name":"03_problem.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38811948818","text":"import json\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom argparse import ArgumentParser\n\nfrom tqdm import tqdm\n\nfrom gofigr import GoFigr\nfrom gofigr.jupyter import from_config_or_env\n\n\n@from_config_or_env(\"GF_\", os.path.join(os.environ['HOME'], '.gofigr'))\ndef get_gf(test_user, test_password):\n return GoFigr(username=test_user, password=test_password, url=\"https://api-dev.gofigr.io\")\n\n\ndef clean_up():\n gf = get_gf()\n\n ana = gf.primary_workspace.get_analysis(\"Integration tests\", create=True).fetch()\n print(\"Cleaning up....\")\n for fig in tqdm(ana.figures):\n fig.fetch()\n for rev in fig.revisions:\n rev.delete(delete=True)\n\n fig.delete(delete=True)\n\n\ndef main():\n parser = ArgumentParser(description=\"Runs integration tests based on a config file\")\n parser.add_argument(\"config\", help=\"config file (JSON)\")\n parser.add_argument(\"output\", help=\"output directory\")\n parser.add_argument(\"--force\", action=\"store_true\", help=\"Force re-run even if directory already exists\")\n args = parser.parse_args()\n\n run_one = os.path.join(os.path.dirname(sys.argv[0]), \"run_one.sh\")\n clean_up()\n\n with open(args.config, \"r\") as f:\n all_configurations = json.load(f)\n\n if isinstance(all_configurations, dict):\n all_configurations = [all_configurations]\n\n for idx, config in enumerate(tqdm(all_configurations)):\n out_dir = os.path.join(args.output, config[\"name\"])\n\n print(f\"Running configuration {idx + 1}/{len(all_configurations)}: \")\n print(f\" * Python: {config['python']}\")\n print(f\" * Dependencies: {config['dependencies']}\")\n print(f\" * Directory: {out_dir}\")\n\n if os.path.exists(out_dir):\n if args.force:\n shutil.rmtree(out_dir)\n else:\n print(\" => Path exists. 
Skipping\\n\")\n continue\n\n os.makedirs(out_dir, exist_ok=True)\n\n with open(os.path.join(out_dir, \"config.json\"), 'w') as f:\n json.dump(config, f)\n\n with open(os.path.join(out_dir, \"driver_stdout.txt\"), 'wb') as f, \\\n open(os.path.join(out_dir, \"driver_stderr.txt\"), 'wb') as ferr:\n cp = subprocess.run([\"bash\", run_one, out_dir, config[\"python\"], config[\"service\"], config[\"dependencies\"]],\n stdout=f, stderr=ferr)\n if cp.returncode != 0:\n print(f\" => Process failed with code {cp.returncode}\")\n\n with open(os.path.join(out_dir, \"errors.json\"), 'w') as ef:\n json.dump({\"error\": None if cp.stderr is None else cp.stderr.decode('ascii', errors='ignore')}, ef)\n\n else:\n print(\" => Complete\")\n\n clean_up()\n print(\"Cleanup complete.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"GoFigr/gofigr-python","sub_path":"tests/integration/run_compatibility.py","file_name":"run_compatibility.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33708295139","text":"def fix_sciprt(entity):\n entity_dir = f'data/{entity}/'\n f = open((f'{entity_dir}r.comment.json').strip())\n lines = f.readlines()\n new_f = open((f'{entity_dir}_fixed_r.comment.json').strip(),'w')\n for line in lines[:-2]:\n new_f.write(line)\n new_line = lines[-2][:-3] + lines[-2][-2:]\n new_f.write(new_line)\n new_f.write(lines[-1])\n new_f.close()\n f.close()\n\n\n\n","repo_name":"AidaRamezani/MoralReddit","sub_path":"src/fix_scripts.py","file_name":"fix_scripts.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41903660030","text":"from __future__ import absolute_import, print_function\nfrom future.builtins import bytes # makes python 2 `bytes()` more similar to python 3\n\nfrom collections import defaultdict\n\nimport itertools\nimport random\nimport time\nimport threading\nimport sys\n\nfrom six.moves.queue import Queue\n\nfrom sbp.client import Framer, Handler\nfrom sbp.file_io import (SBP_MSG_FILEIO_READ_DIR_RESP, SBP_MSG_FILEIO_READ_RESP,\n SBP_MSG_FILEIO_WRITE_RESP, SBP_MSG_FILEIO_CONFIG_RESP,\n MsgFileioReadDirReq, MsgFileioReadDirResp,\n MsgFileioReadReq, MsgFileioRemove, MsgFileioWriteReq,\n MsgFileioConfigReq, MsgFileioConfigResp)\n\nfrom piksi_tools import serial_link\nfrom piksi_tools import __version__ as VERSION\nfrom piksi_tools.utils import Time\n\nMAX_PAYLOAD_SIZE = 255\nSBP_FILEIO_WINDOW_SIZE = 100\nSBP_FILEIO_BATCH_SIZE = 1\nSBP_FILEIO_TIMEOUT = 3\nMAXIMUM_RETRIES = 20\nPROGRESS_CB_REDUCTION_FACTOR = 100\nTEXT_ENCODING = 'utf-8' # used for printing out directory listings and files\nWAIT_SLEEP_S = 0.001\nCONFIG_REQ_RETRY_MS = 100\nCONFIG_REQ_TIMEOUT_MS = 1000\nREADDIR_WAIT_S = 5.0\n\n\nclass PendingRequest(object):\n \"\"\"\n Represents a request that is pending.\n\n Fields\n ----------\n message : MsgFileioWriteReq, MsgFileioReadDirReq, MsgFileioReadReq\n The request that's pending\n time : Time\n The time the message was sent (or re-sent at)\n time_expire : Time\n The time the message will be considered expired (and then retried)\n tries : int\n The number of times we've attemptted to send the write message\n index : int\n The index of this object into the pending write map\n completed : bool\n If the request is already completed\n \"\"\"\n\n __slots__ = [\"message\", \"time\", \"time_expire\", \"tries\", \"index\", \"completed\"]\n\n def __init__(self, index):\n 
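The integration-test driver above redirects each child process's stdout and stderr into per-run log files and then inspects returncode, which keeps large test output off the Python heap. A compact sketch of that pattern (the command and file names here are placeholders, not the project's real run_one.sh invocation):

import subprocess

def run_logged(cmd, out_path, err_path):
    # Stream the child's output straight to disk instead of buffering in memory.
    with open(out_path, "wb") as out, open(err_path, "wb") as err:
        cp = subprocess.run(cmd, stdout=out, stderr=err)
    return cp.returncode

rc = run_logged(["echo", "hello"], "driver_stdout.txt", "driver_stderr.txt")
print("exit code:", rc)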
self.index = index\n self.completed = None\n self.time = Time()\n\n def __repr__(self):\n return \"PendingRequest(offset=%r,seq=%r,time=%r,tries=%r,index=%r)\" % (\n self.message.offset, self.message.sequence, self.time, self.tries, self.index)\n\n def track(self, pending_req, time, time_expire):\n \"\"\"\n Load information about the pending write so that it can be tracked.\n \"\"\"\n self.message = pending_req\n self.time = time\n self.time_expire = time_expire\n self.tries = 0\n self.completed = False\n return self\n\n def record_retry(self, retry_time, new_expire_time):\n \"\"\"\n Record a retry event, indicates that the SelectiveRepeater decided to\n retry sending the tracked MsgFileioWriteReq message.\n \"\"\"\n self.tries += 1\n self.time = retry_time\n self.time_expire = new_expire_time\n return self\n\n\nclass SelectiveRepeater(object):\n \"\"\"\n Selective repeater for SBP file I/O requests\n\n Fields\n ----------\n _pending_map : list(PendingRequest)\n List (used as a map) of PendingRequest objects, used to track\n outstanding requests.\n _request_pool : Queue\n Queue of available requests (recorded by index number)\n _seqmap : dict(int,int)\n Dictionary mapping SBP request sequence IDs to their corresponding\n request index.\n _batch_msgs : list\n Collector for a batch of messages to be sent in one\n buffer via the link\n _last_check_time : Time\n The last time we checked if any packets had expired\n _expire_map : dict(Time, dict(PendingRequest, PendingRequest))\n Dictionary which records the future time at which a request\n will expire.\n\n _msg_type : int\n The message type we're currently sending\n _link : Handler\n The link over which we're sending data\n\n _callback_thread : int\n ID of the thread that we expect callbacks from\n _link_thread : int\n ID of the thread that handles link writes\n \"\"\"\n\n def __init__(self, link, msg_type, cb=None):\n \"\"\"\n Args\n ---\n link : Handler\n Link over which messages will be sent.\n msg_type :\n The type of message being sent\n cb :\n Invoked when SBP message with type `msg_type` is received\n \"\"\"\n\n self._link = link\n self._msg_type = msg_type\n self._callback = cb\n\n self._seqmap = {}\n self._batch_msgs = []\n self._last_check_time = Time.now()\n self._expire_map = defaultdict(dict)\n\n self._init_fileio_config(SBP_FILEIO_WINDOW_SIZE, SBP_FILEIO_BATCH_SIZE, PROGRESS_CB_REDUCTION_FACTOR)\n\n self._callback_thread = None\n self._link_thread = None\n\n self._total_sends = 1.0\n self._total_retries = 0\n\n self._config_retry_time = None\n\n def _init_fileio_config(self, window_size, batch_size, progress_cb_reduction_factor):\n self._pending_map = [PendingRequest(X) for X in range(window_size)]\n self._request_pool = Queue(window_size)\n for pending_req in self._pending_map:\n self._request_pool.put(pending_req)\n self._batch_size = batch_size\n self._progress_cb_reduction_factor = progress_cb_reduction_factor\n\n def __enter__(self):\n self._link.add_callback(self._request_cb, self._msg_type)\n self._link.add_callback(self._config_cb, SBP_MSG_FILEIO_CONFIG_RESP)\n return self\n\n def __exit__(self, type, value, traceback):\n self._link.remove_callback(self._request_cb, self._msg_type)\n self._link.remove_callback(self._config_cb, SBP_MSG_FILEIO_CONFIG_RESP)\n\n def _verify_cb_thread(self):\n \"\"\"\n Verify that only one thread is consuming requests.\n \"\"\"\n if self._callback_thread is None:\n self._callback_thread = threading.currentThread().ident\n assert self._callback_thread == threading.currentThread().ident\n\n def 
_verify_link_thread(self):\n \"\"\"\n Verify that only one thread is producing requests.\n \"\"\"\n if self._link_thread is None:\n self._link_thread = threading.currentThread().ident\n assert self._link_thread == threading.currentThread().ident\n\n def _return_pending_req(self, pending_req):\n \"\"\"\n Return a pending request to the write pool and clean any\n entries in the expiration map.\n \"\"\"\n self._verify_cb_thread()\n pending_req.completed = True\n try:\n msg = pending_req.message\n except AttributeError:\n # Got a completion for something that was never requested\n return\n self._try_remove_keys(self._seqmap, msg.sequence)\n if self._try_remove_keys(self._expire_map[pending_req.time_expire], pending_req):\n # Only put the request back if it was successfully removed\n self._request_pool.put(pending_req)\n\n def _record_pending_req(self, msg, time_now, expiration_time):\n \"\"\"\n Acquire a pending request object and record it's future\n expiration time in a map.\n \"\"\"\n self._verify_link_thread()\n # Queue.get will block if no requests are available\n pending_req = self._request_pool.get(True)\n assert self._pending_map[pending_req.index].index == pending_req.index\n self._seqmap[msg.sequence] = pending_req.index\n self._pending_map[pending_req.index].track(msg, time_now, expiration_time)\n self._expire_map[expiration_time][pending_req] = pending_req\n\n def _config_cb(self, msg, **metadata):\n self._config_msg = msg\n self._init_fileio_config(msg.window_size, msg.batch_size, PROGRESS_CB_REDUCTION_FACTOR * 2)\n\n def _request_cb(self, msg, **metadata):\n \"\"\"\n Process request completions.\n \"\"\"\n index = self._seqmap.get(msg.sequence)\n if index is None:\n return\n pending_req = self._pending_map[index]\n if self._callback:\n self._callback(pending_req.message, msg)\n self._return_pending_req(pending_req)\n\n def _has_pending(self):\n return self._request_pool.qsize() != len(self._pending_map)\n\n def _retry_send(self, check_time, pending_req, delete_keys):\n \"\"\"\n Retry a request by updating it's expire time on the object\n itself and in the expiration map.\n \"\"\"\n self._total_retries += 1\n self._total_sends += 1\n timeout_delta = Time(SBP_FILEIO_TIMEOUT)\n send_time = Time.now()\n new_expire = send_time + timeout_delta\n pending_req.record_retry(send_time, new_expire)\n self._expire_map[new_expire][pending_req] = pending_req\n self._link(pending_req.message)\n delete_keys.append(pending_req)\n\n def _try_remove_keys(self, d, *keys):\n success = True\n for key in keys:\n try:\n del d[key]\n except KeyError:\n success = False\n return success\n\n def _check_pending(self):\n \"\"\"\n Scans from the last check time to the current time looking\n for requests that are due to expire and retries them if\n necessary.\n \"\"\"\n time_now = Time.now()\n timeout_delta = Time(SBP_FILEIO_TIMEOUT)\n for check_time in Time.iter_since(self._last_check_time, time_now):\n pending_reqs = self._expire_map[check_time]\n retried_writes = []\n for pending_req in list(pending_reqs):\n time_expire = pending_req.time + timeout_delta\n if time_now >= time_expire:\n if pending_req.tries >= MAXIMUM_RETRIES:\n raise Exception('Timed out')\n # If the completion map becomes inconsistent (because\n # things are completing at the same time they're\n # being re-tried) then the `completed` field should\n # prevent us from re-sending a write in this case.\n if not pending_req.completed:\n self._retry_send(check_time, pending_req, retried_writes)\n # Pending writes can be marked completed while this 
function\n # is running, so a key error means is was marked completed\n # after we sent a retry (therefore _try_remove_keys ignores\n # key errors).\n self._try_remove_keys(self._expire_map[check_time], *retried_writes)\n self._last_check_time = time_now\n\n def _window_available(self, batch_size):\n return self._request_pool.qsize() >= batch_size\n\n def _ensure_config_req_sent(self):\n if self._config_retry_time is not None:\n return\n now = Time.now()\n self._config_retry_time = now + Time(0, CONFIG_REQ_RETRY_MS)\n self._config_timeout = now + Time(0, CONFIG_REQ_TIMEOUT_MS)\n self._config_seq = random.randint(0, 0xffffffff)\n self._config_msg = None\n self._link(MsgFileioConfigReq(sequence=self._config_seq))\n\n def _config_received(self):\n self._ensure_config_req_sent()\n if self._config_msg is not None:\n return True\n now = Time.now()\n if now >= self._config_retry_time:\n self._link(MsgFileioConfigReq(sequence=self._config_seq))\n self._config_retry_time = now + Time(0, CONFIG_REQ_RETRY_MS)\n if now >= self._config_timeout:\n self._config_msg = MsgFileioConfigResp(sequence=0,\n window_size=100,\n batch_size=1,\n fileio_version=0)\n return self._config_msg is not None\n\n def _wait_config_received(self):\n while not self._config_received():\n time.sleep(WAIT_SLEEP_S)\n\n def _wait_window_available(self, batch_size):\n self._wait_config_received()\n while not self._window_available(batch_size):\n self._check_pending()\n if not self._window_available(batch_size):\n time.sleep(WAIT_SLEEP_S)\n\n @property\n def total_retries(self):\n return self._total_retries\n\n @property\n def total_sends(self):\n return self._total_sends\n\n @property\n def progress_cb_reduction_factor(self):\n return self._progress_cb_reduction_factor\n\n def send(self, msg, batch_size=None):\n if batch_size is not None:\n self._send(msg, batch_size)\n else:\n self._send(msg, self._batch_size)\n\n def _send(self, msg, batch_size):\n \"\"\"\n Sends data via the current link, potentially batching it together.\n\n Parameters\n ----------\n msg : MsgFileioReadReq, MsgFileioReadDirReq, MsgFileioWriteReq, MsgFileioRemove\n The message to be sent via the current link\n batch_size : int\n The number of message to batch together before actually sending\n \"\"\"\n if msg is not None:\n self._batch_msgs.append(msg)\n if len(self._batch_msgs) >= batch_size:\n self._wait_window_available(batch_size)\n time_now = Time.now()\n expiration_time = time_now + Time(SBP_FILEIO_TIMEOUT)\n for msg in self._batch_msgs:\n self._record_pending_req(msg, time_now, expiration_time)\n self._link(*self._batch_msgs)\n self._total_sends += len(self._batch_msgs)\n del self._batch_msgs[:]\n\n def flush(self):\n \"\"\"\n Flush any pending requests (batched or otherwise) and wait for all\n pending requests to complete.\n \"\"\"\n self.send(None, batch_size=0)\n while self._has_pending():\n self._check_pending()\n time.sleep(WAIT_SLEEP_S)\n\n\nclass FileIO(object):\n def __init__(self, link):\n self.link = link\n self._seq = random.randint(0, 0xffffffff)\n\n def next_seq(self):\n self._seq += 1\n return self._seq\n\n def read(self, filename):\n \"\"\"\n Read the contents of a file.\n\n Parameters\n ----------\n filename : bytes\n Name of the file to read.\n\n Returns\n -------\n out : bytearray\n Contents of the file.\n \"\"\"\n offset = 0\n chunksize = MAX_PAYLOAD_SIZE - 4\n closure = {'mostly_done': False, 'done': False, 'buf': {}, 'pending': set()}\n\n def cb(req, resp):\n closure['pending'].remove(req.offset)\n closure['buf'][req.offset] = 
resp.contents\n if req.chunk_size != len(resp.contents):\n closure['mostly_done'] = True\n if closure['mostly_done'] and len(closure['pending']) == 0:\n closure['done'] = True\n\n with SelectiveRepeater(self.link, SBP_MSG_FILEIO_READ_RESP, cb) as sr:\n while not closure['mostly_done']:\n seq = self.next_seq()\n msg = MsgFileioReadReq(\n sequence=seq,\n offset=offset,\n chunk_size=chunksize,\n filename=filename)\n closure['pending'].add(offset)\n sr.send(msg)\n offset += chunksize\n while not closure['done']:\n time.sleep(WAIT_SLEEP_S)\n sorted_buffers = sorted(closure['buf'].items(), key=lambda kv: kv[0])\n return bytearray(itertools.chain.from_iterable(kv[1] for kv in sorted_buffers))\n\n def readdir(self, dirname=b'.'):\n \"\"\"\n List the files in a directory.\n\n Parameters\n ----------\n dirname : bytes (optional)\n Name of the directory to list. Defaults to the root directory.\n\n Returns\n -------\n out : [bytes]\n List of file names.\n \"\"\"\n files = []\n while True:\n seq = self.next_seq()\n msg = MsgFileioReadDirReq(\n sequence=seq, offset=len(files), dirname=dirname)\n self.link(msg)\n reply = self.link.wait(SBP_MSG_FILEIO_READ_DIR_RESP, timeout=READDIR_WAIT_S)\n if not reply:\n raise Exception(\"Timeout waiting for FILEIO_READ_DIR reply\")\n # Why isn't this already decoded?\n reply = MsgFileioReadDirResp(reply)\n if reply.sequence != seq:\n raise Exception(\"Reply FILEIO_READ_DIR doesn't match request (%d vs %d)\" % (reply.sequence, seq))\n chunk = bytes(reply.contents).rstrip(b'\\0')\n\n if len(chunk) == 0:\n return files\n files += chunk.split(b'\\0')\n\n def remove(self, filename):\n \"\"\"\n Delete a file.\n\n Parameters\n ----------\n filename : bytes\n Name of the file to delete.\n \"\"\"\n msg = MsgFileioRemove(filename=filename)\n self.link(msg)\n\n def write(self, filename, data, offset=0, trunc=True, progress_cb=None):\n \"\"\"\n Write to a file.\n\n Parameters\n ----------\n filename : bytes\n Name of the file to write to.\n data : bytearray\n Data to write\n offset : int (optional)\n Offset into the file at which to start writing in bytes.\n trunc : bool (optional)\n Overwite the file, i.e. delete any existing file before writing. If\n this option is not specified and the existing file is longer than the\n current write then the contents of the file beyond the write will\n remain. 
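FileIO.read above tracks in-flight offsets in a set and stores each reply's bytes in a dict keyed by offset, so chunks may complete out of order and are stitched back together only at the end. A minimal sketch of that reassembly step, with a made-up parts dict standing in for the closure's buffer:

import itertools

def reassemble(chunks_by_offset):
    # chunks_by_offset: {byte_offset: bytes}, possibly completed out of order.
    ordered = sorted(chunks_by_offset.items(), key=lambda kv: kv[0])
    return bytearray(itertools.chain.from_iterable(buf for _, buf in ordered))

parts = {6: b"fileio ", 0: b"hello ", 13: b"world"}  # arrived unordered
print(bytes(reassemble(parts)))  # b'hello fileio world'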
If offset is non-zero then this flag is ignored.\n\n Returns\n -------\n out : str\n Contents of the file.\n \"\"\"\n if trunc and offset == 0:\n self.remove(filename)\n\n filename_len = len(filename)\n data_len = len(data)\n\n sequence_len = 4\n offset_len = 4\n null_sep_len = 1\n\n write_req_overhead_len = sequence_len + offset_len + null_sep_len\n\n # How do we calculate this from the MsgFileioWriteReq class?\n chunksize = MAX_PAYLOAD_SIZE - filename_len - write_req_overhead_len\n\n chunk_buf = bytearray(filename_len + null_sep_len + chunksize)\n\n chunk_buf[0:filename_len] = filename\n chunk_buf[filename_len] = ord(b'\\x00')\n\n chunk_offset = filename_len + null_sep_len\n\n with SelectiveRepeater(self.link, SBP_MSG_FILEIO_WRITE_RESP) as sr:\n while offset < data_len:\n\n seq = self.next_seq()\n\n end_offset = min(offset + chunksize, data_len)\n chunk_len = min(chunksize, data_len - offset)\n chunk_end = (chunk_offset + chunk_len)\n\n chunk = data[offset:end_offset]\n chunk_buf[chunk_offset:chunk_end] = chunk\n\n if chunk_len < len(chunk_buf):\n write_buf = chunk_buf[:chunk_end]\n else:\n write_buf = chunk_buf\n\n msg = MsgFileioWriteReq(\n sequence=seq,\n offset=offset,\n filename=write_buf, # Note: We put \"write_buf\" into the name because\n # putting in the correct place (the data\n # field) results in a huge slowdown\n # (presumably because an issue in the\n # construct library).\n data=b'')\n\n sr.send(msg)\n offset += chunk_len\n\n if (progress_cb is not None and seq % sr.progress_cb_reduction_factor == 0):\n progress_cb(offset, sr)\n\n if progress_cb is not None:\n progress_cb(offset, sr)\n sr.flush()\n\n\ndef hexdump(data):\n \"\"\"\n Print a hex dump.\n\n Parameters\n ----------\n data : indexable\n Data to display dump of, can be anything that supports length and index\n operations.\n \"\"\"\n ret = ''\n ofs = 0\n while data:\n # get 16 bytes from byte array and store in \"chunk\"\n chunk = data[:16]\n # remove bytes from data\n data = data[16:]\n s = \"%08X \" % ofs\n s += \" \".join(\"%02X\" % c for c in chunk[:8]) + \" \"\n s += \" \".join(\"%02X\" % c for c in chunk[8:])\n s += \"\".join(\" \" for i in range(60 - len(s))) + \"|\"\n for c in chunk:\n s += chr(c) if 32 <= c < 128 else '.'\n s += '|\\n'\n ofs += 16\n ret += s\n return ret\n\n\ndef print_dir_listing(files):\n \"\"\"\n Print a directory listing.\n\n Parameters\n ----------\n files : [bytes]\n List of file names in the directory.\n \"\"\"\n for f in files:\n print(printable_text_from_device(f))\n\n\ndef get_args():\n \"\"\"\n Get and parse arguments.\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Swift Nav File I/O Utility version ' + VERSION)\n parser.add_argument(\n '-w',\n '--write',\n nargs=2,\n help='Write a file from local SOURCE to remote destination DEST',\n metavar=('SOURCE', 'DEST'))\n parser.add_argument(\n '-r',\n '--read',\n nargs='+',\n help='read a file from remote SOURCE to local DEST. 
If no DEST is provided, file is read to stdout.',\n metavar=('SOURCE', 'DEST'))\n parser.add_argument('-l', '--list', default=None, nargs=1, help='list a directory')\n parser.add_argument('-d', '--delete', nargs=1, help='delete a file')\n parser.add_argument(\n '-p',\n '--port',\n default=serial_link.SERIAL_PORT,\n help='specify the serial port to use.')\n parser.add_argument(\n \"-b\",\n \"--baud\",\n default=serial_link.SERIAL_BAUD,\n help=\"specify the baud rate to use.\")\n parser.add_argument(\n \"-t\",\n \"--tcp\",\n action=\"store_true\",\n default=False,\n help=\"Use a TCP connection instead of a local serial port. \\\n If TCP is selected, the port is interpreted as host:port\"\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"print extra debugging information.\",\n action=\"store_true\")\n parser.add_argument(\n \"-x\", \"--hex\", help=\"output in hex dump format.\", action=\"store_true\")\n parser.add_argument(\n \"-f\",\n \"--ftdi\",\n help=\"use pylibftdi instead of pyserial.\",\n action=\"store_true\")\n return parser.parse_args()\n\n\ndef raw_filename(str_filename):\n \"\"\"Return a filename in raw bytes from a command line option string.\"\"\"\n # Non-unicode characters/bytes in the command line options are decoded by\n # using 'surrogateescape' and file system encoding, and this reverts that.\n # References:\n # https://www.python.org/dev/peps/pep-0383/\n # https://docs.python.org/3/library/os.html#file-names-command-line-arguments-and-environment-variables\n return bytes(str_filename, sys.getfilesystemencoding(), 'surrogateescape')\n\n\ndef printable_text_from_device(data):\n \"\"\"Takes text data from the device as bytes and returns a string where any\n characters incompatible with stdout have been replaced with '?'\"\"\"\n str = data.decode(TEXT_ENCODING, 'replace')\\\n .encode(sys.stdout.encoding, 'replace')\\\n .decode(sys.stdout.encoding)\n return str\n\n\ndef mk_progress_cb(file_length):\n time_last = [Time.now()]\n offset_last = [0]\n\n b_to_mb = 1024 * 1024.0\n file_mb = file_length / b_to_mb\n rolling_avg_len = 20\n rolling_avg_pts = []\n previous_avg = [None]\n\n def compute_rolling_average(speed_kbs):\n removed_pt = None\n if len(rolling_avg_pts) >= rolling_avg_len:\n removed_pt = rolling_avg_pts.pop(0)\n rolling_avg_pts.append(speed_kbs)\n if removed_pt is not None:\n assert previous_avg[0] is not None\n new_avg_contrib = speed_kbs / rolling_avg_len\n removed_avg_contrib = removed_pt / rolling_avg_len\n previous_avg[0] -= removed_avg_contrib\n previous_avg[0] += new_avg_contrib\n return previous_avg[0]\n else:\n previous_avg[0] = sum(rolling_avg_pts) / len(rolling_avg_pts)\n return previous_avg[0]\n\n def the_callback(offset, repeater):\n time_current = Time.now()\n offset_delta = offset - offset_last[0]\n time_delta = time_current - time_last[0]\n percent_done = 100 * (offset / float(file_length))\n mb_confirmed = offset / b_to_mb\n speed_kbs = offset_delta / time_delta.to_float() / 1024\n rolling_avg = compute_rolling_average(speed_kbs)\n fmt_str = \"\\r[{:02.02f}% ({:.02f}/{:.02f} MB) at {:.02f} kB/s ({:0.02f}% retried)]\"\n percent_retried = 100 * (repeater.total_retries / repeater.total_sends)\n status_str = fmt_str.format(percent_done,\n mb_confirmed,\n file_mb,\n rolling_avg,\n percent_retried,\n repeater.total_retries,\n repeater.total_sends)\n sys.stdout.write(status_str)\n sys.stdout.flush()\n time_last[0] = time_current\n offset_last[0] = offset\n\n return the_callback\n\n\ndef main():\n\n args = get_args()\n selected_driver = 
serial_link.get_base_args_driver(args)\n\n # Driver with context\n with selected_driver as driver:\n # Handler with context\n with Handler(Framer(driver.read, driver.write, args.verbose)) as link:\n f = FileIO(link)\n try:\n if args.write:\n file_data = bytearray(open(args.write[0], 'rb').read())\n f.write(raw_filename(args.write[1]), file_data, progress_cb=mk_progress_cb(len(file_data)))\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n elif args.read:\n if len(args.read) not in [1, 2]:\n sys.stderr.write(\"Error: fileio read requires either 1 or 2 arguments, SOURCE and optionally DEST.\")\n sys.exit(1)\n data = f.read(raw_filename(args.read[0]))\n if len(args.read) == 2:\n with open(args.read[1], ('w' if args.hex else 'wb')) as fd:\n fd.write(hexdump(data) if args.hex else data)\n elif args.hex:\n print(hexdump(data))\n else:\n print(printable_text_from_device(data))\n elif args.delete:\n f.remove(raw_filename(args.delete[0]))\n elif args.list is not None:\n print_dir_listing(f.readdir(raw_filename(args.list[0])))\n else:\n print(\"No command given, listing root directory:\")\n print_dir_listing(f.readdir())\n except KeyboardInterrupt:\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"allenjseb/piksi_tools","sub_path":"piksi_tools/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":27026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"70734294653","text":"__author__ = 'elpablogrande'\n\nclass PyLRUCache:\n \"\"\"\n Instances of PyLRUCache:\n hold the cache of data in a dictionary as key-value pairs\n Keep track of the rank of each key in the dictionary\n Set and get items in the cache and update rank accordingly\n PyLRUCache instance Variables/Objects:\n cache_size: Integer. Number of items cache can hold.\n rank_offset: Integer. Keeps track of order in which items are inserted, updated or accessed.\n cache_dict: Dictionary. Holds the data being contained in the LRU.\n Also contains the rank_offset at the time of last insert, read or update for the key.\n keymap: List. 
Holds dictionaries containing the key and rank offset of each item in the cache_dict.\n Always sorted by rank, so the dictionary at position 0 always represents the least recently used item.\n rank offset being stored here allows fast lookup of the ordinal position of any key in the keymap list.\n \"\"\"\n\n cache_size = 0\n rank_offset = 0\n cache_dict = {}\n keymap = []\n\n def __init__(self, capacity):\n \"\"\"\n Initializer method for cache object.\n :param capacity: capacity of cache (int).\n :return: void\n \"\"\"\n self.cache_size = capacity\n\n def set_key(self, key, value):\n \"\"\"\n Sets value for a given key and upserts key's value in the cache dictionary.\n :param key: Key to be added to the cache dictionary.\n :param value: Value for input key\n :return: 0 for OK\n \"\"\"\n\n # increment rank offset\n self.rank_offset += 1\n\n # rank offset value of key, if it exists; defaults to current rank offset if not\n key_rank_offset = self.rank_offset\n key_is_new = False\n\n # Does the key exist?\n try:\n key_rank_offset = self.cache_dict[key]['rank_offset']\n except KeyError:\n # If not:\n # Add the key to the dictionary\n key_is_new = True\n\n if key_is_new:\n if len(self.cache_dict) == self.cache_size:\n # If the dictionary is at capacity:\n # Remove the item at the lowest rank\n # Pop the lowest-ranked key reference from the rank keymap\n lowest_keymap = self.keymap[0]\n lowest_key = lowest_keymap['key']\n del self.cache_dict[lowest_key]\n self.keymap.pop(0)\n\n else:\n # If key exists:\n # Find the existing key's ordinal position in the keymap and pop that item\n keymap_ordinal = self.find_keymap_ordinal(key_rank_offset, 0, len(self.keymap))\n self.keymap.pop(keymap_ordinal)\n\n # Insert the new keymap reference and upsert the key\n self.keymap.append({'key': key, 'rank_offset':self.rank_offset})\n self.cache_dict[key] = {'value': value, 'rank_offset': self.rank_offset}\n\n # debug\n #print(self.cache_dict)\n #print(self.keymap)\n\n return 0\n\n def get_key_value(self, key):\n \"\"\"\n Returns value for key in the LRU cache dictionary if it exists.\n Updates use rank for key if exists\n :param key: key being searched.\n :return: Value for key if exists, or None\n \"\"\"\n\n key_dict = {}\n # Find the key and its rank offset in the LRU dictionary. Return None if doesn't exist.\n try:\n key_dict = self.cache_dict[key]\n except KeyError:\n return None\n\n # Key exists:\n # Increment rank offset\n self.rank_offset += 1\n\n # Find its ordinal position in keymap and pop that item\n keymap_ordinal = self.find_keymap_ordinal(key_dict['rank_offset'], 0, len(self.keymap))\n self.keymap.pop(keymap_ordinal)\n\n # Insert new item with rank offset for key in keymap\n self.keymap.append({'key': key, 'rank_offset': self.rank_offset})\n\n # Update rank offset in cache dictionary\n key_dict['rank_offset'] = self.rank_offset\n self.cache_dict[key] = key_dict\n\n # debug\n #print(self.cache_dict)\n #print(self.keymap)\n\n # Return the key's value.\n return key_dict['value']\n\n def find_keymap_ordinal(self, offset, min_position, max_position):\n \"\"\"\n Recursively finds the ordinal position of a key in the sorted keymap list based on the rank offset.\n Assumes it is being implemented correctly, i.e., it isn't equipped to handle an offset that doesn't exist.\n Also assumes that the keymap list is sorted by rank offset.\n :param offset: Integer. Rank offset value for the key being searched.\n :param min_position: Integer. Lowest ordinal position to consider.\n :param max_position: Integer. 
Highest ordinal position to consider.\n :return: integer ordinal position of item in keymap for offset.\n \"\"\"\n\n # Find the ordinal halfway between the min and max positions\n check_ordinal = min_position + int((max_position - min_position) // 2)\n\n # Get its rank offset value\n rank_offset = self.keymap[check_ordinal]['rank_offset']\n\n if rank_offset == offset:\n # if it matches, we've found our ordinal. Return.\n return check_ordinal\n elif rank_offset < offset:\n # recurse high.\n return self.find_keymap_ordinal(offset, check_ordinal, max_position)\n else:\n # recurse low.\n return self.find_keymap_ordinal(offset, min_position, check_ordinal)\n","repo_name":"elpablogrande/PyLRU","sub_path":"pylru/pylrucache.py","file_name":"pylrucache.py","file_ext":"py","file_size_in_byte":5583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19102942901","text":"import urequests\nimport config\n\ndef upload_reading(reading):\n # create adafruit.io payload format\n payload = {\n \"created_at\": reading[\"timestamp\"],\n \"feeds\": []\n }\n\n # add all the sensor readings\n nickname = config.nickname\n for key, value in reading[\"readings\"].items():\n key = key.replace(\"_\", \"-\")\n payload[\"feeds\"].append({\"key\": f\"{nickname}-{key}\", \"value\": value})\n\n # send the payload\n username = config.adafruit_io_username\n headers = {'X-AIO-Key': config.adafruit_io_key, 'Content-Type': 'application/json'}\n url = f\"http://io.adafruit.com/api/v2/{username}/groups/enviro/data\"\n\n try:\n result = urequests.post(url, json=payload, headers=headers)\n result.close()\n return result.status_code == 200\n except:\n pass\n \n return False","repo_name":"PascalKern/enviro.prev","sub_path":"enviro/destinations/adafruit_io.py","file_name":"adafruit_io.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25514307793","text":"import os\r\nimport os.path\r\nimport sys\r\nimport re\r\nimport glob\r\n\r\n\r\nfpath = sys.argv[-1]\r\n#fpath = \"F:\\\\Try\\\\train\"\r\nx = os.getcwd()\r\n\r\ndef num_there():\r\n global spamCount\r\n global hamCount\r\n global fileCount_ham\r\n global fileCount_spam\r\n for root, dirs, files in os.walk(fpath):\r\n #path = root.split('\\\\')\r\n # print(path)\r\n temp = os.path.normpath(root)\r\n path = os.path.basename(temp)\r\n os.chdir(root)\r\n if path == 'spam':\r\n for file in files:\r\n if file.endswith(\".txt\"):\r\n #print(file)\r\n fileCount_spam += 1\r\n f = open( file, 'r', encoding=\"latin1\")\r\n predicate = f.read().strip().split()\r\n for line in predicate:\r\n if line in dict_spam:\r\n count = dict_spam[line]\r\n count += 1\r\n spamCount += 1\r\n dict_spam[line] = count\r\n else:\r\n dict_ham[line] = 1\r\n dict_spam[line] = 2\r\n spamCount = spamCount + 2\r\n hamCount = hamCount + 1\r\n f.close()\r\n\r\n if path == 'ham':\r\n os.chdir(root)\r\n for file in files:\r\n if file.endswith(\".txt\"):\r\n #print(file)\r\n fileCount_ham += 1\r\n f = open(file, 'r', encoding=\"latin1\")\r\n predicate = f.read().strip().split()\r\n for line in predicate:\r\n if line in dict_ham:\r\n count = dict_ham[line]\r\n count += 1\r\n hamCount += 1\r\n dict_ham[line] = count\r\n else:\r\n dict_spam[line] = 1\r\n dict_ham[line] = 2\r\n hamCount = hamCount + 2\r\n spamCount = spamCount + 1\r\n f.close()\r\n\r\n #print(len(dict_spam)) # dict is words\r\n #print(spamCount)\r\n #print(len(dict_ham))\r\n #print(hamCount)\r\n\r\n 
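PyLRUCache above keeps a separately sorted keymap list and binary-searches it by rank offset to locate a key. The standard library can carry the same recency bookkeeping with collections.OrderedDict, where move_to_end and popitem(last=False) give O(1) promotion and eviction. A sketch for comparison, not a drop-in replacement for the class above:

from collections import OrderedDict

class LRUCache:
    def __init__(self, capacity):
        self.capacity = capacity
        self._data = OrderedDict()

    def get(self, key):
        if key not in self._data:
            return None
        self._data.move_to_end(key)  # mark as most recently used
        return self._data[key]

    def set(self, key, value):
        if key in self._data:
            self._data.move_to_end(key)
        self._data[key] = value
        if len(self._data) > self.capacity:
            self._data.popitem(last=False)  # evict least recently used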
#print(fileCount_spam)\r\n #print(fileCount_ham)\r\n\r\n for i in dict_spam:\r\n count = dict_spam[i]\r\n count = float(count) / float(spamCount)\r\n dict_spam[i] = float(count)\r\n\r\n #print(dict_spam)\r\n\r\n for i in dict_ham:\r\n count = dict_ham[i]\r\n count = float(count) / float(hamCount)\r\n dict_ham[i] = float(count)\r\n\r\n #print(dict_ham)\r\n\r\n denominator = fileCount_spam + fileCount_ham\r\n spamCount = float(fileCount_spam) / float(denominator)\r\n hamCount = float(fileCount_ham) / float(denominator)\r\n\r\n \r\n os.chdir(x)\r\n rw = open('nbmodel.txt', 'w', encoding='latin1')\r\n rw.write(str(spamCount) + \"\\n\")\r\n rw.write(str(hamCount) + \"\\n\")\r\n \r\n for i in dict_spam:\r\n rw.write(i + \" \\t \" + str(dict_spam.get(i)) + \" \\t \" + str(dict_ham.get(i)) + \"\\n\")\r\n #rw.write(i + \" \\t \" + str(dict_ham.get(i)) + \"\\n\")\r\n rw.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n dict_spam = {}\r\n dict_ham = {}\r\n hamCount = 0\r\n spamCount = 0\r\n fileCount_spam = 0\r\n fileCount_ham = 0\r\n num_there()\r\n","repo_name":"prayagrs/NLP","sub_path":"Naive Bayes/nblearn.py","file_name":"nblearn.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"44030336310","text":"#https://huggingface.co/fhswf/bert_de_ner?text=Moskau+liefert+Polen+und+Bulgarien+kein+Gas+mehr\n#vohrer tensorflow, transformers und torch mit pip installieren\nfrom base64 import encode\nfrom transformers import pipeline\nimport sys\nimport csv\n\ndef german_ner(text):\n combined_ner_token=[]\n final_list=[]\n indizes =[]\n ner=[]\n\n classifier = pipeline('ner', model=\"fhswf/bert_de_ner\")\n ner_transformation_result = classifier(text)\n ner_token = [elem['word'] for elem in ner_transformation_result]\n\n #zusammenhängende NER Token in einer seperaten Liste speichern\n #gleichzeitig diese Token aus der ner_token Liste löschen\n for index in reversed(indizes):\n combined_ner_token.append([ner_token[index-1],ner_token[index]])\n ner_token.remove(ner_token[index])\n ner_token.remove(ner_token[index-1])\n\n\n ner.extend(combined_ner_token)\n ner.extend(ner_token)\n\n #Wörter aus dem Text als Liste speichern\n for list_elem in text.split():\n #jedes NER Token\n for ner_elem in ner:\n #Prüfen, ob es sich um eine Liste zusammengehöriger NER Token handelt\n if type(ner_elem)== list:\n #es wird das Wort gesucht, in dem alle zusammengehörigen NER Token vorkommen\n for sub_ner_elem in ner_elem:\n first_found=\"\"\n #dafür sorgen, das beide Teilwörter aus der ner_token auch im selben Wort vorkommen.\n if sub_ner_elem.casefold() in list_elem.casefold(): #casefold lässt ein ignoriert Groß und Kleinschreibung beim Vergleichen \n if list_elem == first_found:\n final_list.append(list_elem)\n #das gefundene Wort zum ersten Token der zusammengehörigen NER Token wird gespeichert, damit darüber geprüft werden kann, ob die anderen NER Token auch reinpassen \n first_found=list_elem\n #handelt es sich um ein einzelnes NER Token, wird das mit allen Wörtern aus dem Satz vergliuchen\n if ner_elem.casefold() in list_elem.casefold(): #casefold ist für case insensitives Vergleichen\n final_list.append(list_elem)\n #Dopplungen entfernen\n final_list =list(set(final_list))\n return(final_list)\n\n\nif __name__ == '__main__': \n \n text_file_path = sys.argv[1]\n #delimiter = sys.argv[2]\n result=[]\n # print(delimiter)\n #print(f\"text_file_path {text_file_path}\")\n \n with open(text_file_path,encoding=\"latin-1\") as 
csv_file:\n csv_reader = csv.reader(csv_file, delimiter = \"\\n\" )\n for row in csv_reader:\n #print(row)\n result.extend(german_ner(str(row)))\n\n print(result)","repo_name":"MarcusWalkow/OSINT_Framework","sub_path":"tools/german_ner.py","file_name":"german_ner.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41636236244","text":"def matrix_block_sum(mat, k):\n \"\"\"\n Returns a matrix answer where each answer[i][j] is the sum of all elements mat[r][c] for:\n i - k <= r <= i + k,\n j - k <= c <= j + k, and\n (r, c) is a valid position in the matrix.\n\n :param mat: a list of lists of integers representing the input matrix\n :param k: an integer representing the size of the sub-matrices\n :return: a list of lists of integers representing the answer matrix\n\n Time Complexity: o(m * n * k ^ 2)\n Space Complexity: o(n * m)\n \"\"\"\n\n # Get the number of rows and columns in the input matrix\n m, n = len(mat), len(mat[0])\n\n # Create a matrix of zeros to store the sums of the sub-matrices\n answer = [[0] * n for _ in range(m)]\n\n # Iterate over each element in the input matrix\n for i in range(m):\n for j in range(n):\n # Calculate the indices of the top-left and bottom-right corners\n # of the sub-matrix with center (i, j) and size (2*k+1) x (2*k+1)\n r1, c1 = max(0, i - k), max(0, j - k)\n r2, c2 = min(m - 1, i + k), min(n - 1, j + k)\n\n # Calculate the sum of all elements in the sub-matrix\n sub_matrix_sum = 0\n for r in range(r1, r2 + 1):\n for c in range(c1, c2 + 1):\n sub_matrix_sum += mat[r][c]\n\n # Store the sum of the sub-matrix in the corresponding element of the answer matrix\n answer[i][j] = sub_matrix_sum\n\n # Returning list of lists of integers representing the answer matrix\n return answer\n","repo_name":"Nitzantomer1998/LeetCode","sub_path":"Python/Dynamic Programming/Dynamic Programming I/1314. Matrix Block Sum.py","file_name":"1314. 
Matrix Block Sum.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1914999597","text":"# pytorch & misc\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\nimport argparse\nimport time\nimport json\n\n# dataset and model\nimport sys\nimport os\nsys.path.append(os.path.abspath('../common'))\nfrom cub200 import *\nfrom utils import *\nfrom model import ResNet101, ResNet50\n\n# number of attributes and landmark annotations\nnum_classes = 200\n\n# arguments\nparser = argparse.ArgumentParser(description='Evalution of Classification Accuracy')\nparser.add_argument('--load', default='', type=str, help='name of model to evaluate')\nparser.add_argument('--noaug', default='', action='store_true', help='disable test-time augmentation')\nargs = parser.parse_args()\n\ndef test(test_loader, model):\n \"\"\"\n Evaluate the accuracy on test set.\n\n Parameters\n ----------\n test_loader: torch.utils.data.DataLoader\n Data loader for the testing set.\n model: pytorch model object\n Model that generates attribute prediction.\n\n Returns\n ----------\n acc: float\n Accuracy on the test set.\n\n \"\"\"\n # set up the averagemeters\n batch_time = AverageMeter()\n acc = AverageMeter()\n\n # record the time\n end = time.time()\n\n # testing\n with torch.no_grad():\n for i, (input, target, _, _) in enumerate(test_loader):\n\n # data to gpu\n input = input.cuda()\n target = target.cuda()\n\n if args.noaug:\n # inference the model\n output, _, _ = model(input)\n \n else:\n # flip the data\n input_flip = torch.flip(input, dims=[3])\n\n # inference the model\n output, _, _ = model(input)\n output_flip, _, _ = model(input_flip)\n output = output + output_flip\n\n # calculate the accuracy\n acc.update(accuracy(output, target)[0].item(), input.size(0))\n\n # measure elapsed time\n torch.cuda.synchronize()\n batch_time.update(time.time() - end)\n end = time.time()\n\n # print the current testing status\n if i % 10 == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Acc {acc.val:.3f} ({acc.avg:.3f})'.format(\n i, len(test_loader), batch_time=batch_time, acc=acc), flush=True)\n\n # return the accuracy\n return acc.avg\n\ndef main():\n\n # load the config file\n config_file = '../../log/'+ args.load +'/train_config.json'\n with open(config_file) as fi:\n config = json.load(fi)\n print(\" \".join(\"\\033[96m{}\\033[0m: {},\".format(k, v) for k, v in config.items()))\n\n # define data transformation\n test_transforms = transforms.Compose([\n transforms.Resize(size=448),\n transforms.CenterCrop(size=448),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225))\n ])\n\n # define test dataset and loader\n test_data = CUB200(root='../../data/cub200',\n train=False, transform=test_transforms)\n\n test_loader = torch.utils.data.DataLoader(\n test_data, batch_size=config['batch_size'], shuffle=False,\n num_workers=config['workers'], pin_memory=False, drop_last=False)\n\n # load the model in eval mode\n if config['arch'] == 'resnet101':\n model = nn.DataParallel(ResNet101(num_classes, num_parts=config['nparts'])).cuda()\n elif config['arch'] == 'resnet50':\n model = nn.DataParallel(ResNet50(num_classes, num_parts=config['nparts'])).cuda()\n else:\n raise(RuntimeError(\"Only support resnet50 or resnet101 for architecture!\"))\n\n resume = '../../checkpoints/'+args.load+'_best.pth.tar'\n 
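The matrix_block_sum solution above recomputes every sub-matrix sum directly, which its own docstring prices at O(m * n * k^2). A 2-D prefix-sum table brings that down to O(m * n): build cumulative sums once, then answer each block with four lookups via inclusion-exclusion. A sketch of the standard technique:

def matrix_block_sum_prefix(mat, k):
    m, n = len(mat), len(mat[0])
    # pre[i][j] = sum of mat[0..i-1][0..j-1]; extra zero row/col avoids bounds checks.
    pre = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m):
        for j in range(n):
            pre[i + 1][j + 1] = mat[i][j] + pre[i][j + 1] + pre[i + 1][j] - pre[i][j]
    ans = [[0] * n for _ in range(m)]
    for i in range(m):
        for j in range(n):
            r1, c1 = max(0, i - k), max(0, j - k)
            r2, c2 = min(m - 1, i + k), min(n - 1, j + k)
            # Inclusion-exclusion over the prefix table.
            ans[i][j] = (pre[r2 + 1][c2 + 1] - pre[r1][c2 + 1]
                         - pre[r2 + 1][c1] + pre[r1][c1])
    return ans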
print(\"=> loading checkpoint '{}'\".format(resume))\n checkpoint = torch.load(resume)\n model.load_state_dict(checkpoint['state_dict'], strict=True)\n model.eval()\n\n # test the model\n acc = test(test_loader, model)\n\n # print the overall best acc\n print('Testing finished...')\n print('Best accuracy on test set is: %.4f.' % acc)\n\nif __name__ == '__main__':\n main()\n","repo_name":"zxhuang1698/interpretability-by-parts","sub_path":"src/cub200/eval_acc.py","file_name":"eval_acc.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"78"} +{"seq_id":"34641663274","text":"p1 = float(input('Nota P1: '))\nif p1 < 0 or p1 > 10:\n raise ValueError('Valor de nota P1 inválido!')\np2 = float(input('Nota P2: '))\nif p2 < 0 or p2 > 10:\n raise ValueError('Valor de nota P1 inválido!')\nmédia = (p1 + p2) / 2\nif média <= 5:\n print('Sua média é de {:.1f}. Você foi \\033[1;31mREPROVADO!\\033[m'.format(média))\nelif 5 <= média < 7:\n print('Sua média é de {:.1f}. Você está de \\033[1;34mRECUPERAÇÃO!\\033[m'.format(média))\nelif média >= 7:\n print('Sua média é de {:.1f}. Você foi \\033[1;32mAPROVADO!\\033[m'.format(média))\n","repo_name":"danieloliv1/curso-python-exercicios","sub_path":"exercicios-python/ex 040.py","file_name":"ex 040.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72044342973","text":"#import sys\n#input = sys.stdin.readline\n\nn, m = map(int, input().split())\n\nparents = list(range(n+1))\n\ndef find(target):\n '''find parent'''\n if target == parents[target]:\n return target\n \n return find(parents[target])\n\nfor _ in range(m):\n calc, a, b = map(int, input().split())\n if a == b:\n if calc == 1:\n print(\"YES\")\n continue\n \n a = find(a)\n b = find(b)\n if calc == 0:\n if a < b:\n parents[b] = a\n else:\n parents[a] = b\n else:\n if a == b:\n print(\"YES\")\n else:\n print(\"NO\")","repo_name":"yerine/algorithms","sub_path":"boj/1717_집합의 표현.py","file_name":"1717_집합의 표현.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2746438310","text":"#ardit's first exercise trial\n\n#first we define a function to let us format the string beautifully\ndef sentences(text):\n questions=('how','why','where','when','who')\n upper_case= text.capitalize()\n if text.startswith(questions):\n return '{}?'.format(upper_case)\n else:\n return '{}.'.format(upper_case)\n\n#now we create the looops that keep iterating once the condition is true\n#this variable 'final results ' is the output yhat is going to b shownto the user\nfinal_results=[] \n \nwhile True:\n user_input= input('say something: ') \n if user_input== 'end':\n break\n else:\n final_results.append(sentences(user_input))\n \nprint(' '.join(final_results)) \n \n \n\n \n \n ","repo_name":"Nuii-tekky/Python-Projrcts","sub_path":"exercises/exercise_1.py","file_name":"exercise_1.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"8177021075","text":"\nfrom server import *\n\n\ndef lectura_parametres():\n \"\"\" Lectura dels parametres d'entrada. 
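The union-find solution above (boj 1717) walks to the root recursively without path compression, so repeated finds on a long chain stay linear and can exhaust Python's recursion limit on large inputs. A sketch with iterative path halving and union by size (make_dsu is a hypothetical helper, not the submitter's code):

def make_dsu(n):
    parent = list(range(n + 1))
    size = [1] * (n + 1)

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving keeps trees shallow
            x = parent[x]
        return x

    def union(a, b):
        ra, rb = find(a), find(b)
        if ra == rb:
            return
        if size[ra] < size[rb]:
            ra, rb = rb, ra
        parent[rb] = ra  # attach the smaller tree under the larger
        size[ra] += size[rb]

    return find, union

find, union = make_dsu(7)
union(1, 3)
print("YES" if find(1) == find(3) else "NO")  # YES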
\"\"\"\n\n nom_arxius = {'servidor': 'server.cfg', 'equips': 'equips.dat'}\n debug = False\n trobat_conf_server = False\n trobat_conf_equips = False\n for arg in sys.argv:\n if arg == \"-d\":\n debug = True\n print_if_debug(debug, \"Mode DEBUG ON.\")\n elif arg == \"-c\":\n trobat_conf_server = True\n elif trobat_conf_server:\n nom_arxius[\"servidor\"] = arg\n print_if_debug(debug, \"Arxiu de dades de software modificat: \" + arg)\n trobat_conf_server = False\n elif arg == \"-u\":\n trobat_conf_equips = True\n elif trobat_conf_equips:\n nom_arxius[\"equips\"] = arg\n print_if_debug(debug, \"Arxiu d'equips autoritzats modificat: \" + arg)\n trobat_conf_equips = False\n if trobat_conf_equips or trobat_conf_server:\n print(\"No s'han pogut obrir els arxius modificats\")\n sys.exit()\n return debug, obrir_arxius(nom_arxius)\n\n\ndef obrir_arxius(nom_arxius):\n \"\"\" Retorna les dades del servidor i dels equips\"\"\"\n try:\n f_server = open(nom_arxius['servidor'], 'r')\n f_equip = open(nom_arxius['equips'], 'r')\n except EnvironmentError:\n print_if_error(\"No s'han pogut obrir els arxius.\")\n sys.exit()\n return {'servidor': f_server, 'equips': f_equip}\n\n\ndef agafar_dades_servidor(fitxer):\n \"\"\" Retorna les dades del servidor\"\"\"\n dades = {}\n lines = fitxer.readlines()\n for line in lines:\n line = line.split()\n for word in line:\n if word in ('Nom', 'MAC', 'UDP-port', 'TCP-port'):\n dades[word] = line[1]\n return dades\n\n\ndef agafar_dades_equips(fitxer):\n \"\"\" Retorna una llista amb tots els equips \"\"\"\n llistat_dades = []\n lines = fitxer.readlines()\n for line in lines:\n line = line.split()\n if len(line) == 2:\n llistat_dades.append(dades_equip(line))\n return llistat_dades\n\n\ndef dades_equip(line):\n \"\"\" Retorna les dades d'un equip. 
Per defecte, disconnected i 000000 \"\"\"\n dades = dict()\n dades['nom'] = line[0]\n dades['mac'] = line[1]\n dades['estat'] = 'DISCONNECTED'\n dades['aleatori'] = '000000'\n return dades\n\n\ndef to_str_tipus(tipus):\n \"\"\" Retorna la comanda segons el tipus \"\"\"\n tipus = ord(tipus)\n dicc_tipus = {0x00: 'REGISTER_REQ', 0x01: 'REGISTER_ACK', 0x02: 'REGISTER_NACK', 0x03: 'REGISTER_REJ', 0x09: 'ERROR', 0x10: 'ALIVE_INF', 0x11: 'ALIVE_ACK', 0x12: 'ALIVE_NACK', 0x13: 'ALIVE_REJ',\n 0x20: 'SEND_FILE', 0x21: 'SEND_ACK', 0x22: 'SEND_NACK', 0x23: 'SEND_REJ', 0x24: 'SEND_DATA', 0x25: 'SEND_END',\n 0x30: 'GET_FILE', 0x31: 'GET_ACK', 0x32: 'GET_NACK', 0x33: 'GET_REJ', 0x34: 'GET_DATA', 0x35: 'GET_END'\n }\n return dicc_tipus[tipus]\n\n\ndef to_int_tipus(nom):\n \"\"\"Retorna l'enter que correspon a la comanda\"\"\"\n dicc_tipus = {'REGISTER_REQ': 0x00, 'REGISTER_ACK': 0x01, 'REGISTER_NACK': 0x02, 'REGISTER_REJ': 0x03, 'ERROR': 0x09, 'ALIVE_INF': 0x10, 'ALIVE_ACK': 0x11, 'ALIVE_NACK': 0x12, 'ALIVE_REJ': 0x13,\n 'SEND_FILE': 0x20, 'SEND_ACK': 0x21, 'SEND_NACK': 0x22, 'SEND_REJ': 0x23, 'SEND_DATA': 0x24, 'SEND_END': 0x25,\n 'GET_FILE': 0x30, 'GET_ACK': 0x31, 'GET_NACK': 0x32, 'GET_REJ': 0x33, 'GET_DATA': 0x34, 'GET_END': 0x35}\n return dicc_tipus[nom]\n","repo_name":"Oriolac/xarxes-prac1","sub_path":"server_data.py","file_name":"server_data.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"ca","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"12335177282","text":"from api.models.block_type import BlockType\nfrom api.models.image import Image, ImageTag\nfrom rest_framework import serializers\n\n\nclass ImageSerializer(serializers.ModelSerializer):\n block_types = serializers.SlugRelatedField(\n slug_field=\"slug\",\n many=True,\n queryset=BlockType.objects.all(),\n )\n tags = serializers.SlugRelatedField(\n slug_field=\"slug\",\n many=True,\n queryset=ImageTag.objects.all(),\n )\n\n # При создании или редактировании картинки присылать поле file\n file = serializers.FileField(\n use_url=False,\n allow_empty_file=True,\n required=False,\n allow_null=True,\n write_only=True,\n )\n # При создании или редактировании картинки присылать поле previewFile\n previewFile = serializers.FileField(\n use_url=False,\n allow_empty_file=True,\n required=False,\n allow_null=True,\n write_only=True,\n )\n # При чтении будет приходить поле src\n src = serializers.FileField(\n source=\"file\",\n use_url=False,\n read_only=True,\n )\n # При чтении будет приходить поле preview\n preview = serializers.FileField(\n source=\"previewFile\",\n use_url=False,\n read_only=True,\n )\n is_common = serializers.BooleanField(write_only=True)\n\n def update(self, image, validated_data):\n # вытаскиваем block_types и tags из данных,\n # чтобы их нельзя было поменять\n validated_data.pop(\"block_types\", None)\n validated_data.pop(\"tags\", None)\n # Перезаписываем только те поля, которые были переданы\n for attr, value in validated_data.items():\n setattr(image, attr, value)\n\n image.save()\n\n return image\n\n class Meta:\n model = Image\n fields = (\n \"id\",\n \"file\", # Запись\n \"previewFile\", # Запись\n \"src\", # Чтение\n \"preview\", # Чтение\n \"author\",\n \"block_types\",\n \"tags\",\n \"x\",\n \"y\",\n \"width\",\n \"height\",\n \"borderRadius\",\n \"rotate\",\n \"scale\",\n \"borderX\",\n \"borderY\",\n \"is_common\", # Запись\n 
)\n","repo_name":"AlexRarus/creator","sub_path":"backend/api/serializers/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"35901974066","text":"import logging\nimport re\nfrom pathlib import Path\nfrom typing import Union, Dict, List\n\nimport flair\nfrom flair.data import Corpus, FlairDataset, Sentence, Token\nfrom flair.datasets import ColumnCorpus\nfrom nltk import sent_tokenize, word_tokenize\nfrom torch.utils.data.dataset import random_split\n\nfrom category import Categories, BaseWordCategories, BaseCharCategories\nfrom dataset.processor import character_tokenizer\n\nlog = logging.getLogger(\"truecaser\")\n\n\nclass TruecaseDataset(FlairDataset):\n def __init__(\n self,\n dataset: FlairDataset,\n lower_text=True,\n categories: Categories = BaseCharCategories()\n ):\n self.dataset = dataset\n self.lower_text = lower_text\n self.use_char_tokenizer = isinstance(categories, BaseCharCategories)\n self.categories = categories\n\n def is_in_memory(self) -> bool:\n return False\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index: int = 0) -> Sentence:\n sentence: Sentence = self.dataset[index]\n new_sentence: Sentence\n if self.use_char_tokenizer:\n new_sentence = Sentence(text=sentence.to_plain_string(), use_tokenizer=character_tokenizer)\n else:\n new_sentence = Sentence()\n new_sentence.tokens = sentence.tokens.copy()\n for token in new_sentence.tokens:\n token.add_tag(\n 'case', self.categories.encode(token.text)\n )\n if self.lower_text:\n token.text = token.text.lower()\n return new_sentence\n\n\nclass LineCorpus(Corpus):\n def __init__(\n self,\n data_folder: Union[str, Path],\n column_format: Dict[int, str],\n train_file=None,\n test_file=None,\n dev_file=None,\n tag_to_bioes=None,\n comment_symbol: str = None,\n in_memory: bool = True,\n encoding: str = \"utf-8\",\n document_separator_token: str = None,\n ):\n \"\"\"\n Instantiates a Corpus from CoNLL column-formatted task data such as CoNLL03 or CoNLL2000.\n\n :param data_folder: base folder with the task data\n :param column_format: a map specifying the column format\n :param train_file: the name of the train file\n :param test_file: the name of the test file\n :param dev_file: the name of the dev file, if None, dev data is sampled from train\n :param tag_to_bioes: whether to convert to BIOES tagging scheme\n :param comment_symbol: if set, lines that begin with this symbol are treated as comments\n :param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads\n :param document_separator_token: If provided, multiple sentences are read into one object. 
Provide the string token\n that indicates that a new document begins\n :return: a Corpus with annotated train, dev and test data\n \"\"\"\n\n if type(data_folder) == str:\n data_folder: Path = Path(data_folder)\n\n if train_file is not None:\n train_file = data_folder / train_file\n if test_file is not None:\n test_file = data_folder / test_file\n if dev_file is not None:\n dev_file = data_folder / dev_file\n\n # automatically identify train / test / dev files\n if train_file is None:\n for file in data_folder.iterdir():\n file_name = file.name\n if file_name.endswith(\".gz\"):\n continue\n if \"train\" in file_name and not \"54019\" in file_name:\n train_file = file\n if \"dev\" in file_name:\n dev_file = file\n if \"testa\" in file_name:\n dev_file = file\n if \"testb\" in file_name:\n test_file = file\n\n # if no test file is found, take any file with 'test' in name\n if test_file is None:\n for file in data_folder.iterdir():\n file_name = file.name\n if file_name.endswith(\".gz\"):\n continue\n if \"test\" in file_name:\n test_file = file\n\n log.info(\"Reading data from {}\".format(data_folder))\n log.info(\"Train: {}\".format(train_file))\n log.info(\"Dev: {}\".format(dev_file))\n log.info(\"Test: {}\".format(test_file))\n\n # get train data\n train = LineDataset(\n train_file,\n column_format,\n tag_to_bioes,\n encoding=encoding,\n comment_symbol=comment_symbol,\n in_memory=in_memory,\n document_separator_token=document_separator_token,\n )\n\n # read in test file if exists, otherwise sample 10% of train data as test dataset\n if test_file is not None:\n test = LineDataset(\n test_file,\n column_format,\n tag_to_bioes,\n encoding=encoding,\n comment_symbol=comment_symbol,\n in_memory=in_memory,\n document_separator_token=document_separator_token,\n )\n else:\n train_length = len(train)\n test_size: int = round(train_length / 10)\n splits = random_split(train, [train_length - test_size, test_size])\n train = splits[0]\n test = splits[1]\n\n # read in dev file if exists, otherwise sample 10% of train data as dev dataset\n if dev_file is not None:\n dev = LineDataset(\n dev_file,\n column_format,\n tag_to_bioes,\n encoding=encoding,\n comment_symbol=comment_symbol,\n in_memory=in_memory,\n document_separator_token=document_separator_token,\n )\n else:\n train_length = len(train)\n dev_size: int = round(train_length / 10)\n splits = random_split(train, [train_length - dev_size, dev_size])\n train = splits[0]\n dev = splits[1]\n\n super(LineCorpus, self).__init__(train, dev, test, name=data_folder.name)\n\n\nclass LineDataset(FlairDataset):\n def __init__(\n self,\n path_to_line_file: Path,\n column_name_map: Dict[int, str],\n tag_to_bioes: str = None,\n comment_symbol: str = None,\n in_memory: bool = True,\n document_separator_token: str = None,\n encoding: str = \"utf-8\",\n categories: Categories = BaseWordCategories()\n ):\n \"\"\"\n Instantiates a line dataset\n\n :param path_to_line_file: path to the file with the line-formatted data\n :param column_name_map: a map specifying the column format\n :param tag_to_bioes: whether to convert to BIOES tagging scheme\n :param comment_symbol: if set, lines that begin with this symbol are treated as comments\n :param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads\n :param document_separator_token: If provided, multiple sentences are read into one object. 
Provide the string token\n that indicates that a new document begins\n :param categories token categories\n \"\"\"\n self.categories = categories\n assert path_to_line_file.exists()\n self.path_to_column_file = path_to_line_file\n self.tag_to_bioes = tag_to_bioes\n self.column_name_map = column_name_map\n self.comment_symbol = comment_symbol\n self.document_separator_token = document_separator_token\n\n # store either Sentence objects in memory, or only file offsets\n self.in_memory = in_memory\n if self.in_memory:\n self.sentences: List[Sentence] = []\n else:\n self.indices: List[int] = []\n\n self.total_sentence_count: int = 0\n\n # most data sets have the token text in the first column, if not, pass 'text' as column\n self.text_column: int = 0\n for column in self.column_name_map:\n if column_name_map[column] == \"text\":\n self.text_column = column\n\n # determine encoding of text file\n self.encoding = encoding\n\n sentence: Sentence = Sentence()\n with open(str(self.path_to_column_file), encoding=self.encoding) as f:\n\n line = f.readline()\n position = 0\n\n while line:\n\n if self.comment_symbol is not None and line.startswith(comment_symbol):\n line = f.readline()\n continue\n\n # if self.__line_completes_sentence(line):\n #\n # if len(sentence) > 0:\n #\n # sentence.infer_space_after()\n # if self.in_memory:\n # if self.tag_to_bioes is not None:\n # sentence.convert_tag_scheme(\n # tag_type=self.tag_to_bioes, target_scheme=\"iobes\"\n # )\n # self.sentences.append(sentence)\n # else:\n # self.indices.append(position)\n # position = f.tell()\n # self.total_sentence_count += 1\n # sentence: Sentence = Sentence()\n #\n # else:\n # fields: List[str] = re.split(\"\\s+\", line)\n # token = Token(fields[self.text_column])\n # for column in column_name_map:\n # if len(fields) > column:\n # if column != self.text_column:\n # token.add_tag(\n # self.column_name_map[column], fields[column]\n # )\n #\n # if not line.isspace():\n # sentence.add_token(token)\n for sent in sent_tokenize(line):\n sentence = Sentence()\n for tok in word_tokenize(sent):\n token = Token(tok.lower())\n token.add_tag(\n 'case', self.categories.encode(tok)\n )\n sentence.add_token(token)\n self.sentences.append(sentence)\n\n line = f.readline()\n\n # if len(sentence.tokens) > 0:\n # sentence.infer_space_after()\n # if self.in_memory:\n # self.sentences.append(sentence)\n # else:\n # self.indices.append(position)\n # self.total_sentence_count += 1\n\n def __line_completes_sentence(self, line: str) -> bool:\n sentence_completed = line.isspace()\n if self.document_separator_token:\n sentence_completed = False\n fields: List[str] = re.split(\"\\s+\", line)\n if len(fields) >= self.text_column:\n if fields[self.text_column] == self.document_separator_token:\n sentence_completed = True\n return sentence_completed\n\n def is_in_memory(self) -> bool:\n return self.in_memory\n\n def __len__(self):\n return self.total_sentence_count\n\n def __getitem__(self, index: int = 0) -> Sentence:\n\n if self.in_memory:\n sentence = self.sentences[index]\n\n # else:\n # with open(str(self.path_to_column_file), encoding=self.encoding) as file:\n # file.seek(self.indices[index])\n # line = file.readline()\n # sentence: Sentence = Sentence()\n # while line:\n # if self.comment_symbol is not None and line.startswith(\n # self.comment_symbol\n # ):\n # line = file.readline()\n # continue\n #\n # if self.__line_completes_sentence(line):\n # if len(sentence) > 0:\n # sentence.infer_space_after()\n # if self.tag_to_bioes is not None:\n # 
sentence.convert_tag_scheme(\n # tag_type=self.tag_to_bioes, target_scheme=\"iobes\"\n # )\n # return sentence\n #\n # else:\n # fields: List[str] = re.split(\"\\s+\", line)\n # token = Token(fields[self.text_column])\n # for column in self.column_name_map:\n # if len(fields) > column:\n # if column != self.text_column:\n # token.add_tag(\n # self.column_name_map[column], fields[column]\n # )\n #\n # if not line.isspace():\n # sentence.add_token(token)\n #\n # line = file.readline()\n return sentence\n\n\nclass Conll_2003_Trucase(LineCorpus):\n def __init__(\n self,\n base_path: Union[str, Path] = None,\n tag_to_bioes: str = \"ner\",\n in_memory: bool = True,\n document_as_sequence: bool = False,\n ):\n \"\"\"\n :param base_path: Path to corpus on your machine\n :param tag_to_bioes: NER by default, need not be changed, but you could also select 'pos' or 'np' to predict\n POS tags or chunks respectively\n :param in_memory: If True, keeps dataset in memory giving speedups in training.\n :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object\n \"\"\"\n if type(base_path) == str:\n base_path: Path = Path(base_path)\n\n # column format\n columns = {0: \"text\", 1: \"pos\", 2: \"np\", 3: \"ner\"}\n\n # this dataset name\n dataset_name = self.__class__.__name__.lower()\n\n # default dataset folder is the cache root\n if not base_path:\n base_path = Path(flair.cache_root) / \"datasets\"\n data_folder = base_path / dataset_name\n\n # check if data there\n if not data_folder.exists():\n log.warning(\"-\" * 100)\n log.warning(f'ACHTUNG: dataset not found at \"{data_folder}\".')\n log.warning(\"-\" * 100)\n\n super(Conll_2003_Trucase, self).__init__(\n data_folder,\n columns,\n tag_to_bioes=tag_to_bioes,\n in_memory=in_memory,\n document_separator_token=None if not document_as_sequence else \"-DOCSTART-\",\n test_file='test_docs.txt',\n train_file='train_docs.txt',\n dev_file='valid_docs.txt',\n )\n\n\nclass Wiki_Data(LineCorpus):\n def __init__(\n self,\n base_path: Union[str, Path] = None,\n tag_to_bioes: str = \"ner\",\n in_memory: bool = True,\n document_as_sequence: bool = False,\n ):\n \"\"\"\n :param base_path: Path to the CoNLL-03 corpus on your machine\n :param tag_to_bioes: NER by default, need not be changed, but you could also select 'pos' or 'np' to predict\n POS tags or chunks respectively\n :param in_memory: If True, keeps dataset in memory giving speedups in training.\n :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object\n \"\"\"\n if type(base_path) == str:\n base_path: Path = Path(base_path)\n\n # column format\n columns = {0: \"text\", 1: \"pos\", 2: \"np\", 3: \"ner\"}\n\n # this dataset name\n dataset_name = self.__class__.__name__.lower()\n\n # default dataset folder is the cache root\n if not base_path:\n base_path = Path(flair.cache_root) / \"datasets\"\n data_folder = base_path / dataset_name\n\n # check if data there\n if not data_folder.exists():\n log.warning(\"-\" * 100)\n log.warning(f'ACHTUNG:dataset not found at \"{data_folder}\".')\n log.warning(\"-\" * 100)\n\n super(Wiki_Data, self).__init__(\n data_folder,\n columns,\n tag_to_bioes=tag_to_bioes,\n in_memory=in_memory,\n document_separator_token=None if not document_as_sequence else \"-DOCSTART-\",\n test_file='test.txt',\n train_file='train.txt',\n dev_file='dev.txt',\n )\n\n\nclass WIKINER_ENGLISH(ColumnCorpus):\n def __init__(\n self,\n base_path: Union[str, Path] = None,\n tag_to_bioes: str = \"ner\",\n 
in_memory: bool = False,\n ):\n if type(base_path) == str:\n base_path: Path = Path(base_path)\n\n # column format\n columns = {0: \"text\", 1: \"pos\", 2: \"ner\", 3: \"case\"}\n\n # this dataset name\n dataset_name = self.__class__.__name__.lower()\n\n # default dataset folder is the cache root\n if not base_path:\n base_path = Path(flair.cache_root) / \"datasets\"\n data_folder = base_path / dataset_name\n\n # download data if necessary\n #_download_wikiner(\"en\", dataset_name)\n\n super(WIKINER_ENGLISH, self).__init__(\n data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory\n )\n","repo_name":"malagus/truecaser-mt","sub_path":"dataset/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":17596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29465788671","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### Problem Statement: Loan Approval Prediction Problem\n# \n\n# In[225]:\n\n\nimport pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nfrom sklearn.ensemble import AdaBoostClassifier , GradientBoostingClassifier,RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier \n\nfrom sklearn.metrics import classification_report , accuracy_score , confusion_matrix\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import cross_val_score\n\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nle = LabelEncoder()\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n# In[226]:\n\n\ndata = pd.read_csv(\"loan_prediction.csv\")\ndata.head(5)\n\n\n# In[227]:\n\n\ndata.shape\n\n\n# In[228]:\n\n\ndata.dtypes\n\n\n# In[229]:\n\n\nsns.countplot(x=\"Gender\",hue=\"Loan_Status\",data=data)\n\n\n# In[230]:\n\n\nsns.countplot(x=\"Married\",hue=\"Loan_Status\",data=data)\n\n\n# In[231]:\n\n\ncorrelation_mat = data.corr()\n\n\n# In[232]:\n\n\nsns.heatmap(correlation_mat,annot=True,linewidths=.5,cmap=\"YlGnBu\")\n\n\n# ### There is a positive correlation between ApplicantIncome and LoanAmount, CoapplicantIncome and LoanAmount.\n\n# In[233]:\n\n\nsns.pairplot(data)\nplt.show()\n\n\n# In[234]:\n\n\ndata.describe()\n\n\n# In[235]:\n\n\ndata.info()\n\n\n# In[236]:\n\n\ndata.isnull().sum()\n\n\n# In[237]:\n\n\nplt.figure(figsize=(10,6))\nsns.heatmap(data.isnull(),yticklabels=False)\n\n\n# Prepare data for model training i.e. 
removing outliers, filling null values \n\n\n# In[238]:\n\n\nprint(data[\"Gender\"].value_counts())\nprint(data[\"Married\"].value_counts())\nprint(data[\"Self_Employed\"].value_counts())\nprint(data[\"Dependents\"].value_counts())\nprint(data[\"Credit_History\"].value_counts())\nprint(data[\"Loan_Amount_Term\"].value_counts())\n\n\n# In[239]:\n\n\ndata[\"Gender\"].fillna(data[\"Gender\"].mode()[0],inplace=True)\ndata[\"Married\"].fillna(data[\"Married\"].mode()[0],inplace=True)\ndata[\"Self_Employed\"].fillna(data[\"Self_Employed\"].mode()[0],inplace=True)\ndata[\"Loan_Amount_Term\"].fillna(data[\"Loan_Amount_Term\"].mode()[0],inplace=True)\ndata[\"Dependents\"].fillna(data[\"Dependents\"].mode()[0],inplace=True)\ndata[\"Credit_History\"].fillna(data[\"Credit_History\"].mode()[0],inplace=True)\n\ndata[\"Dependents\"] = data[\"Dependents\"].replace('3+',int(3))\ndata[\"Dependents\"] = data[\"Dependents\"].replace('1',int(1))\ndata[\"Dependents\"] = data[\"Dependents\"].replace('2',int(2))\ndata[\"Dependents\"] = data[\"Dependents\"].replace('0',int(0))\n\ndata[\"LoanAmount\"].fillna(data[\"LoanAmount\"].median(),inplace=True)\n\nprint(data.isnull().sum())\n\nplt.figure(figsize=(10,6))\nsns.heatmap(data.isnull())\n\n\n# In[240]:\n\n\ndata.head(5)\n\n\n# In[241]:\n\n\ndata[\"Gender\"] = le.fit_transform(data[\"Gender\"])\ndata[\"Married\"] = le.fit_transform(data[\"Married\"])\ndata[\"Education\"] = le.fit_transform(data[\"Education\"])\ndata[\"Self_Employed\"] = le.fit_transform(data[\"Self_Employed\"])\ndata[\"Property_Area\"] = le.fit_transform(data[\"Property_Area\"])\ndata[\"Loan_Status\"] = le.fit_transform(data[\"Loan_Status\"])\n\ndata.head(5)\n\n\n# In[242]:\n\n\nX = data.drop([\"Loan_Status\",\"Loan_ID\"],axis=1)\ny = data[\"Loan_Status\"]\n\n\n# In[243]:\n\n\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0)\n\n\n# ## Logistic Regression\n\n# In[244]:\n\n\nmodel=LogisticRegression(solver=\"liblinear\")\n\n\n# In[245]:\n\n\nmodel.fit(X_train,y_train)\n\n\n# In[246]:\n\n\nmodel.score(X_train,y_train)\n\n\n# In[247]:\n\n\nmodel.score(X_test,y_test)\n\n\n# ## Decision Tree\n\n# In[248]:\n\n\ndtree=DecisionTreeClassifier(criterion=\"gini\")\ndtree.fit(X_train,y_train)\n\n\n# In[249]:\n\n\ndtree.score(X_train,y_train)\n\n\n# In[250]:\n\n\ndtree.score(X_test,y_test)\n\n\n# In[251]:\n\n\ndTreeR = DecisionTreeClassifier(criterion = 'gini', max_depth = 3, random_state=0)\ndTreeR.fit(X_train, y_train)\nprint(dTreeR.score(X_train, y_train))\n\n\n# In[252]:\n\n\ny_predict = dTreeR.predict(X_test)\n\n\n# In[253]:\n\n\naccuracy_score(y_test,y_predict)\n\n\n# In[254]:\n\n\nprint(dTreeR.score(X_test, y_test))\n\n\n# In[255]:\n\n\nfrom sklearn import metrics\n\n\n# In[256]:\n\n\ncm=metrics.confusion_matrix(y_test, y_predict,labels=[0, 1])\n\ndf_cm = pd.DataFrame(cm, index = [i for i in [\"No\",\"Yes\"]],\n columns = [i for i in [\"No\",\"Yes\"]])\nplt.figure(figsize = (7,5))\nsns.heatmap(df_cm, annot=True ,fmt='g')\n\n\n# ## Bagging Classifier\n\n# In[257]:\n\n\nfrom sklearn.ensemble import BaggingClassifier\nbgcl = BaggingClassifier( n_estimators=150,base_estimator=dTreeR,random_state=0)\nbgcl = bgcl.fit(X_train,y_train)\ny_predict = bgcl.predict(X_test)\nprint(bgcl.score(X_test,y_test))\n\n\n# ## Confusion_Matrix\n\n# In[258]:\n\n\nfrom sklearn import metrics\ncm=metrics.confusion_matrix(y_test, y_predict,labels=[0, 1])\n\ndf_cm = pd.DataFrame(cm, index = [i for i in [\"No\",\"Yes\"]],\n columns = [i for i in [\"No\",\"Yes\"]])\nplt.figure(figsize = 
(7,5))\nsns.heatmap(df_cm, annot=True ,fmt='g')\n\n\n# ## AdaBoost Classifier\n\n# In[259]:\n\n\nfrom sklearn.ensemble import AdaBoostClassifier\nabcl = AdaBoostClassifier(n_estimators = 120,random_state=0)\nabcl = abcl.fit(X_train, y_train)\ny_predict = abcl.predict(X_test)\nprint(abcl.score(X_test, y_test))\n\n\n# ## GradientBoosting Classifier\n\n# In[260]:\n\n\nfrom sklearn.ensemble import GradientBoostingClassifier\ngbcl = GradientBoostingClassifier(n_estimators = 200,random_state=0)\ngbcl = gbcl.fit(X_train, y_train)\ny_predict = gbcl.predict(X_test)\nprint(gbcl.score(X_test, y_test))\n\n\n# In[261]:\n\n\ncm=metrics.confusion_matrix(y_test, y_predict,labels=[0, 1])\n\ndf_cm = pd.DataFrame(cm, index = [i for i in [\"No\",\"Yes\"]],\n columns = [i for i in [\"No\",\"Yes\"]])\nplt.figure(figsize = (7,5))\nsns.heatmap(df_cm, annot=True ,fmt='g')\n\n\n# ## RandomForest Classifier\n\n# In[262]:\n\n\nfrom sklearn.ensemble import RandomForestClassifier\nrfcl = RandomForestClassifier(n_estimators = 160, random_state=0,max_features=3)\nrfcl = rfcl.fit(X_train, y_train)\n\n\n# In[263]:\n\n\ny_predict = rfcl.predict(X_test)\nprint(rfcl.score(X_test, y_test))\n\n\n# In[264]:\n\n\ncm=metrics.confusion_matrix(y_test, y_predict,labels=[0, 1])\n\ndf_cm = pd.DataFrame(cm, index = [i for i in [\"No\",\"Yes\"]],\n columns = [i for i in [\"No\",\"Yes\"]])\nplt.figure(figsize = (7,5))\nsns.heatmap(df_cm, annot=True ,fmt='g')\n\n\n# ## Feature Importance\n\n# In[265]:\n\n\nimportances = pd.Series(rfcl.feature_importances_, index=X.columns)\n\nimportances.plot(kind='barh', figsize=(12,8))\n\n\n# In[266]:\n\n\nX = data.drop(['Loan_Status','Loan_ID'], axis=1)\ny = data.Loan_Status\n\n\n# ## Logistic Regression\n\n# In[267]:\n\n\nfrom sklearn.model_selection import train_test_split\n\nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3,random_state=42)\nregressor = LogisticRegression()\n\n\n# In[268]:\n\n\nregressor.fit(x_train, y_train)\ny_pred=regressor.predict(x_test)\n\naccuracy_score(y_test,y_pred)\n\n\n# ## RandomForest Classifier\n\n# In[269]:\n\n\nfrom sklearn.ensemble import RandomForestClassifier\nrfcl = RandomForestClassifier(n_estimators = 160, random_state=0,max_features=3)\nrfcl = rfcl.fit(X_train, y_train)\n\n\n# In[270]:\n\n\ny_predict = rfcl.predict(X_test)\naccuracy_score(y_test,y_predict)\n\n\n# ## GradientBoosting Classifier\n\n# In[271]:\n\n\nfrom sklearn.ensemble import GradientBoostingClassifier\ngbcl = GradientBoostingClassifier(n_estimators = 200,random_state=0)\ngbcl = gbcl.fit(X_train, y_train)\n\n\n# In[272]:\n\n\ny_predict1 = gbcl.predict(X_test)\naccuracy_score(y_test,y_predict1)\n\n\n# ## AdaBoost Classifier\n\n# In[273]:\n\n\nfrom sklearn.ensemble import AdaBoostClassifier\nabcl = AdaBoostClassifier(n_estimators = 120,random_state=0)\nabcl = abcl.fit(X_train, y_train)\n\n\n# In[274]:\n\n\ny_predict2 = abcl.predict(X_test)\naccuracy_score(y_test,y_predict2)\n\n\n# ## Bagging Classifier\n\n# In[275]:\n\n\nfrom sklearn.ensemble import BaggingClassifier\nbgcl = BaggingClassifier( n_estimators=150,base_estimator=dTreeR,random_state=0)\nbgcl = bgcl.fit(X_train,y_train)\n\n\n# In[276]:\n\n\ny_predict3 = bgcl.predict(X_test)\naccuracy_score(y_test,y_predict3)\n\n\n# ## DecisionTree Classifier\n\n# In[277]:\n\n\ndtree=DecisionTreeClassifier(criterion=\"gini\")\ndtree.fit(X_train,y_train)\n\n\n# In[278]:\n\n\ny_predict4 = dtree.predict(X_test)\n\n\n# In[279]:\n\n\naccuracy_score(y_test,y_predict4)\n\n\n# In[ 
]:\n\n\n\n\n","repo_name":"RithikaRajendran/Loan_approval_prediction","sub_path":"loan_approval_prediction-master (2)/loan_approval_prediction-master/My_Project (1).py","file_name":"My_Project (1).py","file_ext":"py","file_size_in_byte":8160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8399354819","text":"from django.utils.timezone import now\nfrom django.core.exceptions import ObjectDoesNotExist\nimport sys\nfrom django.db import IntegrityError\nfrom rest_framework import viewsets\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework.decorators import permission_classes\nfrom rest_framework.response import Response\nfrom django.http import HttpResponse, JsonResponse\nfrom .serializers import *\nfrom .models import *\nfrom events.models import *\nfrom auth.permissions import IsAuthenticated\nfrom common.pagination import CustomPagination\n\nclass ResourceViewset(viewsets.ModelViewSet):\n queryset = Resource.objects.all()\n serializer_class = ResourceSerializer\n pagination_class = CustomPagination\n permission_classes =[IsAuthenticated]\n filter_backends = [DjangoFilterBackend]\n filterset_fields = ['name']\n ordering_fields = ['name']\n search_fields = ['name']\n\n def create(self, request, *args, **kwargs):\n data = request.data\n print(data)\n try:\n if not data:\n return HttpResponse(status=400)\n else:\n resource = Resource.objects.create(\n name=data['name'],\n )\n serializer = ResourceSerializer(resource, data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n event = Event( \n timestamp=now(),\n action='Resource Created',\n resource=Resource.objects.get(name=data['name'])\n )\n event.save()\n print(event)\n return Response(serializer.data)\n except IntegrityError:\n return HttpResponse(status=400)\n\n def partial_update(self, request, pk, *args, **kwargs):\n data = request.data\n try:\n resource = Resource.objects.get(id=pk)\n print(resource)\n serializer = ResourceSerializer(resource,data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n event = Event( \n timestamp=now(),\n action='Resource Updated',\n resource=Resource.objects.get(pk=resource.id)\n )\n event.save()\n print(event)\n return Response(serializer.data)\n except ObjectDoesNotExist:\n return JsonResponse({\"error\": 'Resource Does Not Exist'}) \n except IntegrityError: \n return HttpResponse(status=400)\n\n \n\n def destroy(self, request, pk, *args, **kwargs):\n try:\n instance = self.get_object()\n event = Event( \n timestamp=now(),\n action='Resource Deleted',\n resource=Resource.objects.get(name=instance.name)\n )\n event.save()\n self.perform_destroy(instance)\n print(event)\n return JsonResponse({\"success\": f'Object {pk} Deleted'})\n except ObjectDoesNotExist:\n return JsonResponse({\"error\": 'Object Does Not Exist'}) \n except:\n print(\"Unexpected errors:\", type(sys.exc_info()), sys.exc_info())\n return JsonResponse({\"error\": str(sys.exc_info())})\n","repo_name":"KingBoolean89/Bopis_poc","sub_path":"RBAC/resources/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38096626772","text":"import igCubedSphere\nimport vtk\nimport math\nimport numpy\n\nEPS = 1.e-14\n\ndef getLambdaTheta(x, y, z):\n # get lon/lat in radians\n lam = math.atan2(y, x)\n rho = math.sqrt(x*x + y*y)\n the = math.atan2(z, rho)\n return lam, the\n\n\ndef getMinDLambda(lam0, 
lam1):\n # handle the international date line wrap-around issue\n dlam = lam1 - lam0\n a = [abs(dlam + i*2*math.pi) for i in (-1, 0, 1)]\n index = numpy.argmin(a)\n return dlam + (index - 1)*2*math.pi\n\n\ndef integralDPhiDLambdaOverCosTheta(lama, lamb, thea, theb):\n dthe = theb - thea\n dlam = getMinDLambda(lama, lamb)\n if abs(dlam) > EPS:\n return dthe*(math.cos(lamb) - math.cos(lama))/dlam\n else:\n # lama == lamb:\n return -dthe*math.sin(lama)\n\n\ndef integralDPhiDThetaCosTheta(lama, lamb, thea, theb):\n dthe = theb - thea\n dlam = getMinDLambda(lama, lamb)\n la, lb = lama, lamb\n ta, tb = thea, theb\n return 0.5*dlam* (-2*dthe*math.cos(la)*math.cos(2*ta) + 2*dthe*math.cos(lb)*math.cos(2*tb) - \\\n 2*dlam*math.cos(ta)*math.sin(la)*math.sin(ta) + dlam*math.sin(lb)*math.sin(2*tb))/ \\\n (dlam**2 - 4*dthe**2)\n\ndef phi(lam, the):\n return math.cos(lam) * math.sin(the)\n\n\nn = 21\ncs = igCubedSphere.CubedSphere(n)\ngrid = cs.getUnstructuredGrid()\n\nnumCells = grid.GetNumberOfCells()\ndivData = numpy.zeros((numCells,), numpy.float64)\n\n# iterate over the cells\npoints = grid.GetPoints()\nfor cellId in range(numCells):\n cell = grid.GetCell(cellId)\n ptIds = cell.GetPointIds()\n numPoints = ptIds.GetNumberOfIds()\n #compute the closed loop integral\n divVal = 0.0\n for i0 in range(numPoints):\n\n i1 = (i0 + 1) % numPoints\n\n ptId0, ptId1 = ptIds.GetId(i0), ptIds.GetId(i1)\n\n x0, y0, z0 = points.GetPoint(ptId0)\n x1, y1, z1 = points.GetPoint(ptId1)\n\n # retreat by a tiny bit in order to capture multivalued jumps \n #x1 = x0 + (x1 - x0)*(1. - EPS)\n #y1 = y0 + (y1 - y0)*(1. - EPS)\n #z1 = z0 + (z1 - z0)*(1. - EPS)\n\n lama, thea = getLambdaTheta(x0, y0, z0)\n lamb, theb = getLambdaTheta(x1, y1, z1)\n\n divVal += integralDPhiDLambdaOverCosTheta(lama, lamb, thea, theb)\n divVal += integralDPhiDThetaCosTheta(lama, lamb, thea, theb)\n #divVal += 0.25*getMinDLambda(lama, lamb)\n\n divData[cellId] = divVal\n\n# attach cell centred values to the grid\ndataArray = vtk.vtkDoubleArray()\ndataArray.SetNumberOfComponents(1)\ndataArray.SetNumberOfTuples(numCells)\nsave = 1\ndataArray.SetVoidArray(divData, numCells, save)\n\ngrid.GetCellData().SetScalars(dataArray)\n\n# save/show\ncs.save('div2.vtk')\ncs.show()\n","repo_name":"pletzer/inugrid","sub_path":"py/igDivExample2.py","file_name":"igDivExample2.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37226898664","text":"import os\nimport random\nimport re\n\nTRAIN_DIR = r'training_set'\nTEST_DIR = r'testing_set'\nCHANCE = list(range(10))\n\n# create folders\nos.makedirs(\"{}/empty\".format(TRAIN_DIR),exist_ok=True)\nos.makedirs(\"{}/empty\".format(TEST_DIR),exist_ok=True)\nos.makedirs(\"{}/puppies\".format(TRAIN_DIR),exist_ok=True)\nos.makedirs(\"{}/puppies\".format(TEST_DIR),exist_ok=True)\n\nfor root, dirs, files in os.walk(\"Dataset\", topdown=False):\n for file in files:\n origin = \"{}/{}\".format(root, file)\n # 0.1 chance to be in test set, otherwise it will be used for training\n if random.choice(CHANCE) == 1:\n destination = TEST_DIR\n else:\n destination = TRAIN_DIR\n\n # depending on the root, it will either be placed in 'empty' or 'puppies'\n if re.search('empty', root) is not None:\n destination += \"/empty/{}\".format(file)\n else:\n destination += \"/puppies/{}\".format(file) \n\n # move file to new combined folder\n os.rename(origin, destination)\n\n # delete old folder\n 
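# (topdown=False walks children before parents, so each folder is already empty here)\n 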
os.rmdir(root)\n\n\n","repo_name":"runrunLauren/orange-puppy-project","sub_path":"scripts/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"24752873704","text":"# -*- coding: cp1252 -*-\n#!usr/bin/python3.2\n\n#programme d'écriture: stéganographie\n#Ouverture des fichiers\noriginal=input(\"Quel est le nom de l'image originale ?\")\nfinal=input(\"Quel est le nom de l'image à créer ?\")\ncode=input(\"Quel est le code à envoyer ?\")\n\nfile1=open(original+'.ppm','r')\nfile2=open(final+'.ppm','w')\n\n#On lit puis on copie les premières lignes\nligne1=str(file1.readline())\nprint(ligne1)\nfile2.write(ligne1)\n\nligne1=str(file1.readline())\nprint(ligne1)\nfile2.write(ligne1)\n\nligne1=str(file1.readline())\nprint(ligne1)\nfile2.write(ligne1)\n\nliste=ligne1.split()\nprint(liste)\npx=int(liste[0])*int(liste[1])*3 #si c'est une photo couleur\nprint(\"il y a \",px,\" pixels dans la photo\")\n\n\nligne1=str(file1.readline())\nfile2.write(ligne1)\nvmax=int(ligne1)\nprint(\"La valeur max d'un pixel vaut: \",vmax)\n\n#COPIE DES DONNEES IMAGE ET INSERTION CODE\n\nposition=int(px/len(code))-1\nfor j in range(len(code)):\n print(\"Etape n°\",j+1)\n\n ligne1=str(file1.readline())\n ajout=int(code[j])\n if ajout==0:\n ajout=10\n if int(ligne1)+10 bool:\n \"\"\"\n Visit the web application's given homepage url\n :return: bool\n \"\"\"\n try:\n self._browser.get(self.url)\n console_message = colored('[success] ', 'green',\n attrs=['bold']) + colored(\n '[Homepage OK!]', 'cyan')\n print(console_message)\n sleep(5)\n\n return True\n except Exception as e:\n console_message = colored('[failed] ', 'red', attrs=['bold']) + \\\n colored('Homepage not OK', 'cyan')\n print(console_message)\n print(colored(str(e), 'red'))\n\n return False\n\n def nav_bar_content_testing(self) -> bool:\n \"\"\"\n Test nav bar has all desired elements\n :return: bool\n \"\"\"\n try:\n nav_menu = self._browser.find_element_by_id(\"nav_menu\")\n nav_list = nav_menu.find_elements_by_tag_name('li')\n nav_element_matched = self.compare_nav_elements(nav_list)\n\n console_message = colored('[success] ', 'green',\n attrs=['bold']) + colored(\n '[Nav bar OK!]', 'cyan')\n print(console_message)\n\n return nav_element_matched\n except Exception as e:\n console_message = colored('[failed] ', 'red', attrs=['bold']) + \\\n colored('Nav bar is not OK!', 'cyan')\n print(console_message)\n print(colored(str(e), 'red'))\n self.close_browser()\n\n return False\n\n def compare_nav_elements(self, nav_list) -> bool:\n \"\"\"\n Compare given nav list and web page nav list\n :param nav_list: Fetched from web page\n :return: bool\n \"\"\"\n nav_elements_to_check = []\n for li in nav_list:\n nav_elements_to_check.append(li.text)\n\n nav_has_all_menu = all(element in self.nav_bar_elements\n for element in nav_elements_to_check)\n\n return nav_has_all_menu\n\n def click_nav_elements_on_fullscreen(self) -> bool:\n \"\"\"\n This will test nav bar element by clicking on their link while window\n size is in full screen\n :return: bool\n \"\"\"\n try:\n for element in FULL_WINDOW_NAV_ITEMS_X_PATH:\n self.click_on_element(element)\n sleep(2)\n console_message = colored('[success] ', 'green',\n attrs=['bold']) + colored(\n '[Nav elements link OK!]', 'cyan')\n print(console_message)\n\n return True\n except Exception as e:\n console_message = colored('[failed] ', 'red', attrs=['bold']) + \\\n colored('Nav elements link is not OK!', 
'cyan')\n print(console_message)\n print(colored(str(e), 'red'))\n self.close_browser()\n\n return False\n\n def click_nav_elements_on_mobile_screen(self) -> bool:\n \"\"\"\n This will test nav bar element by clicking on their link after\n decreasing window size\n :return: bool\n \"\"\"\n try:\n self.set_window_screen()\n self.click_on_element(MOBILE_MENU_NAV_BAR_X_PATH)\n for element in MOBILE_SCREEN_NAV_ITEMS_X_PATH:\n self.click_on_element(element)\n console_message = colored('[success] ', 'green',\n attrs=['bold']) + colored(\n '[Nav elements link on full screen OK!]', 'cyan')\n print(console_message)\n\n return True\n except Exception as e:\n console_message = colored('[failed] ', 'red', attrs=['bold']) + \\\n colored('Nav elements link on mobile screen is '\n 'not OK!', 'cyan')\n print(console_message)\n print(colored(str(e), 'red'))\n self.close_browser()\n\n return False\n\n def click_on_element(self, x_path):\n \"\"\"\n Will click on given x_path of html element\n :param x_path: x_path of html element\n \"\"\"\n element = self._browser.find_element_by_xpath(x_path)\n element.click()\n sleep(1)\n\n def set_window_screen(self) -> bool:\n \"\"\"\n This method is resize the browser window\n :return: bool\n \"\"\"\n try:\n sleep(2)\n self._browser.set_window_size(768, 1024)\n console_message = colored('[success] ', 'green',\n attrs=['bold']) + colored(\n '[Custom window size OK!]', 'cyan')\n print(console_message)\n\n return True\n except Exception as e:\n console_message = colored('[failed] ', 'red', attrs=['bold']) + \\\n colored('Custom window size is not OK!', 'cyan')\n print(console_message)\n print(colored(str(e), 'red'))\n self.close_browser()\n\n return False\n\n def close_browser(self):\n \"\"\"\n Close the browser\n \"\"\"\n self._browser.quit()\n","repo_name":"zim0101/quiz_app_automation_test_with_python_selenium","sub_path":"homepage/homepage.py","file_name":"homepage.py","file_ext":"py","file_size_in_byte":5994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3295862609","text":"from core.api.serializers import BaseSeralizer\nfrom empresas.domain.models import Empresas\n\n\nclass EmpresasSerializer(BaseSeralizer):\n class Meta:\n model = Empresas\n fields = [\n 'id',\n 'cnpj',\n 'logradouro',\n 'cidade',\n 'pais',\n 'ativo',\n ]\n read_only_fields = ['id', 'ativo', ]\n","repo_name":"jackteruya/gerenciador_rh","sub_path":"empresas/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3241347136","text":"\"\"\"\nThis module contains utility functions.\n\nauthor: Chinedu Ezeofor\n\"\"\"\n\n# Standard imports\nimport numpy as np\nimport pandas as pd\n\n# Sklearn\nfrom sklearn import metrics\nfrom sklearn.pipeline import Pipeline\n\nfrom pydantic import BaseModel\nimport mlflow\n\n# Built-in\nimport typing as tp\nimport warnings\n\n\ndef eval_metrics(actual: np.ndarray, pred: np.ndarray) -> tp.Tuple:\n \"\"\"This is used to evaluate the performance of the model.\"\"\"\n rmse = metrics.mean_squared_error(actual, pred, squared=False)\n mse = metrics.mean_squared_error(actual, pred, squared=True)\n mae = metrics.mean_absolute_error(actual, pred)\n r2 = metrics.r2_score(actual, pred)\n\n return (rmse, mse, mae, r2)\n\n\nEstimator = tp.Union[Pipeline, tp.Any] # Alias for estimator\n\n\nclass Experiment(BaseModel):\n \"\"\"This contains the experiment meta data\"\"\"\n\n 
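# plain string metadata; tracking_uri is later passed to mlflow.set_tracking_uri\n 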
experiment_name: str\n run_name: str\n model_name: str\n tracking_uri: str\n\n\nclass TrainingData(BaseModel):\n \"\"\"This is the training data.\"\"\"\n\n X_train: tp.Union[pd.DataFrame, np.ndarray]\n X_validate: tp.Union[pd.DataFrame, np.ndarray]\n y_train: tp.Union[pd.Series, np.ndarray]\n y_validate: tp.Union[pd.Series, np.ndarray]\n\n class Config:\n arbitrary_types_allowed = True\n\n\ndef run_experiment(\n *, experiment: Experiment, estimator: Estimator, training_data: TrainingData\n) -> None:\n \"\"\"This is used to track an MLFlow experiment.\n\n Params:\n -------\n experiment (Experiment): Experiment object which contains the experiment meta data.\n estimator (Estimator): Estimator object which contains the estimator meta data.\n training_data (TrainingData): Data used for training and validation.\n\n Returns:\n --------\n None\n \"\"\"\n\n from urllib.parse import urlparse\n import warnings\n import logging\n\n warnings.filterwarnings(\"ignore\") # Required\n\n delim = \"::\"\n format_ = f\"%(levelname)s {delim} %(asctime)s {delim} %(message)s\"\n\n logging.basicConfig(level=logging.INFO, format=format_)\n\n logger = logging.getLogger(__name__)\n\n # Config\n mlflow.set_tracking_uri(experiment.tracking_uri)\n mlflow.set_experiment(experiment.experiment_name)\n\n with mlflow.start_run(run_name=experiment.run_name):\n mlflow.sklearn.autolog()\n logger.info(f\"========= Training {experiment.model_name!r} =========\")\n estimator.fit(training_data.X_train, training_data.y_train)\n\n # Make predictions\n y_pred = estimator.predict(training_data.X_validate)\n\n (rmse, mse, mae, r2) = eval_metrics(\n actual=training_data.y_validate, pred=y_pred\n )\n print(f\" Model name: {experiment.model_name}\")\n print(f\" RMSE: {rmse}\")\n print(f\" MSE: {mse}\")\n print(f\" MAE: {mae}\")\n print(f\" R2: {r2}\")\n\n # Log params/metrics on MLFlow\n # I'm using autolog\n\n tracking_url_type_store = urlparse(mlflow.get_tracking_uri()).scheme\n\n if tracking_url_type_store != \"file\":\n\n # Register the model\n mlflow.sklearn.log_model(\n estimator, \"model\", registered_model_name=experiment.model_name\n )\n else:\n mlflow.sklearn.log_model(estimator, \"model\")\n logger.info(f\"========= Training {experiment.model_name!r} Done! 
=========\")\n ","repo_name":"chineidu/MLFlow_example","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"264515190","text":"#!/usr/bin/env python\nimport logging\nimport os\nfrom dataclasses import dataclass, field\nfrom io import BytesIO\nfrom typing import Any, BinaryIO, Dict, List, Tuple\n\nfrom binreader import BinaryReader\n\nfrom .package_parser import loads\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass Package:\n\tpath: str\n\tparent_path: str\n\tdata: bytes\n\n\t@property\n\tdef content(self) -> Dict[str, Any]:\n\t\treturn loads(self.data.decode())\n\n\tdef get_full_content(self, packages_file: \"PackagesFile\") -> dict:\n\t\tif not self.parent_path:\n\t\t\treturn self.content\n\n\t\tif self.parent_path not in packages_file._packages:\n\t\t\tcontent = {}\n\t\telse:\n\t\t\tparent_package = packages_file._packages[self.parent_path]\n\t\t\tcontent = parent_package.get_full_content(packages_file)\n\t\tcontent.update(self.content)\n\t\treturn content\n\n\nclass PackagesFile:\n\tdef __init__(self, bin_file: BinaryIO) -> None:\n\t\tself._packages: Dict[str, Package] = {}\n\t\treader = BinaryReader(bin_file)\n\t\tself.hash = reader.read(29)\n\n\t\tdef read_length_prefixed_str() -> str:\n\t\t\tsz = reader.read_int32()\n\t\t\treturn reader.read_string(sz)\n\n\t\tself.structs: List[Tuple[str, int]] = []\n\t\tnum_structs = reader.read_int32()\n\t\tfor _ in range(num_structs):\n\t\t\tname = read_length_prefixed_str()\n\t\t\tunk = reader.read_int32()\n\t\t\tself.structs.append((name, unk))\n\n\t\tchunks: List[bytes] = []\n\t\tchunksize = reader.read_int32()\n\t\tchunk_reader = BinaryReader(BytesIO(reader.read(chunksize)))\n\t\tnum_chunks = reader.read_int32()\n\n\t\tfor i in range(num_chunks):\n\t\t\tchunks.append(chunk_reader.read_cstring())\n\n\t\tfor chunk in chunks:\n\t\t\tbase_path = read_length_prefixed_str()\n\t\t\tname = read_length_prefixed_str()\n\t\t\treader.read(5)\n\t\t\tparent_path = read_length_prefixed_str()\n\t\t\treader.read(4) # always 0\n\n\t\t\tpath = os.path.join(base_path, name)\n\t\t\tif parent_path:\n\t\t\t\tparent_path = os.path.join(base_path, parent_path)\n\n\t\t\tself._packages[path] = Package(path, parent_path, chunk)\n\n\tdef __getitem__(self, key: str) -> Dict[str, Any]:\n\t\treturn self._packages[key].get_full_content(self)\n\n\t@property\n\tdef packages(self):\n\t\treturn list(self._packages.values())\n","repo_name":"jleclanche/evoeng","sub_path":"evoeng/packages_extract.py","file_name":"packages_extract.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"35718992128","text":"#-----------------------------------------------------------------------------#\n# #\n# This is a script which is designed to open the FITS files containing the #\n# smoothed CGPS data, and produce FITS files and images of local statistics #\n# calculated for the diagnostic quantities, e.g. the skewness of the #\n# polarisation gradient. This is performed for each final angular resolution #\n# that was used to smooth the data. The calculation of these quantities will #\n# occur in separate functions. The images will then be saved as FITS files in #\n# the same directory as the CGPS data. 
#\n# #\n# Author: Chris Herron #\n# Start Date: 25/8/2015 #\n# #\n#-----------------------------------------------------------------------------#\n\n# Import the various packages which are required for the proper functioning of\n# this script, scipy.stats for calculating statistical quantities\nimport numpy as np\nimport aplpy\nfrom astropy.io import fits\nfrom scipy import stats\n\n# Import utility functions\nfrom mat2FITS_Image import mat2FITS_Image\nfrom fits2aplpy import fits2aplpy\n\n# Import the function that will calculate all of the local statistics\n#from calc_local_stats import calc_local_stats # Cython version\n#from calc_local_stats_purepy import calc_local_stats # Python version\nfrom calc_sparse_stats import calc_sparse_stats # Sparse Python version\n\n# Create a string object which stores the directory of the CGPS data\ndata_loc = '/Users/chrisherron/Documents/PhD/CGPS_2015/'\n\n# Create a string that will be used to determine what quantity will be read \n# through the script and analysed, e.g. the polarisation gradient, or \n# polarisation intensity. FITS files for the chosen quantity will be loaded\n# by the script. Could be 'Polar_Grad' or 'Polar_Inten', for example\ndata_2_load = 'Polar_Grad'\n\n# Create a string that will be used to control what FITS files are used\n# to perform calculations, and that will be appended into the filename of \n# anything produced in this script. This is either 'high_lat' or 'plane'\nsave_append = 'plane_all_mask'\n\n# Specify the number of beams that should be across half the width of the\n# box used to calculate the statistics around each pixel\nnum_beams = 60\n\n# Create an array that specifies all of the final resolution values that were \n# used to create mosaics. This code will calculate quantities for each of the\n# resolutions given in this array\nfinal_res_array = np.array([75, 90, 105, 120, 135, 150, 165, 180, 195, 210,\\\n 225, 240, 255, 270, 285, 300, 315, 330, 345, 360, 375, 390, 405, 420, 450,\\\n 480, 510, 540, 570, 600, 630, 660, 720, 780, 840, 900, 960, 1020, 1080, 1140,\\\n 1200])\n# final_res_array = np.array([150])\n\n# Create a list that specifies all of the files for which we want to calculate\n# local statistics\ninput_files = [data_loc + '{}_{}_smoothed/'.format(data_2_load, save_append)\\\n+ '{}_{}_smooth2_{}.fits'.format(data_2_load, save_append, res) for res in final_res_array]\n\n# Create a list that specifies the filenames to use to save all of the \n# produced files\noutput_files = [data_loc + '{}_{}_smooth2_{}_beams{}'.format(data_2_load, save_append,\\\n res, num_beams) for res in final_res_array]\n\n# Create a list of strings, that will be used to control what local statistics\n# are calculated for the input data. For each statistic chosen, FITS files of\n# the locally calculated statistic will be produced for each final resolution\n# value, and saved in the same directory as the CGPS data.\n# Valid statistics include 'mean', 'stdev', skewness', 'kurtosis'\nstat_list = ['skewness']\n\n# Specify the size of each pixel in the CGPS survey, in degrees\npix_size_deg = 4.9999994 * np.power(10.0, -3.0)\n\n# Determine the half width of the box used to calculate the local skewness \n# around each pixel. This half width depends upon the angular resolution of\n# the image being considered, so that we always have a certain number of \n# beamwidths within the box. 
This is given as the largest integer that fits \n# the required number of beamwidths.\nbox_halfwidths = np.asarray([int(np.ceil(num_beams * res/(pix_size_deg * 3600)))\\\n for res in final_res_array])\n\n# Run the function that calculates the local statistics for each image. This\n# will calculate all statistics for each value of the final resolution, and then\n# save the resulting map as a FITS file.\ncalc_sparse_stats(input_files, output_files, stat_list, box_halfwidths)","repo_name":"ChrisTCH/phd_code","sub_path":"cgps_2015_local_stat_script.py","file_name":"cgps_2015_local_stat_script.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4066249086","text":"class ChatMenu():\n def choice(self):\n while 1:\n try:\n self.choice = int(input(\"press 1 to start\\npress 2 to stop\\n:>\"))\n except ValueError as err:\n print(\"This is not int, ERROR: {}.\".format(err))\n except (KeyboardInterrupt, SystemExit):\n raise\n if self.choice not in range(1, 3):\n print(\"not in range 1 - 2\")\n elif self.choice == 2:\n print(\"Exiting\")\n return self.choice\n else:\n return self.choice\n\n","repo_name":"ppodbielski/myWork","sub_path":"communicator/version1/chat_menu.py","file_name":"chat_menu.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2739882779","text":"# import pandas as pd\nimport numpy as np\nimport random\nfrom math import ceil\n\nimport streamlit as st\n\nimport use_streamlit.Weather as Weather\n\nDEFAULT_CITY_NAME = 'London'\n\n# если api_key не введен, то выводится текст о том что api не доступно\napi_key = st.sidebar.text_input(label='Enter the api key:', value='')\n\n\ndef current(state: str, city: str, api_key: str):\n st.write('You selected:', state)\n data_request = Weather.get_current(city=city, api_key=api_key)\n # line 1\n st.subheader(data_request['weather']['group'])\n cols = st.columns(3)\n # # Придумать как сделать это автоматически !!!\n cols[0].image(\n image=data_request['weather']['icon'],\n caption='{}'.format(data_request['weather']['description']))\n cols[1].metric(\n label=data_request['temp']['description'],\n value='{values} {units}'.format(\n values=data_request['temp']['values'],\n units=data_request['temp']['units']),\n delta='{delta}{values} {units}'.format(\n delta='-' if data_request[\n 'temp_feels_like'][\n 'values'] < data_request['temp']['values'] else '+',\n values=data_request['temp_feels_like']['values'],\n units=data_request['temp_feels_like']['units']))\n\n cols[2].metric(\n label=data_request['pressure']['description'],\n value='{values} {units}'.format(\n values=data_request['pressure']['values'],\n units=data_request['pressure']['units']))\n\n # Убираю лишнее из результата\n k = [i for i in data_request.keys()\n if (not (i in ['pressure', 'temp', 'weather'])\n and data_request[i] is not None)]\n for i in range(ceil(len(k)/3)):\n keys = k[i*3:i*3+3]\n cols = st.columns(len(keys))\n for col, key in zip(cols, keys):\n col.metric(\n label=data_request[key]['description'],\n value='{values} {units}'.format(\n values=data_request[key]['values'],\n units=data_request[key]['units']))\n\n\ndef historycal(state: str, city: str, api_key: str):\n st.write('You selected:', state)\n data_test1 = [random.random() for i in range(50)]\n st.line_chart(data=data_test1)\n # columns1\n col1, col2 = st.columns(2)\n col1.metric(label='temp', 
value='{}'.format(np.mean(data_test1)))\n col2.metric(label='pressure', value='{}'.format(np.max(data_test1)))\n # 2\n data_test2 = [random.random() for i in range(50)]\n st.line_chart(data=data_test2)\n # columns1\n col1, col2 = st.columns(2)\n col1.metric(label='temp', value='{}'.format(np.mean(data_test2)))\n col2.metric(label='pressure', value='{}'.format(np.max(data_test2)))\n\n\ndef forecast(city: str, api_key: str):\n return Weather.get_current(city=city, api_key=api_key)\n\n\nif api_key == '':\n # Добавить описание того что это за app'ка\n st.text('Enter api key')\nelse:\n input_city = st.sidebar.text_input(\n label='Write the city whose data you want to see:',\n value=DEFAULT_CITY_NAME)\n\n st.title('Weather data')\n 'You selected:', input_city\n\n state = st.selectbox('What kind of data do you want to see?',\n ('Current', 'Historycal', 'Forecast'))\n if state == \"Current\":\n current(state=state, city=input_city, api_key=api_key)\n elif state == 'Historycal':\n historycal(state=state, city=input_city, api_key=api_key)\n elif state == \"Forecast\":\n forecast(city=input_city, api_key=api_key)\n","repo_name":"hinevics/weather-city","sub_path":"use_streamlit/weather-gui.py","file_name":"weather-gui.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12850129354","text":"\"\"\"\nANSI escape code utilities, see\nhttp://www.ecma-international.org/publications/files/ECMA-ST/Ecma-048.pdf\n\"\"\"\n\n\ngraph_prefix = \"\\x1b[\"\ngraph_suffix = \"m\"\ncodes = {\n \"reset\": \"0\",\n \"bold\": \"1\",\n \"faint\": \"2\",\n \"italic\": \"3\",\n \"underline\": \"4\",\n \"blink\": \"5\",\n \"slow_blink\": \"5\",\n \"fast_blink\": \"6\",\n \"inverse\": \"7\",\n \"conceal\": \"8\",\n \"strike\": \"9\",\n \"primary_font\": \"10\",\n \"reset_font\": \"10\",\n \"font_0\": \"10\",\n \"font_1\": \"11\",\n \"font_2\": \"12\",\n \"font_3\": \"13\",\n \"font_4\": \"14\",\n \"font_5\": \"15\",\n \"font_6\": \"16\",\n \"font_7\": \"17\",\n \"font_8\": \"18\",\n \"font_9\": \"19\",\n \"fraktur\": \"20\",\n \"double_underline\": \"21\",\n \"end_bold\": \"21\",\n \"normal_intensity\": \"22\",\n \"end_italic\": \"23\",\n \"end_fraktur\": \"23\",\n \"end_underline\": \"24\", # single or double\n \"end_blink\": \"25\",\n \"end_inverse\": \"27\",\n \"end_conceal\": \"28\",\n \"end_strike\": \"29\",\n \"black\": \"30\",\n \"red\": \"31\",\n \"green\": \"32\",\n \"yellow\": \"33\",\n \"blue\": \"34\",\n \"magenta\": \"35\",\n \"cyan\": \"36\",\n \"white\": \"37\",\n \"extended\": \"38\",\n \"default\": \"39\",\n \"fg_black\": \"30\",\n \"fg_red\": \"31\",\n \"fg_green\": \"32\",\n \"fg_yellow\": \"33\",\n \"fg_blue\": \"34\",\n \"fg_magenta\": \"35\",\n \"fg_cyan\": \"36\",\n \"fg_white\": \"37\",\n \"fg_extended\": \"38\",\n \"fg_default\": \"39\",\n \"bg_black\": \"40\",\n \"bg_red\": \"41\",\n \"bg_green\": \"42\",\n \"bg_yellow\": \"43\",\n \"bg_blue\": \"44\",\n \"bg_magenta\": \"45\",\n \"bg_cyan\": \"46\",\n \"bg_white\": \"47\",\n \"bg_extended\": \"48\",\n \"bg_default\": \"49\",\n \"frame\": \"51\",\n \"encircle\": \"52\",\n \"overline\": \"53\",\n \"end_frame\": \"54\",\n \"end_encircle\": \"54\",\n \"end_overline\": \"55\",\n \"ideogram_underline\": \"60\",\n \"right_line\": \"60\",\n \"ideogram_double_underline\": \"61\",\n \"right_double_line\": \"61\",\n \"ideogram_overline\": \"62\",\n \"left_line\": \"62\",\n \"ideogram_double_overline\": \"63\",\n \"left_double_line\": \"63\",\n 
\"ideogram_stress\": \"64\",\n \"reset_ideogram\": \"65\",\n}\n\n\nclass TextFormat:\n \"\"\"\n ANSI Select Graphic Rendition (SGR) code escape sequence.\n \"\"\"\n\n def __init__(self, *attrs, **kwargs):\n \"\"\"\n :param attrs: are the attribute names of any format codes in `codes`\n\n :param kwargs: may contain\n\n `x`, an integer in the range [0-255] that selects the corresponding\n color from the extended ANSI 256 color space for foreground text\n\n `rgb`, an iterable of 3 integers in the range [0-255] that select the\n corresponding colors from the extended ANSI 256^3 color space for\n foreground text\n\n `bg_x`, an integer in the range [0-255] that selects the corresponding\n color from the extended ANSI 256 color space for background text\n\n `bg_rgb`, an iterable of 3 integers in the range [0-255] that select\n the corresponding colors from the extended ANSI 256^3 color space for\n background text\n\n `reset`, prepend reset SGR code to sequence (default `True`)\n\n Examples:\n\n .. code-block:: python\n\n red_underlined = TextFormat('red', 'underline')\n\n nuanced_text = TextFormat(x=29, bg_x=71)\n\n magenta_on_green = TextFormat('magenta', 'bg_green')\n print('{}Can you read this?{}'.format(magenta_on_green, TextFormat('reset')))\n \"\"\"\n self.codes = [codes[attr.lower()] for attr in attrs if isinstance(attr, str)]\n\n if kwargs.get(\"reset\", True):\n self.codes[:0] = [codes[\"reset\"]]\n\n def qualify_int(i):\n if isinstance(i, int):\n return i % 256 # set i to base element of its equivalence class\n\n def qualify_triple_int(t):\n if isinstance(t, (list, tuple)) and len(t) == 3:\n return qualify_int(t[0]), qualify_int(t[1]), qualify_int(t[2])\n\n if kwargs.get(\"x\", None) is not None:\n self.codes.extend((codes[\"extended\"], \"5\", qualify_int(kwargs[\"x\"])))\n elif kwargs.get(\"rgb\", None) is not None:\n self.codes.extend((codes[\"extended\"], \"2\"))\n self.codes.extend(*qualify_triple_int(kwargs[\"rgb\"]))\n\n if kwargs.get(\"bg_x\", None) is not None:\n self.codes.extend((codes[\"extended\"], \"5\", qualify_int(kwargs[\"bg_x\"])))\n elif kwargs.get(\"bg_rgb\", None) is not None:\n self.codes.extend((codes[\"extended\"], \"2\"))\n self.codes.extend(*qualify_triple_int(kwargs[\"bg_rgb\"]))\n\n self.sequence = \"{}{}{}\".format(\n graph_prefix, \";\".join(self.codes), graph_suffix\n )\n\n def __call__(self, text, reset=True):\n \"\"\"\n Format :param text: by prefixing `self.sequence` and suffixing the\n reset sequence if :param reset: is `True`.\n\n Examples:\n\n .. code-block:: python\n\n green_blink_text = TextFormat('blink', 'green')\n 'The answer is: {0}'.format(green_blink_text(42))\n \"\"\"\n end = TextFormat(\"reset\") if reset else \"\"\n return \"{}{}{}\".format(self.sequence, text, end)\n\n def __str__(self):\n return self.sequence\n\n def __repr__(self):\n return self.sequence\n","repo_name":"saltstack/salt","sub_path":"salt/utils/textformat.py","file_name":"textformat.py","file_ext":"py","file_size_in_byte":5149,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"37173071633","text":"# -*- coding: utf-8 -*-\n\n\nimport requests\nimport json\nimport time\nfrom pymodbus.client.sync import ModbusTcpClient\nfrom pymodbus.constants import Defaults\nclass API:\n # 1. constructor : gets call every time when create a new class\n # requirements for instantiation1. model, 2.type, 3.api, 4. 
address\n def __init__(self,**kwargs):\n # Initialized common attributes\n self.variables = kwargs\n self.debug = True\n\n\n def set_variable(self,k,v): # k=key, v=value\n self.variables[k] = v\n\n def get_variable(self,k):\n return self.variables.get(k, None) # default of get_variable is none\n\n # 2. Attributes from Attributes table\n\n '''\n Attributes:\n ------------------------------------------------------------------------------------------\n status GET,SET open/close airconditioner\n set_temperature GET,SET change set temperature\n current_temperature GET show current temperature\n mode GET,SET represents the operating mode\n set_humidity GET represents the target humidity\n ------------------------------------------------------------------------------------------\n\n '''\n # 3. Capabilites (methods) from Capabilities table\n '''\n API3 available methods:\n 1. getDeviceStatus() GET\n 2. setDeviceStatus() SET\n '''\n\n # ----------------------------------------------------------------------\n # getDeviceStatus(), getDeviceStatusJson(data), printDeviceStatus()\n def getDeviceStatus(self):\n\n self.set_variable('status', 'OFF')\n self.set_variable('current_temperature', '30')\n self.set_variable('set_temperature', '25')\n self.set_variable('set_humidity', '70')\n self.set_variable('mode', 'COLD')\n self.set_variable('fan', 'AUTO')\n self.set_variable('swing', 'silent')\n\n url = str(self.get_variable(\"url\"))\n\n Defaults.Parity = 'E'\n Defaults.Baudrate = 9600\n # read temp\n client = ModbusTcpClient(url, port=502)\n client.connect()\n print(client.connect())\n result = client.read_holding_registers(2000, 5, unit=1)\n\n status = (str(hex(result.registers[0])))\n statusraw = status[-1:]\n stemp = result.registers[1]\n client.close()\n\n if statusraw == \"1\":\n status = \"ON\"\n elif statusraw == \"0\":\n status = \"OFF\"\n else:\n status = \"ON\"\n\n set_temperature = int(stemp/10)\n\n if set_temperature > 30:\n set_temperature = 26\n set_humidity = '70'\n\n if set_temperature == '--':\n set_temperature = '20'\n\n if set_humidity == '--':\n set_humidity = '70'\n\n\n mode = '1'\n\n if mode == '1':\n strmode = 'COLD'\n if mode == '2':\n strmode = 'DEHUMDIFICATOR'\n if mode == '4':\n strmode = 'HOT'\n if mode == '0':\n strmode = 'FAN'\n\n\n\n fan = '5'\n\n if fan == '3':\n fan = '1'\n\n if fan == '4':\n fan = '2'\n\n if fan == '5':\n fan = '3'\n\n if fan == '6':\n fan = '4'\n\n if fan == '7':\n fan = '5'\n\n if fan == 'A':\n fan = 'AUTO'\n\n if fan == 'B':\n fan = 'SILENT'\n\n swing = '3'\n if swing == '0':\n swing = 'silent'\n\n if swing == '1':\n swing = 'vertical'\n\n if swing == '2':\n swing = 'horizontal'\n\n if swing == '3':\n swing = 'VH'\n\n self.set_variable('status', status)\n self.set_variable('current_temperature', (set_temperature))\n self.set_variable('set_temperature', set_temperature)\n self.set_variable('set_humidity', set_humidity)\n self.set_variable('mode', strmode)\n self.set_variable('fan', fan)\n self.set_variable('swing', swing)\n self.printDeviceStatus()\n\n\n def printDeviceStatus(self):\n\n # now we can access the contents of the JSON like any other Python object\n print(\" the current status is as follows:\")\n print(\" status = {}\".format(self.get_variable('STATUS')))\n print(\" current_temperature = {}\".format(self.get_variable('TEMPERATURE')))\n print(\" set_temperature = {}\".format(self.get_variable('SET_TEMPERATURE')))\n print(\" set_humidity = {}\".format(self.get_variable('SET_HUMIDITY')))\n print(\" mode = {}\".format(self.get_variable('MODE')))\n 
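# The register decoding in getDeviceStatus() above can be exercised without
# a Modbus connection; a sketch with fabricated register values (the layout
# -- status word at 2000, temperature*10 at 2001 -- mirrors the record, the
# sample numbers are made up).
def decode_ac_registers(registers):
    # Low hex digit of the status word: "0" means OFF, anything else ON
    # (the agent above defaults unknown digits to ON as well).
    status = "OFF" if hex(registers[0])[-1:] == "0" else "ON"
    set_temperature = registers[1] // 10  # stored as tenths of a degree
    return status, set_temperature

print(decode_ac_registers([0x31, 253]))  # -> ('ON', 25)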
print(\" fan = {}\".format(self.get_variable('FAN_SPEED')))\n # print(\" swing = {}\".format(self.get_variable('swing')))\n print(\"---------------------------------------------\")\n\n # setDeviceStatus(postmsg), isPostmsgValid(postmsg), convertPostMsg(postmsg)\n def setDeviceStatus(self, postmsg):\n url = str(self.get_variable(\"url\"))\n # postmsg = str(postmsg)\n\n Defaults.Parity = 'E'\n Defaults.Baudrate = 9600\n # read temp\n\n for k, v in postmsg.items():\n if k == 'status':\n if (postmsg['status']) == \"ON\":\n\n # seton\n client = ModbusTcpClient(url, port=502)\n client.connect()\n print(client.connect())\n result = client.write_register(2000, 1101, unit=1) # open /close\n print(\"on\")\n client.close()\n\n\n elif (postmsg['status']) == \"OFF\":\n # set off\n client = ModbusTcpClient(url, port=502)\n client.connect()\n print(client.connect())\n result = client.write_register(2000, 1100, unit=1) # open /close\n print(\"off\")\n client.close()\n\n if k == 'mode':\n\n if (postmsg['mode']) == \"COOL\":\n print(\"mode\")\n\n\n elif (postmsg['mode']) == \"DEHUMDIFICATOR\":\n print(\"mode\")\n\n\n\n\n\n elif (postmsg['mode']) == \"FAN\":\n print(\"mode\")\n\n if k == 'fan':\n if (postmsg['fan']) == \"1\":\n print(\"fan\")\n\n elif (postmsg['fan']) == \"2\":\n print(\"fan\")\n\n elif (postmsg['fan']) == \"3\":\n print(\"fan\")\n\n elif (postmsg['fan']) == \"4\":\n print(\"fan\")\n\n elif (postmsg['fan']) == \"5\":\n print(\"fan\")\n\n elif (postmsg['fan']) == \"AUTO\":\n print(\"fan\")\n\n elif (postmsg['fan']) == \"SILENT\":\n print(\"fan\")\n\n if k == 'swing':\n if (postmsg['swing']) == \"ON\":\n print(\"fan\")\n\n elif (postmsg['swing']) == \"OFF\":\n print(\"fan\")\n\n if k == 'stemp':\n print ('start settemp')\n stemp = postmsg['stemp']\n client = ModbusTcpClient(url, port=502)\n client.connect()\n print(client.connect())\n settemp= (int(stemp)*10)\n result = client.write_register(2001, settemp, unit=1) # open /close\n print (\"settemp\")\n client.close()\n\n client.close()\n\n def isPostMsgValid(self, postmsg): # check validity of postmsg\n dataValidity = True\n # TODO algo to check whether postmsg is valid\n return dataValidity\n\n\n# This main method will not be executed when this class is used as a module\ndef main():\n # create an object with initialized data from DeviceDiscovery Agent\n # requirements for instantiation1. model, 2.type, 3.api, 4. 
address\n AC = API(model='daikin', type='AC', api='API', agent_id='ACAgent', url='192.168.10.239',\n port=502, parity='E', baudrate=9600, startregis=2006, startregisr=2012)\n\n # example>>>>>>>>>>>>>>>\n # AC.setDeviceStatus({\"status\": \"ON\",\"username\": \"hive5\"})\n # time.sleep(2)\n # AC.setDeviceStatus({'stemp':'22'})\n # AC.setDeviceStatus({\"status\": \"OFF\"})\n\n # AC.setDeviceStatus({'status': 'ON', 'mode': 'COLD', 'device': '1DAIK1200138'})\n # AC.setDeviceStatus({'status': 'ON', 'device': '1DAIK1200138'})\n # time.sleep(6)\n # time.sleep(3)\n AC.getDeviceStatus()\n # AC.setDeviceStatus({'stemp':'25'})\n # time.sleep(3)\n # AC.getDeviceStatus()\n # AC.setDeviceStatus({'swing':'ON','device': '1DAIK1200138'})\nif __name__ == \"__main__\": main()\n\n","repo_name":"Soulweed/Agent","sub_path":"DaikinAgent/daikinagent/extension/api_modbus.py","file_name":"api_modbus.py","file_ext":"py","file_size_in_byte":8575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1551949189","text":"from django import forms\nfrom django.contrib import admin\nfrom django.urls import reverse\nfrom django.utils.safestring import mark_safe\n\nfrom tournament.models import (\n OnlineTournamentRegistration,\n Tournament,\n TournamentApplication,\n TournamentRegistration,\n TournamentResult,\n)\n\n\nclass TournamentForm(forms.ModelForm):\n class Meta:\n model = Tournament\n exclude = [\"name\", \"registration_description\", \"results_description\"]\n\n\nclass TournamentAdmin(admin.ModelAdmin):\n form = TournamentForm\n\n prepopulated_fields = {\"slug\": [\"name_en\"]}\n list_display = [\"name\", \"country\", \"end_date\", \"is_upcoming\", \"export\"]\n list_filter = [\"is_event\", \"tournament_type\", \"russian_cup\", \"country\"]\n search_fields = [\"name_ru\", \"name_en\"]\n\n ordering = [\"-end_date\"]\n\n filter_horizontal = [\"clubs\"]\n\n def export(self, obj):\n return mark_safe(\n 'Export to EMA'.format(\n reverse(\"export_tournament_results\", kwargs={\"tournament_id\": obj.id})\n )\n )\n\n\nclass TournamentRegistrationAdmin(admin.ModelAdmin):\n list_display = [\n \"id\",\n \"is_approved\",\n \"tournament\",\n \"first_name\",\n \"last_name\",\n \"city\",\n \"phone\",\n \"player\",\n \"city_object\",\n \"allow_to_save_data\",\n ]\n\n raw_id_fields = [\"tournament\", \"player\", \"city_object\"]\n\n\nclass OnlineTournamentRegistrationAdmin(admin.ModelAdmin):\n list_display = [\n \"id\",\n \"is_approved\",\n \"tournament\",\n \"first_name\",\n \"last_name\",\n \"city\",\n \"tenhou_nickname\",\n \"contact\",\n \"player\",\n \"city_object\",\n \"allow_to_save_data\",\n ]\n\n raw_id_fields = [\"tournament\", \"player\", \"city_object\"]\n\n\nclass TournamentApplicationAdmin(admin.ModelAdmin):\n list_display = [\"tournament_name\", \"city\", \"start_date\", \"created_on\"]\n\n\nclass TournamentResultAdmin(admin.ModelAdmin):\n list_display = [\"tournament\", \"player\", \"place\", \"scores\"]\n search_fields = [\n \"tournament__name\",\n \"player__last_name_ru\",\n \"player__first_name_ru\",\n \"player__last_name_en\",\n \"player__first_name_en\",\n \"player_string\",\n ]\n raw_id_fields = [\"tournament\", \"player\"]\n\n\nadmin.site.register(Tournament, TournamentAdmin)\nadmin.site.register(TournamentRegistration, TournamentRegistrationAdmin)\nadmin.site.register(OnlineTournamentRegistration, OnlineTournamentRegistrationAdmin)\nadmin.site.register(TournamentApplication, TournamentApplicationAdmin)\nadmin.site.register(TournamentResult, 
TournamentResultAdmin)\n","repo_name":"MahjongRepository/mahjong-portal","sub_path":"server/tournament/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"78"} +{"seq_id":"71320174012","text":"import math\n\n\ndef getSigns(values):\n signs = []\n\n for i in range(len(values) - 1):\n if values[i + 1] > values[i]:\n signs.append('+')\n else:\n signs.append('-')\n\n return signs\n\n\ndef countSigns(signs):\n return len(signs)\n\n\ndef getRuns(signs):\n if not signs:\n return 0\n\n runs = 1\n\n for i in range(len(signs) - 1):\n if signs[i + 1] != signs[i]:\n runs += 1\n\n return runs\n\n\ndef getExpectedValue(n, decimals):\n return round(((2 * n) - 1) / 3, decimals)\n\n\ndef getVariance(n, decimals):\n return round(math.sqrt(((16 * n) - 29) / 90), decimals)\n\n\ndef getStandardScore(expectedValue, variance, runs, decimals):\n return round((runs - expectedValue) / variance, decimals)\n\n\ndef getRunsTestResult(zscore, tableValue):\n return abs(zscore) < abs(tableValue)\n\n\ndef readValues(fileName, decimals):\n file_data = open(fileName, 'r')\n raw_data = []\n\n for element in file_data.readlines():\n raw_data.append(round(float(element), decimals))\n\n return raw_data\n\n\ndef runsTest(file_name, decimals):\n tableValue = 1.96\n\n values = readValues(file_name, decimals)\n signs = getSigns(values)\n runs = getRuns(signs)\n n = countSigns(signs)\n expectedValue = getExpectedValue(n, decimals)\n variance = getVariance(n, decimals)\n zscore = getStandardScore(expectedValue, variance, runs, decimals)\n res = getRunsTestResult(zscore, tableValue)\n\n print()\n print('Runs test:')\n print('Generated signs')\n print(' '.join(signs))\n print(f'total: {n}')\n print(f'total runs: {runs}')\n print('Statistics')\n print(f'Miu: {expectedValue}')\n print(f'Sigma: {variance}')\n print(f'Zscore: {zscore}')\n print()\n print('H0: Appereance of the numbers is random')\n print('H1: Appereance of the numbers is not random')\n\n if res:\n print(f'Since |{zscore}| < |{tableValue}|, H0 is not rejected')\n else:\n print(f'Since |{zscore}| > |{tableValue}|, H0 is rejected')\n","repo_name":"Josekeitor/SimulationProject","sub_path":"proyecto-1/runsTest.py","file_name":"runsTest.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2291018619","text":"class Solution(object):\n\n def replaceWords(self, dict, sentence):\n \"\"\"\n :type dict: List[str]\n :type sentence: str\n :rtype: str\n \"\"\"\n sentence = sentence.split(' ')\n dict.sort(key=len)\n ret = [[] for i in range(26)]\n for each in dict:\n ret[ord(each[0]) - 97].append(each)\n res = []\n for each in sentence:\n for word in ret[ord(each[0]) - 97]:\n if len(each) < len(word):\n break\n if each[:len(word)] == word:\n each = word\n res.append(each)\n return ' '.join(res)\n","repo_name":"kotori233/LeetCode","sub_path":"[0648]_Replace_Words/Replace_Words.py","file_name":"Replace_Words.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"34729663932","text":"import requests\nfrom typing import Dict, Any\n\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.core.exceptions import ValidationError\n\nfrom rest_framework_jwt.settings import api_settings\nfrom rest_framework_jwt.compat import set_cookie_with_token\n\nfrom 
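# Compact, runnable check of the runs-test statistics used in the record
# above (E[R] = (2n-1)/3, Var[R] = (16n-29)/90); the sample values are
# made up for illustration.
import math

values = [0.1, 0.7, 0.3, 0.9, 0.2, 0.8, 0.4, 0.6, 0.5]
signs = ['+' if b > a else '-' for a, b in zip(values, values[1:])]
runs = 1 + sum(1 for a, b in zip(signs, signs[1:]) if a != b)
n = len(signs)

mu = (2 * n - 1) / 3                   # expected number of runs
sigma = math.sqrt((16 * n - 29) / 90)  # standard deviation of runs
z = (runs - mu) / sigma
# A perfectly alternating sample gives |z| > 1.96, so H0 is rejected.
print(runs, round(z, 2), "reject H0" if abs(z) > 1.96 else "fail to reject H0")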
django.utils import timezone\nfrom django.contrib.auth import get_user_model\n\n\nUser = get_user_model()\n\nGOOGLE_ID_TOKEN_INFO_URL = 'https://www.googleapis.com/oauth2/v3/tokeninfo'\nGOOGLE_ACCESS_TOKEN_OBTAIN_URL = 'https://oauth2.googleapis.com/token'\nGOOGLE_USER_INFO_URL = 'https://www.googleapis.com/oauth2/v3/userinfo'\n\n\ndef jwt_login(*, response: HttpResponse, user: User) -> HttpResponse:\n jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\n payload = jwt_payload_handler(user)\n token = jwt_encode_handler(payload)\n\n if api_settings.JWT_AUTH_COOKIE:\n # Reference: https://github.com/Styria-Digital/django-rest-framework-jwt/blob/master/src/rest_framework_jwt/compat.py#L43\n set_cookie_with_token(response, api_settings.JWT_AUTH_COOKIE, token)\n\n user.last_login = timezone.now()\n\n return response\n\n\ndef google_validate_id_token(*, id_token: str) -> bool:\n # Reference: https://developers.google.com/identity/sign-in/web/backend-auth#verify-the-integrity-of-the-id-token\n response = requests.get(\n GOOGLE_ID_TOKEN_INFO_URL,\n params={'id_token': id_token}\n )\n\n if not response.ok:\n raise ValidationError('id_token is invalid.')\n\n audience = response.json()['aud']\n\n if audience != settings.GOOGLE_OAUTH2_CLIENT_ID:\n raise ValidationError('Invalid audience.')\n\n return True\n\n\ndef google_get_access_token(*, code: str, redirect_uri: str) -> str:\n # Reference: https://developers.google.com/identity/protocols/oauth2/web-server#obtainingaccesstokens\n data = {\n 'code': code,\n 'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,\n 'client_secret': settings.GOOGLE_OAUTH2_CLIENT_SECRET,\n 'redirect_uri': redirect_uri,\n 'grant_type': 'authorization_code'\n }\n\n response = requests.post(GOOGLE_ACCESS_TOKEN_OBTAIN_URL, data=data)\n\n if not response.ok:\n raise ValidationError('Failed to obtain access token from Google.')\n\n access_token = response.json()['access_token']\n\n return access_token\n\n\ndef google_get_user_info(*, access_token: str) -> Dict[str, Any]:\n # Reference: https://developers.google.com/identity/protocols/oauth2/web-server#callinganapi\n response = requests.get(\n GOOGLE_USER_INFO_URL,\n params={'access_token': access_token}\n )\n\n if not response.ok:\n raise ValidationError('Failed to obtain user info from Google.')\n\n return response.json()\n\n\n\nfrom random import SystemRandom\nfrom typing import Any, Dict\nfrom urllib.parse import urlencode\n\nimport jwt\nimport requests\n# from attrs import define\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.urls import reverse_lazy\nfrom oauthlib.common import UNICODE_ASCII_CHARACTER_SET\n\nfrom core.utils import Exception\n\n\n# @define\nclass GoogleRawLoginCredentials:\n client_id: str\n client_secret: str\n project_id: str\n\n\n# @define\nclass GoogleAccessTokens:\n id_token: str\n access_token: str\n\n def decode_id_token(self) -> Dict[str, str]:\n id_token = self.id_token\n decoded_token = jwt.decode(jwt=id_token, options={\"verify_signature\": False})\n return decoded_token\n\n\nclass GoogleRawLoginFlowService:\n API_URI = reverse_lazy(\"redirect-raw\")\n\n GOOGLE_AUTH_URL = \"https://accounts.google.com/o/oauth2/auth\"\n GOOGLE_ACCESS_TOKEN_OBTAIN_URL = \"https://oauth2.googleapis.com/token\"\n GOOGLE_USER_INFO_URL = \"https://www.googleapis.com/oauth2/v3/userinfo\"\n\n SCOPES = [\n \"https://www.googleapis.com/auth/userinfo.email\",\n \"https://www.googleapis.com/auth/userinfo.profile\",\n 
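# Round-trip demo of the unverified-decode pattern used by decode_id_token
# above (requires the PyJWT package). Skipping signature verification is
# only reasonable here because Google already validated the token; the
# payload and key below are placeholders.
import jwt  # PyJWT

token = jwt.encode({"sub": "12345", "email": "user@example.com"},
                   "placeholder-key", algorithm="HS256")
claims = jwt.decode(token, options={"verify_signature": False})
print(claims["email"])  # -> user@example.com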
\"openid\",\n ]\n\n def __init__(self):\n self._credentials = google_raw_login_get_credentials()\n\n @staticmethod\n def _generate_state_session_token(length=30, chars=UNICODE_ASCII_CHARACTER_SET):\n # This is how it's implemented in the official SDK\n rand = SystemRandom()\n state = \"\".join(rand.choice(chars) for _ in range(length))\n return state\n\n def _get_redirect_uri(self):\n domain = settings.BASE_BACKEND_URL\n api_uri = self.API_URI\n # redirect_uri = f\"{domain}{api_uri}\"\n redirect_uri = f'{domain}/accounts/redirect/'\n return redirect_uri\n\n def get_authorization_url(self):\n redirect_uri = self._get_redirect_uri()\n\n state = self._generate_state_session_token()\n\n params = {\n \"response_type\": \"code\",\n \"client_id\": self._credentials['client_id'],\n \"redirect_uri\": redirect_uri,\n \"scope\": \" \".join(self.SCOPES),\n \"state\": state,\n \"access_type\": \"offline\",\n \"include_granted_scopes\": \"true\",\n \"prompt\": \"select_account\",\n }\n\n query_params = urlencode(params)\n authorization_url = f\"{self.GOOGLE_AUTH_URL}?{query_params}\"\n\n return authorization_url, state\n\n def get_tokens(self, *, code: str) -> GoogleAccessTokens:\n redirect_uri = self._get_redirect_uri()\n\n # Reference: https://developers.google.com/identity/protocols/oauth2/web-server#obtainingaccesstokens\n data = {\n \"code\": code,\n \"client_id\": self._credentials.client_id,\n \"client_secret\": self._credentials.client_secret,\n \"redirect_uri\": redirect_uri,\n \"grant_type\": \"authorization_code\",\n }\n\n response = requests.post(self.GOOGLE_ACCESS_TOKEN_OBTAIN_URL, data=data)\n\n if not response.ok:\n raise Exception(\"Failed to obtain access token from Google.\")\n\n tokens = response.json()\n google_tokens = GoogleAccessTokens(id_token=tokens[\"id_token\"], access_token=tokens[\"access_token\"])\n\n return google_tokens\n\n def get_user_info(self, *, google_tokens: GoogleAccessTokens) -> Dict[str, Any]:\n access_token = google_tokens.access_token\n # Reference: https://developers.google.com/identity/protocols/oauth2/web-server#callinganapi\n response = requests.get(self.GOOGLE_USER_INFO_URL, params={\"access_token\": access_token})\n\n if not response.ok:\n raise Exception(\"Failed to obtain user info from Google.\")\n\n return response.json()\n\n\ndef google_raw_login_get_credentials():\n client_id = settings.GOOGLE_OAUTH2_CLIENT_ID\n client_secret = settings.GOOGLE_OAUTH2_CLIENT_SECRET\n project_id = settings.GOOGLE_OAUTH2_PROJECT_ID\n\n if not client_id:\n raise ImproperlyConfigured(\"GOOGLE_OAUTH2_CLIENT_ID missing in env.\")\n\n if not client_secret:\n raise ImproperlyConfigured(\"GOOGLE_OAUTH2_CLIENT_SECRET missing in env.\")\n\n if not project_id:\n raise ImproperlyConfigured(\"GOOGLE_OAUTH2_PROJECT_ID missing in env.\")\n\n credentials = {\n 'client_id': client_id, \n \"client_secret\": client_secret, \n 'project_id': project_id\n }\n\n return credentials","repo_name":"quad400/Learnit-Server","sub_path":"account/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":7214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72670892412","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\n\"\"\"\n\nclass Solution:\n def connect(self, root: 'Optional[Node]') -> 'Optional[Node]':\n # Without using queue\n if root == 
None:\n return None\n leftmost=root\n\n while leftmost.left:\n curr=leftmost\n while curr: # curr level \n curr.left.next = curr.right\n if curr.next:\n curr.right.next = curr.next.left\n curr=curr.next\n # curr level is over\n leftmost=leftmost.left\n return root\n\n","repo_name":"Sameer-Pal/Leetcode","sub_path":"0116-populating-next-right-pointers-in-each-node/0116-populating-next-right-pointers-in-each-node.py","file_name":"0116-populating-next-right-pointers-in-each-node.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19476089002","text":"import krpc\nimport time\nimport math\nfrom enum import Enum\n\n# ----------------------------------------------------------------------------\n# Launch parameters\n# ----------------------------------------------------------------------------\n\nG = 9.82 # standard G\nREFRESH_FREQ = 5 # refresh rate in hz\nTELEM_DELAY = 1.0 #number of seconds between telemetry updates\nMAX_PHYSICS_WARP = 3 # valid values are 0 (none) through 3 (4x)\n\n\n# ----------------------------------------------------------------------------\n# Main loop\n# ----------------------------------------------------------------------------\n\ndef main():\n '''\n main function is run when you just execute this file, but NOT when you\n import it into another file - thus you can choose to call ascent() later\n to go to space, or just use the other functions in this file.\n '''\n # AscentController('Launch').to_orbit()\n mac = ModularAscentControl('Launch')\n # mac.register_controller('throttle', AnnoyingThrottle)\n mac.to_orbit()\n\n# ----------------------------------------------------------------------------\n# Class Definitions\n# ----------------------------------------------------------------------------\nclass Status(Enum):\n IDLE = 0\n PRELAUNCH = 10\n LAUNCH = 20\n LIFTOFF = 30\n PITCH = 40\n COAST = 50\n CIRCULARIZE = 60\n FINALIZE = 70\n DONE = 80\n\nclass FuelTypes(Enum):\n LIQUIDFUEL = 'LiquidFuel'\n SOLIDFUEL = 'SolidFuel'\n\nclass MissionParameters(object):\n '''\n All mission parameters are stored in a single object to easily\n pass around\n '''\n def __init__(self,\n max_auto_stage=0,\n no_orbital_debris=True,\n orbit_alt=100000,\n grav_turn_finish=55000,\n inclination=0,\n roll=90,\n deploy_solar=True,\n max_q=20000):\n self.max_auto_stage = max_auto_stage\n self.no_orbital_debris = no_orbital_debris\n self.orbit_alt = orbit_alt\n self.grav_turn_finish = grav_turn_finish\n self.inclination = inclination\n self.roll = roll\n self.deploy_solar = deploy_solar\n self.max_q = max_q\n\nclass Display(object):\n def __init__(self):\n self.last_update = time.time()\n\n def telemetry(self, t):\n '''\n Take a Telemetry object t and display it in a pleasing way\n '''\n # define the data to be displayed in as many columns needed\n col1 = ('Apoapsis: {apoapsis:8,.0f}',\n 'Time to apo: {time_to_apo:5,.0f}',\n 'Altitude: {altitude:6,.0f}',\n 'Orbital velocity: {velocity:5,.0f}',\n 'Latitude: {lat:5.1f}',\n 'Dynamic Pressure: {q:6,.0f}')\n\n col2 = ('Periapsis: {periapsis: 8,.0f}',\n 'Time to peri: {time_to_peri:5,.0f}',\n 'Inclination: {inclination: 3.0f}\\n',\n 'Vertical speed: {vertical_speed: 5,.0f}',\n 'Longitude: {lon:5.1f}\\n',\n 'G-force: {g:4.1f}')\n # zip the columns together and display them\n print('-' * 50)\n for display_line in zip(col1, col2):\n print(' '.join(display_line).format(**t.__dict__))\n print('-' * 50)\n print('\\n')\n self.last_update = time.time()\n\n def 
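# Self-contained version of the O(1)-space level walk from the
# populating-next-right-pointers record above, runnable on a minimal
# perfect binary tree.
class Node:
    def __init__(self, val=0, left=None, right=None, next=None):
        self.val, self.left, self.right, self.next = val, left, right, next

def connect(root):
    leftmost = root
    while leftmost and leftmost.left:
        curr = leftmost
        while curr:                      # traverse one level via next pointers
            curr.left.next = curr.right  # link siblings under the same parent
            if curr.next:                # bridge across adjacent parents
                curr.right.next = curr.next.left
            curr = curr.next
        leftmost = leftmost.left         # descend to the next level
    return root

root = connect(Node(1, Node(2), Node(3)))
print(root.left.next.val)  # -> 3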
status(self, s):\n '''\n Print status message\n\n s: string\n '''\n print(s)\n\nclass Telemetry(object):\n def __init__(self, vessel, flight):\n self.apoapsis = vessel.orbit.apoapsis_altitude\n self.periapsis = vessel.orbit.periapsis_altitude\n self.time_to_apo = vessel.orbit.time_to_apoapsis\n self.time_to_peri = vessel.orbit.time_to_periapsis\n self.velocity = vessel.orbit.speed\n self.inclination = math.degrees(vessel.orbit.inclination)\n self.altitude = flight.mean_altitude\n self.vertical_speed = flight.vertical_speed\n self.lat = flight.latitude\n self.lon = flight.longitude\n self.q = flight.dynamic_pressure\n self.g = flight.g_force\n\nclass ModularAscentControl(object):\n '''\n Framework for a controlled ascent with modular components\n '''\n def __init__(self, name, mission_parameters=None):\n '''\n Mission parameters are provided through a MissionParameters\n object, if none are given, defaults are used\n\n name: string, the name of the connection in KSP\n mission_parameters: MissionParameters, things like target alt, etc.\n '''\n # flight parameters\n if mission_parameters == None:\n self.param = MissionParameters()\n else:\n self.param = mission_parameters\n # set process variables\n self.conn = krpc.connect(name=name)\n self.sc = self.conn.space_center\n self.vessel = self.sc.active_vessel\n self.flight = self.vessel.flight(self.vessel.orbit.body.reference_frame)\n self.status = Status.IDLE\n # initialize controllers\n self.controllers = ('guidance', 'throttle', 'staging',\n 'warp', 'finalize')\n self.guidance = self.create_controller(GuidanceController)\n self.throttle = self.create_controller(ThrottleController)\n self.staging = self.create_controller(StagingController)\n self.warp = self.create_controller(WarpController)\n self.finalize = self.create_controller(FinalizeController)\n # create a hook to the display functions\n # a simple print-to-console class if nothing else if provided\n self.D = Display()\n self.display_telemetry = self.D.telemetry\n self.display_status = self.D.status\n self.last_telemetry = time.time()\n\n def create_controller(self, controller):\n '''\n Mini factory for creating controller objects; to remove clutter\n from the __init__ procedure\n '''\n return controller(self.sc, self.vessel, self.flight, self.param)\n\n def register_controller(self, controller_name, controller_class):\n '''\n Allow the default controller to be replaced with an alternate\n at runtime.\n Example: mac.register_controller('throttle', FancyThrottle) --\n This will replace the default Throttle Controller with on based on a\n class called \"FancyThrottle.\"\n Controller classes must be subclassed from the Controller base class,\n\n controller_name: string, must be in ModularAscentControl.controllers\n controller_class: Controller type class\n '''\n # rudimentary checking to prevent runtime problems\n if controller_name not in self.controllers:\n raise KeyError('Invalid controller name - must be in controllers')\n if not issubclass(controller_class, Controller):\n raise TypeError('Controller Class needed')\n new_controller = controller_class(self.sc, self.vessel, self.flight)\n setattr(self, controller_name, new_controller)\n\n def to_orbit(self):\n try:\n self.set_status(Status.PRELAUNCH)\n while self.status != Status.DONE:\n self.guidance.process()\n self.throttle.process()\n self.staging.process()\n self.warp.process()\n self.finalize.process()\n # only update telemetry in set intervals\n if time.time() > self.last_telemetry + TELEM_DELAY:\n self.telemetry()\n 
self.last_telemetry = time.time()\n self.update_status()\n time.sleep(1.0 / REFRESH_FREQ)\n except Exception as e:\n # the software crashed! Prepare emergency evac!\n self.vessel.control.throttle = 0\n self.set_status('SOFTWARE CRASH: ' + str(e))\n\n def telemetry(self):\n '''\n provide a telemtry update to the outside world\n '''\n self.display_telemetry(Telemetry(self.vessel, self.flight))\n\n def set_status(self, status):\n '''\n set the status of the launch process\n propagate status throughout various controllers\n\n status: Status type enum\n '''\n self.display_status(status.name)\n self.status = status\n self.guidance.set_status(status)\n self.throttle.set_status(status)\n self.staging.set_status(status)\n self.warp.set_status(status)\n self.finalize.set_status(status)\n\n def update_status(self):\n '''\n based on the flight profile, update the status,\n activing different modes in the controllers\n\n status: Status type enum\n '''\n\n # perhaps these should be split out to separate functions?\n # make sure to run them in reverse order, from FINALIZE to\n # PRELAUNCH, else they can trigger each other...\n if self.status == Status.PRELAUNCH:\n # at this point, all controllers have gone through their\n # precheck launch; advance to launch!\n self.set_status(Status.LAUNCH)\n elif self.status == Status.LAUNCH:\n # at this point, all controllers have gone through launch;\n # advance to LIFTOFF\n self.set_status(Status.LIFTOFF)\n elif self.status == Status.LIFTOFF:\n # before advancing to pitch manuever, ensure that\n # minimum height and velocity are achieved\n frame = self.vessel.surface_velocity_reference_frame\n speed = self.magnitude(self.vessel.velocity(frame))\n min_height = self.flight.mean_altitude > 100\n min_velocity = self.vessel.orbit.speed > 50\n if min_height and min_velocity:\n self.set_status(Status.PITCH)\n elif self.status == Status.PITCH:\n # cut to coasting once apoapsis is achieved within\n # desirable limits. 
The guidance module will set a node\n # to circularize once coasting mode is entered\n if self.vessel.orbit.apoapsis_altitude > self.param.orbit_alt * 0.95:\n self.set_status(Status.COAST)\n elif self.status == Status.COAST:\n # keep coasting until it's time to burn\n node = self.vessel.control.nodes[0]\n if node.time_to <= self.calc_burn_time():\n self.set_status(Status.CIRCULARIZE)\n elif self.status == Status.CIRCULARIZE:\n # circularize will stop once periapsis\n # is close to target apoapsis_altitude\n # or when apoapsis starts to balloon\n min_peri = self.vessel.orbit.periapsis_altitude > self.param.orbit_alt * 0.90\n max_apo = self.vessel.orbit.apoapsis_altitude > self.param.orbit_alt * 1.10\n if min_peri or max_apo:\n self.set_status(Status.FINALIZE)\n elif self.status == Status.FINALIZE:\n # all controllers have cycled through their\n # finalize task\n self.set_status(Status.DONE)\n\n def calc_burn_time(self):\n '''\n Return burn time required to execute next node\n '''\n node = self.vessel.control.nodes[0]\n m = self.vessel.mass\n isp = self.vessel.specific_impulse\n dv = node.delta_v\n F = self.vessel.available_thrust\n # calculate burn time based on rocket equation\n return (m - (m / math.exp(dv / (isp * G)))) / (F / (isp * G))\n\n def magnitude(self, vector):\n '''\n Return the magnitude (length) of a vector\n '''\n return math.sqrt(sum(x * x for x in vector))\n\n\nclass Controller(object):\n '''\n Baseclass for the various controllers that are utilized during a launch\n '''\n def __init__(self, spacecenter, vessel, flight, param):\n self.spacecenter = spacecenter\n self.vessel = vessel\n self.flight = flight\n self.status = Status.IDLE\n self.param = param\n\n def process(self):\n '''\n The process class is where all the controller action happens\n The controller will perform actions, based on the readouts of\n the vessel, and the status the controller is in\n '''\n pass\n\n def set_status(self, status):\n '''\n Set the status through a function, so the controller is aware\n of status changes.\n\n status: Status type enum\n '''\n self.status = status\n\nclass GuidanceController(Controller):\n def process(self):\n '''\n Set direction during ascent\n '''\n if self.status == Status.PRELAUNCH:\n self.prelaunch()\n if self.status == Status.PITCH:\n self.pitch()\n if self.status == Status.COAST:\n self.prograde()\n if self.status == Status.CIRCULARIZE:\n try:\n self.target_node()\n except:\n self.prograde()\n if self.status == Status.FINALIZE:\n self.prograde()\n\n\n def prelaunch(self):\n '''\n Set up autopilot for initial flight headings\n '''\n self.stabilize()\n\n def pitch(self):\n '''\n Perform the pitch manuever (sometimes referred to as \"gravity turn\")\n Based on Robet Penner's easing equations (EaseOut)\n '''\n progress = self.flight.mean_altitude / self.param.grav_turn_finish\n target_pitch = 90 - (-90 * progress * (progress - 2))\n target_heading = self.inc_to_heading(self.param.inclination)\n\n # print('Heading: {:3.0f} Pitch: {:3.0f}'.format(target_heading, target_pitch))\n self.vessel.auto_pilot.engage()\n self.vessel.auto_pilot.target_pitch = target_pitch\n self.vessel.auto_pilot.target_heading = target_heading\n if self.param.roll != None:\n self.vessel.auto_pilot.target_roll = self.param.roll\n\n def set_sas_mode(self, mode):\n '''\n Turn off autopilot mode if it's not in sas mode\n Turn on autopilot in sas mode\n Set autopilot to desired mode if it's not already\n '''\n if not self.vessel.auto_pilot.sas:\n self.vessel.auto_pilot.disengage()\n 
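# Worked numeric example of the rocket-equation burn time computed by
# calc_burn_time() above; the craft numbers are invented for illustration.
import math

G = 9.82  # the script's standard gravity constant

def burn_time(m, isp, dv, F):
    # Propellant mass needed for dv, divided by mass flow rate F/(isp*G).
    return (m - m / math.exp(dv / (isp * G))) / (F / (isp * G))

# Assumed craft: 10 t mass, Isp 320 s, 240 m/s node, 200 kN thrust.
print(round(burn_time(10_000, 320, 240, 200_000), 1), "s")  # ~11.6 s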
self.vessel.auto_pilot.sas = True\n time.sleep(0.1)\n if self.vessel.auto_pilot.sas_mode != mode:\n self.vessel.auto_pilot.sas_mode = mode\n\n def target_node(self):\n '''\n Set autopilot to node\n '''\n self.set_sas_mode(self.vessel.auto_pilot.sas_mode.maneuver)\n\n def prograde(self):\n '''\n Set autopilot to prograde\n '''\n self.set_sas_mode(self.vessel.auto_pilot.sas_mode.prograde)\n\n def stabilize(self):\n '''\n Set autopilot to stabilize\n '''\n self.set_sas_mode(self.vessel.auto_pilot.sas_mode.stability_assist)\n\n def inc_to_heading(self, inc):\n '''\n Converts desired inclination to a compass heading that can be\n tracked by the autopilot\n\n inc: inclination in degrees\n '''\n if inc > 180 or inc < -180:\n return 90 #invalid entries get set to 0 inclination\n if inc >= 0:\n value = 90 - inc\n if inc < 0:\n value = -(inc - 90)\n if value < 0:\n value += 360\n return value\n\n def set_status(self, status):\n '''\n Overriding the set_status function of the controller base class\n if status set to COAST, create node for circularization\n\n status: Status type enum\n '''\n self.status = status\n if status == Status.COAST:\n self.create_circularize_node()\n\n def create_circularize_node(self):\n '''\n Create a node for circularization\n '''\n grav_param = self.vessel.orbit.body.gravitational_parameter\n apo = self.vessel.orbit.apoapsis\n sma = self.vessel.orbit.semi_major_axis\n v1 = math.sqrt(grav_param * ((2.0 / apo) - (1.0 / sma)))\n v2 = math.sqrt(grav_param * ((2.0 / apo) - (1.0 / apo)))\n v = v2 - v1\n t = self.spacecenter.ut + self.vessel.orbit.time_to_apoapsis\n self.vessel.control.add_node(t, prograde=v)\n\nclass ThrottleController(Controller):\n def process(self):\n '''\n Set throttle according to various stages of launch process\n '''\n if self.status == Status.PRELAUNCH:\n self.vessel.control.throttle = 1.0\n elif self.status in (Status.COAST, Status.FINALIZE, Status.DONE):\n self.vessel.control.throttle = 0.0\n elif self.status == Status.CIRCULARIZE:\n bt = self.calc_burn_time()\n node = self.vessel.control.nodes[0]\n # print(spacecenter.ut, bt, node.ut)\n burn = self.spacecenter.ut + bt/2 >= node.ut\n if burn:\n self.vessel.control.throttle = 1.0\n else:\n self.vessel.control.throttle = 0.0\n\n def calc_burn_time(self):\n '''\n Return burn time required to execute next node\n '''\n node = self.vessel.control.nodes[0]\n m = self.vessel.mass\n isp = self.vessel.specific_impulse\n dv = node.delta_v\n F = self.vessel.available_thrust\n # calculate burn time based on rocket equation\n return (m - (m / math.exp(dv / (isp * G)))) / (F / (isp * G))\n\n\nclass AnnoyingThrottle(Controller):\n def process(self):\n print('REPORTING FOR DUTY, SIR!!')\n if self.status == Status.PRELAUNCH:\n self.vessel.control.throttle = 1.0\n elif self.status in (Status.COAST, Status.FINALIZE, Status.DONE):\n self.vessel.control.throttle = 0.0\n elif self.status == Status.CIRCULARIZE:\n self.vessel.control.throttle = 1.0\n\n\nclass StagingController(Controller):\n def process(self):\n '''\n check if a stage should be activated\n '''\n # out of stages?\n if self.vessel.control.current_stage <= self.param.max_auto_stage:\n return\n # prevent non-final stages from reaching orbit\n self.cleanup_debris()\n # check current stage for staging\n interstage = True\n for fueltype in FuelTypes:\n if self.carries_fuel(fueltype):\n interstage = False\n if not self.has_fuel(fueltype):\n self.vessel.control.activate_next_stage()\n return\n if interstage:\n self.vessel.control.activate_next_stage()\n\n def 
cleanup_debris(self):\n '''\n If no_debris status is active, perform check to ensure that all\n non-final stages are released before reaching orbit\n '''\n # check for validity\n if not self.param.no_orbital_debris:\n return\n if self.status != Status.CIRCULARIZE:\n return\n if self.vessel.orbit.periapsis_altitude < 10000.0:\n return\n if self.vessel.control.current_stage <= self.param.max_auto_stage + 1:\n return\n # release all non-final stages; first, cut throttle to prevent\n # the active stage from burning itself into orbit\n current_throttle = self.vessel.control.throttle\n self.vessel.control.throttle = 0\n time.sleep(0.5) # allow time for engines to cut\n while self.vessel.control.current_stage > self.param.max_auto_stage + 1:\n self.vessel.control.activate_next_stage()\n # allow time for fairings, etc to float away before restoring throttle\n time.sleep(1.5)\n self.vessel.control.throttle = current_throttle\n\n def resource(self):\n '''\n return the resources in the decouple stage\n '''\n return self.vessel.resources_in_decouple_stage(\n self.vessel.control.current_stage - 1,\n cumulative=False)\n\n def carries_fuel(self, fueltype):\n '''\n return True if a fuel capacity for the fueltype is available\n in the current stage\n\n fueltype: FuelType enum\n '''\n return self.resource().max(fueltype.value) > 0\n\n def has_fuel(self, fueltype):\n '''\n return True if fuel of the fueltype is actually available in\n the current stage\n\n fueltype: FuelType enum\n '''\n return self.resource().amount(fueltype.value) > 0\n\nclass WarpController(Controller):\n def process(self):\n sc = self.spacecenter\n if self.status == Status.COAST and self.in_atmo():\n # no reason to not max warp while coastin in atmosphere\n sc.physics_warp_factor = MAX_PHYSICS_WARP\n elif self.status == Status.COAST and sc.physics_warp_factor > 0:\n # if not in atmosphere, check and still on physics warp:\n # disable physics warp and warp to node\n sc.physics_warp_factor = 0\n bt = self.calc_burn_time()\n node=self.vessel.control.nodes[0]\n sc.warp_to(node.ut - (bt / 2.0) - 5.0)\n\n def calc_burn_time(self):\n '''\n Return burn time required to execute next node\n '''\n node = self.vessel.control.nodes[0]\n m = self.vessel.mass\n isp = self.vessel.specific_impulse\n dv = node.delta_v\n F = self.vessel.available_thrust\n # calculate burn time based on rocket equation\n return (m - (m / math.exp(dv / (isp * G)))) / (F / (isp * G))\n\n def in_atmo(self):\n '''\n Return True if vessel still in atmosphere\n '''\n alt = self.flight.mean_altitude\n atmo = self.vessel.orbit.body.atmosphere_depth\n return alt <= atmo\n\nclass FinalizeController(Controller):\n def process(self):\n # check for status\n if self.status != Status.FINALIZE:\n return\n # perform finalize tasks\n self.vessel.control.remove_nodes()\n if self.param.deploy_solar:\n self.vessel.control.solar_panels = True\n\n# ----------------------------------------------------------------------------\n# Activate main loop, assuming we are executing THIS file explicitly.\n# ----------------------------------------------------------------------------\nif __name__ == \"__main__\" :\n main()\n","repo_name":"bart-r-willems/krpc-launch","sub_path":"launch.py","file_name":"launch.py","file_ext":"py","file_size_in_byte":22103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34257170882","text":"def modpow(x, y, m):\n p = x\n ans = 1\n for i in range(30):\n if y & (1 << i):\n ans = (ans * p) % m\n p = (p * p) % m\n 
return ans\n\n\nMOD = 10**9 + 7\n\na, b = map(int, input().split())\nprint(modpow(a, b, MOD))\n\n# a, b = map(int, input().split())\n# print(pow(a, b, 1000000007))\n","repo_name":"hy-sksem/AtCoder","sub_path":"math-and-algorithm/050_Power.py","file_name":"050_Power.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6647575715","text":"import numpy as np\nimport xarray as xr\nimport re\nfrom math import ceil, sqrt\n\ndef distance(val, ref):\n return abs(ref - val)\nvectDistance = np.vectorize(distance)\n\ndef getClosest(sortedMatrix, column, val):\n while len(sortedMatrix) > 3:\n half = int(len(sortedMatrix) / 2)\n sortedMatrix = sortedMatrix[-half - 1:] if sortedMatrix[half, column] < val else sortedMatrix[: half + 1]\n if len(sortedMatrix) == 1:\n result = sortedMatrix[0].copy()\n result[column] = val\n return result\n else:\n safecopy = sortedMatrix.copy()\n safecopy[:, column] = vectDistance(safecopy[:, column], val)\n minidx = np.argmin(safecopy[:, column])\n safecopy = safecopy[minidx, :].A1\n safecopy[column] = val\n return safecopy\n\ndef convert(column, samples, matrix):\n return np.matrix([getClosest(matrix, column, t) for t in samples])\n\ndef valueOrEmptySet(k, d):\n return (d[k] if isinstance(d[k], set) else {d[k]}) if k in d else set()\n\ndef mergeDicts(d1, d2):\n \"\"\"\n Creates a new dictionary whose keys are the union of the keys of two\n dictionaries, and whose values are the union of values.\n\n Parameters\n ----------\n d1: dict\n dictionary whose values are sets\n d2: dict\n dictionary whose values are sets\n\n Returns\n -------\n dict\n A dict whose keys are the union of the keys of two dictionaries,\n and whose values are the union of values\n\n \"\"\"\n res = {}\n for k in d1.keys() | d2.keys():\n res[k] = valueOrEmptySet(k, d1) | valueOrEmptySet(k, d2)\n return res\n\ndef is_float(string):\n try:\n float(string)\n return True\n except ValueError:\n return False\n\ndef extractCoordinates(filename):\n \"\"\"\n Scans the header of an Alchemist file in search of the variables.\n\n Parameters\n ----------\n filename : str\n path to the target file\n mergewith : dict\n a dictionary whose dimensions will be merged with the returned one\n\n Returns\n -------\n dict\n A dictionary whose keys are strings (coordinate name) and values are\n lists (set of variable values)\n\n \"\"\"\n with open(filename, 'r') as file:\n regex = re.compile(' (?P[a-zA-Z]+) = (?P(?:[-+]?\\d*\\.?\\d+(?:[eE][-+]?\\d+)?)|[a-zA-Z-_]*)?')\n dataBegin = re.compile('\\d')\n for line in file:\n match = regex.findall(line)\n if match:\n return {var : (float(value) if is_float(value) else value) for var, value in match}\n elif dataBegin.match(line[0]):\n return {}\n\ndef extractVariableNames(filename):\n \"\"\"\n Gets the variable names from the Alchemist data files header.\n\n Parameters\n ----------\n filename : str\n path to the target file\n\n Returns\n -------\n list of list\n A matrix with the values of the csv file\n\n \"\"\"\n with open(filename, 'r') as file:\n dataBegin = re.compile('\\d')\n lastHeaderLine = ''\n for line in file:\n if dataBegin.match(line[0]):\n break\n else:\n lastHeaderLine = line\n if lastHeaderLine:\n regex = re.compile(' (?P\\S+)')\n return regex.findall(lastHeaderLine)\n return []\n\ndef openCsv(path):\n \"\"\"\n Converts an Alchemist export file into a list of lists representing the matrix of values.\n\n Parameters\n ----------\n path : str\n path to the target file\n\n 
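# Variant of the modpow record above without the fixed 30-iteration cap:
# the while-loop form of square-and-multiply handles exponents of any size.
def modpow(x, y, m):
    ans, p = 1, x % m
    while y:
        if y & 1:              # multiply in the current power when bit is set
            ans = ans * p % m
        p = p * p % m          # square for the next bit
        y >>= 1
    return ans

assert modpow(7, 123456789, 10**9 + 7) == pow(7, 123456789, 10**9 + 7)
print(modpow(2, 10, 1000))  # -> 24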
Returns\n -------\n list of list\n A matrix with the values of the csv file\n\n \"\"\"\n regex = re.compile('\\d')\n with open(path, 'r') as file:\n lines = filter(lambda x: regex.match(x[0]), file.readlines())\n return [[float(x) for x in line.split()] for line in lines]\n\nif __name__ == '__main__':\n # CONFIGURE SCRIPT\n directory = 'data'\n charts_dir = 'charts/'\n pickleOutput = 'data_summary'\n experiments = ['simulations']\n floatPrecision = '{: 0.2f}'\n seedVars = ['Seed']\n timeSamples = 2000\n minTime = 0\n maxTime = 2000\n timeColumnName = 'time'\n logarithmicTime = False\n \n # Setup libraries\n np.set_printoptions(formatter={'float': floatPrecision.format})\n # Read the last time the data was processed, reprocess only if new data exists, otherwise just load\n import pickle\n import os\n newestFileTime = max(os.path.getmtime(directory + '/' + file) for file in os.listdir(directory))\n try:\n lastTimeProcessed = pickle.load(open('timeprocessed', 'rb'))\n except:\n lastTimeProcessed = -1\n shouldRecompute = newestFileTime != lastTimeProcessed\n datasets = dict()\n if not shouldRecompute:\n try:\n #means = pickle.load(open(pickleOutput + '_mean', 'rb'))\n #stdevs = pickle.load(open(pickleOutput + '_std', 'rb'))\n datasets = pickle.load(open(pickleOutput + '_datasets', 'rb'))\n except:\n shouldRecompute = True\n \n if shouldRecompute:\n timefun = np.logspace if logarithmicTime else np.linspace\n means = {}\n stdevs = {}\n for experiment in experiments:\n # Collect all files for the experiment of interest\n import fnmatch\n allfiles = filter(lambda file: fnmatch.fnmatch(file, experiment + '_*.txt'), os.listdir(directory))\n allfiles = [directory + '/' + name for name in allfiles]\n allfiles.sort()\n # From the file name, extract the independent variables\n dimensions = {}\n for file in allfiles:\n dimensions = mergeDicts(dimensions, extractCoordinates(file))\n dimensions = {k: sorted(v) for k, v in dimensions.items()}\n # Add time to the independent variables\n dimensions[timeColumnName] = range(0, timeSamples)\n # Compute the matrix shape\n shape = tuple(len(v) for k, v in dimensions.items())\n # Prepare the Dataset\n dataset = xr.Dataset()\n for k, v in dimensions.items():\n dataset.coords[k] = v\n varNames = extractVariableNames(allfiles[0])\n for v in varNames:\n if v != timeColumnName:\n novals = np.ndarray(shape)\n novals.fill(float('nan'))\n dataset[v] = (dimensions.keys(), novals)\n # Compute maximum and minimum time, create the resample\n timeColumn = varNames.index(timeColumnName)\n allData = { file: np.matrix(openCsv(file)) for file in allfiles }\n computeMin = minTime is None\n computeMax = maxTime is None\n if computeMax:\n maxTime = float('-inf')\n for data in allData.values():\n maxTime = max(maxTime, data[-1, timeColumn])\n if computeMin:\n minTime = float('inf')\n for data in allData.values():\n minTime = min(minTime, data[0, timeColumn])\n #print(allData)\n timeline = timefun(minTime, maxTime, timeSamples)\n # Resample\n for file in allData:\n allData[file] = convert(timeColumn, timeline, allData[file])\n \n # Populate the dataset\n for file, data in allData.items():\n dataset[timeColumnName] = timeline\n for idx, v in enumerate(varNames):\n if v != timeColumnName:\n darray = dataset[v]\n experimentVars = extractCoordinates(file)\n darray.loc[experimentVars] = data[:, idx].A1\n #print(dataset)\n # Fold the dataset along the seed variables, producing the mean and stdev datasets\n #means[experiment] = dataset.mean(seedVars)\n #stdevs[experiment] = 
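# The header-parsing regex from extractCoordinates() above, exercised on a
# fabricated Alchemist-style header line (the header text is an assumed
# example, not taken from real data).
import re

header = "# Seed = 4.0 Algorithm = ff_linpro"
regex = re.compile(
    r' (?P<varName>[a-zA-Z]+) = '
    r'(?P<value>(?:[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?)|[a-zA-Z-_]*)?')

def maybe_float(s):
    try:
        return float(s)
    except ValueError:
        return s

print({var: maybe_float(val) for var, val in regex.findall(header)})
# -> {'Seed': 4.0, 'Algorithm': 'ff_linpro'}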
dataset.std(seedVars)\n datasets[experiment] = dataset\n # Save the datasets\n #pickle.dump(means, open(pickleOutput + '_mean', 'wb'), protocol=-1)\n #pickle.dump(stdevs, open(pickleOutput + '_std', 'wb'), protocol=-1)\n pickle.dump(datasets, open(pickleOutput + '_datasets', 'wb'), protocol=-1)\n pickle.dump(newestFileTime, open('timeprocessed', 'wb'))\n\n # Prepare the charting system\n import matplotlib\n import matplotlib.pyplot as plt\n import matplotlib.cm as cmx\n from mpl_toolkits.mplot3d import Axes3D # needed for 3d projection\n from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n figure_size=(6, 6)\n matplotlib.rcParams.update({'axes.titlesize': 13})\n matplotlib.rcParams.update({'axes.labelsize': 12})\n \n kcovColors = ['#00d0ebFF','#61a72cFF','#e30000FF']\n kcovEcolors = ['#0300ebFF', '#8cff9dFF', '#f5b342FF'] # error bars\n kcovVariables = ['1-coverage','2-coverage','3-coverage']\n kcovTrans = ['1-cov','2-cov','3-cov']\n algos = ['ff_linpro', 'zz_linpro','ff_linproF', 'zz_linproF', 'ff_nocomm', 'nocomm', 'sm_av', 'bc_re']#data.coords['Algorithm'].data.tolist()\n \n data = datasets['simulations']\n # now load data from previous simulations\n #print(\"loading old data...\")\n #oldData = pickle.load(open('data_summary_datasets_20200106', 'rb'))['simulations']\n #print(\"merging data...\")\n #data = xr.combine_by_coords([data, oldData])\n #print(\"generating charts...\")\n #mergedDatasets = {'simulations': data}\n #pickle.dump(mergedDatasets, open(pickleOutput + '_datasets_merged', 'wb'), protocol=-1)\n \n dataMean = data.mean('time')\n dataKcovsMean = dataMean.mean('Seed')\n dataKcovsStd = dataMean.std('Seed')\n \n dataDist = data.sum('time').assign(MovEfficiency = lambda d: d.ObjDist / d.CamDist)\n dataDistMean = dataDist.mean('Seed')\n dataDistStd = dataDist.std('Seed')\n \n simRatios = data.coords['CamObjRatio'].data.tolist()\n simRatios.reverse()\n commRanges = data.coords['CommunicationRange'].data.tolist()\n commRanges.reverse()\n \n def noOdds(lst): # replaces odds numbers in lst with empty strings\n return list(map(lambda x: x if round(x * 10, 0) % 2 == 0 else '', lst))\n \n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n kcov 3D\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n #oldParams = matplotlib.rcParams.copy()\n #labelsize = 22\n #titlesize = 25\n #matplotlib.rcParams.update({'axes.titlesize': titlesize})\n #matplotlib.rcParams.update({'axes.labelsize': labelsize})\n def getSurfData(dataarray, xcord, ycord):\n xs = []\n ys = []\n zs = []\n for xd in dataarray:\n for yd in xd:\n xs.append(xd[xcord].values.tolist())\n ys.append(yd[ycord].values.tolist())\n zs.append(yd.values.tolist())\n return xs, ys, zs\n \n fig = plt.figure(figsize=(12,16))\n for idx, algo in enumerate(algos):\n cols = 2\n rows = ceil(len(algos) / 2)\n ax = fig.add_subplot(rows,cols,idx+1, projection='3d')\n\n #ax.tick_params(labelsize=labelsize)\n ax.set_xlabel(\"r\")\n ax.set_ylabel(\"n/m\")\n if idx%cols == cols-1:\n ax.set_zlabel(\"Coverage (%)\")\n #else:\n # ax.set_zticklabels([])\n ax.set_xlim([max(commRanges),min(commRanges)])\n ax.set_ylim([min(simRatios),max(simRatios)])\n ax.set_zlim([0,1])\n ax.set_title(algo)\n \n fakeLinesForLegend = []\n def kcov(whichKCov,x,y):\n return dataKcovsMean[whichKCov].sel(Algorithm=algo, CamObjRatio=y, CommunicationRange=x).values.tolist()#[0]\n forKcovVars = [kcovVariables[0], kcovVariables[-1]]\n forKcovTrans = []\n for k, whichKCov in enumerate(kcovVariables):\n if not whichKCov in forKcovVars:\n continue\n x,y,z = 
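# Simplified numpy take on the getClosest/convert resampling used above: for
# each requested time sample, keep the row whose time column is nearest, then
# overwrite that column with the sample itself. Data values are made up.
import numpy as np

def nearest_rows(data, column, samples):
    samples = np.asarray(samples, dtype=float)
    # (len(samples), len(data)) distance matrix; argmin picks the closest row.
    idx = np.abs(data[:, column][None, :] - samples[:, None]).argmin(axis=1)
    rows = data[idx].copy()
    rows[:, column] = samples
    return rows

data = np.array([[0.0, 10.0], [1.2, 20.0], [2.1, 30.0]])
print(nearest_rows(data, 0, [0.0, 1.0, 2.0]))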
getSurfData(dataKcovsMean[whichKCov].sel(Algorithm=algo), 'CommunicationRange', 'CamObjRatio')\n ax.plot_trisurf(x,y,z, linewidth=2, antialiased=False, shade=True, alpha=0.5, color=kcovColors[k])\n fakeLinesForLegend.append(matplotlib.lines.Line2D([0],[0], linestyle='none', c=kcovColors[k], marker='o'))\n forKcovTrans.append(kcovTrans[k])\n if idx == cols-1:\n ax.legend(fakeLinesForLegend, forKcovTrans, numpoints=1)\n \n plt.tight_layout()\n fig.savefig(charts_dir + 'KCov_3D.pdf')\n plt.close(fig)\n #matplotlib.rcParams.update(oldParams)\n \n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n kcov in time\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n timeLimit = 100\n selAlgos = ['ff_linpro', 'zz_linpro', 'ff_nocomm', 'nocomm']\n selRatios = ['0.4', '0.8', '1.2', '1.8']\n selKcov = ['1-coverage', '3-coverage']\n selCommRange = 100\n dataInTime = data.mean('Seed')\n for whichKCov in selKcov:\n rows = 2\n cols = 2\n fig, axes = plt.subplots(rows, cols, figsize=(8,5), sharex='col', sharey='row')\n for idx, whichRatio in enumerate(selRatios):\n r = int(idx / cols)\n c = int(idx % cols)\n xdata = dataInTime.sel(CamObjRatio=whichRatio, CommunicationRange=selCommRange, Algorithm=selAlgos)['time']\n ydata = dataInTime.sel(CamObjRatio=whichRatio, CommunicationRange=selCommRange, Algorithm=selAlgos)[whichKCov].transpose()\n timeLimitIdx = next((i for i,x in enumerate(xdata) if x >= timeLimit)) # first idx of time > timeLimit\n xdata = xdata[:timeLimitIdx]\n ydata = ydata[:timeLimitIdx]\n axes[r][c].plot(xdata, ydata)\n axes[r][c].set_title('n/m = ' + whichRatio)\n axes[r][c].set_ylim([0,1])\n if c == 0:\n axes[r][c].set_ylabel(whichKCov + ' (%)')\n if r == rows-1:\n axes[r][c].set_xlabel('t')\n if r == 0 and c == cols -1:\n axes[r][c].legend(ydata.coords['Algorithm'].data.tolist())\n fig.savefig(charts_dir + whichKCov + '_InTime.pdf')\n plt.close(fig)\n \n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n heatmaps\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n simRatios.reverse()\n commRanges.reverse()\n import seaborn as sns\n rows = 4\n cols = 2\n gridspec_kw={'width_ratios': [1,1,0.05], 'height_ratios': [1,1,1,1]}\n for whichKCov in kcovVariables:\n fig, axes = plt.subplots(rows, cols+1, figsize=(8,10), sharex='col', gridspec_kw=gridspec_kw)\n plt.xlim([min(simRatios), max(simRatios)])\n plt.ylim([0,1])\n for idx,algo in enumerate(algos):\n r = int(idx / cols)\n c = int(idx % cols)\n data = dataKcovsMean.sel(Algorithm=algo)[whichKCov]\n cbar = idx%cols == cols - 1 # only charts to the right have the bar\n ax = sns.heatmap(data, vmin=0, vmax=1, ax=axes[r][c], cbar=cbar, cbar_ax=axes[r][cols], cbar_kws={'label': whichKCov + ' (%)'})\n if idx%cols == 0:\n ax.set_ylabel('r')\n ax.set_yticklabels([str(int(x)) for x in commRanges])\n else:\n ax.set_yticklabels([])\n if idx >= cols * (rows - 1):\n ax.set_xlabel('n/m')\n ax.set_xticklabels(noOdds(simRatios))\n \n ax.invert_yaxis()\n ax.set_title(algo)\n fig.savefig(charts_dir + whichKCov + '_heatmap.pdf')\n plt.close(fig)\n simRatios.reverse()\n commRanges.reverse()\n \n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n kcov lines\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n simRatios.reverse()\n commRanges.reverse()\n for commRange in commRanges:\n fig = plt.figure(figsize=(8,10))\n for idx,algo in enumerate(algos):\n #size = ceil(sqrt(len(algos)))\n rows = 4\n cols = 2\n ax = fig.add_subplot(rows,cols,idx+1)\n ax.set_ylim([0,1])\n ax.set_xlim([min(simRatios) - 0.1, max(simRatios) + 0.1])\n 
#plt.xticks(rotation=35, ha='right')\n if idx%cols == 0:\n ax.set_ylabel(\"Coverage (%)\")\n else:\n ax.set_yticklabels([])\n if idx >= cols * (rows - 1):\n ax.set_xlabel(\"n/m\")\n #if idx%rows > 0:\n # ax.set_yticklabels([])\n ax.set_title(algo)\n ax.set_xticks([0] + simRatios + [max(simRatios) + 0.1])\n ax.set_xticklabels([\"\"] + noOdds(simRatios) + [\"\"])\n #if idx < 6:\n # ax.set_xticklabels([])\n chartdataMean = dataKcovsMean.sel(Algorithm=algo, CommunicationRange=commRange)\n chartdataStd = dataKcovsStd.sel(Algorithm=algo, CommunicationRange=commRange)\n #xax = np.linspace(min(simRatios),max(simRatios),len(simRatios))\n for i,s in enumerate(kcovVariables):\n values = chartdataMean[s].values.tolist()\n #values.reverse()\n errors = chartdataStd[s].values.tolist()\n #.reverse()\n ax.plot(simRatios, values, label=kcovTrans[i], color=kcovColors[i])\n for j,r in enumerate(simRatios):\n ax.errorbar(r, values[j], yerr=errors[j], fmt='', color=kcovColors[i], elinewidth=1, capsize=0)\n if idx == cols-1:\n ax.legend()\n plt.tight_layout()\n fig.savefig(charts_dir + 'KCov_lines_CommRange-'+str(int(commRange))+'_CamObjRatio-variable.pdf')\n plt.close(fig)\n \n \n for simRatio in simRatios:\n fig = plt.figure(figsize=(8,10))\n for idx,algo in enumerate(algos):\n #size = ceil(sqrt(len(algos)))\n rows = 4\n cols = 2\n ax = fig.add_subplot(rows,cols,idx+1)\n minRange = min(commRanges) - 10\n maxRange = max(commRanges) + 10\n ax.set_ylim([0,1])\n ax.set_xlim([minRange, maxRange])\n plt.xticks(rotation=35, ha='right')\n if idx%cols == 0:\n ax.set_ylabel(\"Coverage (%)\")\n if idx < cols:\n ax.set_xlabel(\"r\")\n if idx%rows != 0:\n ax.set_yticklabels([])\n ax.set_title(algo)\n ax.set_xticks([minRange] + commRanges + [maxRange])\n ax.set_xticklabels([\"\"] + [str(round(c)) for c in commRanges] + [\"\"])\n chartdataMean = dataKcovsMean.sel(Algorithm=algo, CamObjRatio=simRatio)\n chartdataStd = dataKcovsStd.sel(Algorithm=algo, CamObjRatio=simRatio)\n for i,s in enumerate(kcovVariables):\n values = chartdataMean[s].values.tolist()\n errors = chartdataStd[s].values.tolist()\n ax.plot(commRanges, values, label=kcovTrans[i], color=kcovColors[i])\n for j,r in enumerate(commRanges):\n ax.errorbar(r, values[j], yerr=errors[j], fmt='', color=kcovColors[i], elinewidth=1, capsize=0)\n if idx == cols-1:\n ax.legend()\n plt.tight_layout()\n fig.savefig(charts_dir + 'KCov_lines_CommRange-variable_CamObjRatio-'+str(simRatio)+'.pdf')\n plt.close(fig)\n \n simRatios.reverse()\n commRanges.reverse()\n \n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n LaTeX table\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n import textwrap\n selKcov = '3-coverage'\n selCommRanges = [25, 50, 100]\n selRatios = [0.2, 0.6, 1, 1.2, 1.6, 2]\n txt = r'''\n \\begin{table}\n \\centering\n \\tiny\n \\begin{tabular}{lccccccc}%{lcccccccccccccccccccccccc}\n\n \\toprule\n \\multirow{2}{*}{$r$} & \\multirow{2}{*}{\\textsc{Approach}} \n & \\multicolumn{6}{c}{\\textsc{Ratio} $n/m$}\\\\\n \\cline{3-8}\n & & ''' + '&'.join(['{:.1f}'.format(r) for r in selRatios]) + r'\\\\'\n for commRange in selCommRanges:\n txt += \"\\n\\n \" + r'\\midrule \\multirow{8}{*}{' + str(commRange) + \"}\\n\"\n for algo in algos:\n txt += \" & \" + algo.replace('_', r'\\_') + ' '\n for ratio in selRatios:\n txt += '& {:.2f}'.format(dataKcovsMean[selKcov].sel(Algorithm=algo, CommunicationRange=commRange, CamObjRatio=ratio).values.tolist())\n txt += ' ({:.2f}'.format(dataKcovsStd[selKcov].sel(Algorithm=algo, CommunicationRange=commRange, 
CamObjRatio=ratio).values.tolist()) + ') '\n txt += r'\\\\' + \"\\n\"\n txt += r'''\n \\bottomrule\n \\end{tabular}\n \\caption{Comparison of mean $OMC_k$ achieved by different approaches with \n different communications ranges $r$ and different ratios for \n objects/cameras, standard deviation is indicated in brackets.}\n \\label{tab:results}\n \\end{table}\n '''\n txt = textwrap.dedent(txt.strip())\n with open(charts_dir + 'KCov_latex.txt', 'w') as f:\n f.write(txt)\n \n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n kcoverage comparison\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n for r,commRange in enumerate(commRanges):\n fig = plt.figure(figsize=(22,20))\n for j,simRatio in enumerate(simRatios):\n # rows, columns, index\n #size = ceil(sqrt(len(simRatios)))\n rows = 4#size-1\n cols = 5#size\n ax = fig.add_subplot(rows, cols,j+1)\n ax.set_ylim([0,1])\n #if j bool:\n emojis = [\"⭕\", \"❌\"]\n for emoji in emojis:\n await message.add_reaction(emoji)\n\n def _check(reaction, user):\n return reaction.message.id == message.id and user == ctx.author\n\n try:\n reaction, _ = await ctx.bot.wait_for(\"reaction_add\", check=_check, timeout=60.0)\n return reaction.emoji == \"⭕\"\n except asyncio.TimeoutError:\n return False\n\n\nasync def wait_for_reaction(\n ctx: commands.Context, message: discord.Message, emoji: str\n):\n await message.add_reaction(emoji)\n\n def _check(reaction, user):\n return (\n reaction.message.id == message.id\n and user == ctx.author\n and str(reaction) == emoji\n )\n\n await ctx.bot.wait_for(\"reaction_add\", check=_check)\n\n\nasync def wait_for_multiple_reactions(\n ctx: commands.Context, message: discord.Message, emojis: List[str]\n) -> str:\n await asyncio.gather(*[message.add_reaction(emoji) for emoji in emojis])\n\n def _check(reaction, user):\n return (\n reaction.message.id == message.id\n and user == ctx.author\n and str(reaction) in emojis\n )\n\n reaction, _ = await ctx.bot.wait_for(\"reaction_add\", check=_check)\n return str(reaction)\n","repo_name":"team-crescendo/3rd-anniversary-bot","sub_path":"src/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"14838168411","text":"# Задайте список. 
Write a program that determines whether a certain number is present in the given list of strings.\n\nimport random\n\ndef input_number(text: str) -> int:\n # keep asking until a valid integer is entered\n while True:\n try:\n return int(input(text))\n except ValueError:\n print(\"this is not a number!\")\n\ndef create_array(size=10):\n numbers = [f\"{random.randint(0,10)}\" for i in range(size)] # renamed from `list` to avoid shadowing the builtin\n return numbers\n\ndef find_num(num_list, search_num: int) -> bool:\n return True if [elem for elem in num_list if str(search_num) == elem] else False\n\nnumbers = create_array()\nfound = find_num(numbers, input_number(\"Enter a number to check whether it is in the list: \"))\nprint(numbers)\nprint(found)","repo_name":"externalcharm/pythonEducation","sub_path":"seminar6/task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74902664573","text":"import os\nfrom flask import Flask, flash, request, redirect, url_for, render_template\nfrom werkzeug.utils import secure_filename\nfrom PIL import Image\nimport torchvision.transforms as T\nimport torchvision\nimport numpy as np\n\n\nUPLOAD_FOLDER = '/photo'\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ncnt = 0\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n # print(file.read())\n\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n\n if file and allowed_file(file.filename):\n # filename = file.filename\n # file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n fl = open(str(cnt) + '.' 
+ file.filename.rsplit('.', 1)[1].lower(), 'wb+')\n fl.write(file.read())\n fl.close()\n\n\n im = Image.open(str(cnt) + \".jpg\")\n\n a = np.array(im)\n t = T.Compose([T.ToTensor()])(im)\n c = []\n c.append(t)\n model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\n model.eval()\n # res: one dict per input image, holding 'boxes', 'labels' and 'scores' tensors\n res = model(c)\n\n humanScore: float = 0.0\n ind = -1\n for key, t in res[0].items():\n if (key == 'labels'):\n for i in range(len(t)):\n if (t[i] == 1):\n ind = i\n if (key == 'scores' and ind != -1):\n humanScore = t[ind]\n\n if (humanScore > 0.75):\n return redirect(url_for('ok'))\n else:\n return redirect(url_for('no'))\n else:\n flash('Error')\n return redirect(request.url)\n return render_template('base.html')\n\n\nfrom flask import send_from_directory\n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n return send_from_directory(app.config['UPLOAD_FOLDER'],\n filename)\n\n@app.route('/ok')\ndef ok():\n return render_template('ok.html')\n\n\n@app.route('/no')\ndef no():\n # endpoint needed by url_for('no') in upload_file; template name assumed by symmetry with ok.html\n return render_template('no.html')\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"ilnurvaleev/HomeWork","sub_path":"classification of photos/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"26881198952","text":"# -*- coding: utf-8 -*-\n# Create your views here.\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom lists_app.forms import CreateOrderForm, CreateItemForm, UpdateOrderForm\nfrom lists_app.models import Order\n\n\ndef home(req):\n orders = Order.objects.all()\n return render(req, 'list_app/all_orders.html', {'all_orders': orders})\n\n\ndef create_order(req):\n message = None\n if req.POST:\n form = CreateOrderForm(req.POST)\n if form.is_valid():\n form.save()\n message = u\"Order added successfully!\"\n else:\n form = CreateOrderForm()\n return render(req, 'list_app/create_order.html', {'form': form, 'message': message})\n\n\ndef create_item(req):\n redirect_url = \"/\"\n if req.POST:\n form = CreateItemForm(req.POST)\n if form.is_valid():\n form.save()\n redirect_address = req.POST.get('from', '/')\n return HttpResponseRedirect(redirect_address)\n else:\n redirect_url = req.GET.get('from', '/')\n form = CreateItemForm()\n return render(req, 'list_app/create_item.html', {'form': form, 'from': redirect_url})\n\n\ndef update_order(request, pk):\n order = get_object_or_404(Order, id=pk)\n if request.POST:\n form = UpdateOrderForm(request.POST, instance=order)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(\"/home\")\n else:\n form = UpdateOrderForm(instance=order)\n\n executor_verbose_name = Order._meta.get_field(\"executor\").verbose_name.title()\n item_verbose_name = order.item._meta.verbose_name\n\n return render(request, 'list_app/show_order.html',\n {'form': form, 'order': order, 'executor_verbose_name': executor_verbose_name,\n 'item_verbose_name': item_verbose_name})","repo_name":"logart/lists","sub_path":"lists_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"539040317","text":"from django.db import models\nfrom django.contrib.postgres.fields import JSONField\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom store.models import Vote\nfrom base import mods\nfrom base.models import Auth, Key\nfrom django.utils import timezone\nfrom postproc.models 
import PostprocTypeEnum\nfrom datetime import datetime\nfrom django.core.validators import URLValidator\nimport requests\nfrom io import StringIO\nfrom django.core.exceptions import ValidationError\nfrom django.utils.safestring import mark_safe\n\nclass Question(models.Model):\n desc = models.TextField()\n TYPES = [\n ('C', 'Classic question'),\n ('S', 'Score question'),\n ('R', 'Ranked question'),\n ('B', 'Yes/No question'),\n ('I', 'Image'),\n ]\n type = models.CharField(max_length=1, choices=TYPES, default='C') \n create_ordination = models.BooleanField(verbose_name='Create ordination', default=False)\n\n def save(self):\n super().save()\n if self.type == 'B':\n import voting.views # Importo aquí porque si lo hago arriba da error por importacion circular\n voting.views.create_yes_no_question(self)\n elif self.type == 'R' and self.create_ordination:\n import voting.views\n voting.views.create_ranked_question(self)\n elif self.type == 'S':\n import voting.views\n voting.views.create_score_question(self)\n\n def __str__(self):\n return self.desc\n\n\nclass QuestionOption(models.Model):\n question = models.ForeignKey(Question, related_name='options', on_delete=models.CASCADE)\n number = models.PositiveIntegerField(blank=True, null=True)\n option = models.TextField()\n\n def clean(self):\n if self.question.type == 'I':\n validator = URLValidator()\n validator(self.option)\n image_formats = (\"image/png\", \"image/jpeg\", \"image/jpg\")\n r = requests.get(self.option)\n if r.headers[\"content-type\"] not in image_formats:\n raise ValidationError(\"Url does not contain a compatible image\")\n\n def save(self, *args, **kwargs):\n if self.question.type == 'B':\n if not self.option == 'Sí' and not self.option == 'No':\n return \"\"\n else:\n if not self.number:\n self.number = self.question.options.count() + 2\n return super().save()\n\n def __str__(self):\n return '{} ({})'.format(self.option, self.number)\n \n def image_tag(self):\n from django.utils.html import escape\n if self.question.type == 'I':\n return mark_safe(u'' % escape(self.option))\n else:\n return \"\"\n image_tag.short_description = 'Image'\n image_tag.allow_tags = True\n\n\nclass Voting(models.Model):\n name = models.CharField(max_length=200)\n desc = models.TextField(blank=True, null=True)\n question = models.ForeignKey(Question, related_name='voting', on_delete=models.CASCADE)\n\n postproc_type = models.CharField(max_length=255, choices=PostprocTypeEnum.choices(), default='IDENTITY')\n number_seats = models.PositiveIntegerField(default=1)\n\n start_date = models.DateTimeField(blank=True, null=True)\n end_date = models.DateTimeField(blank=True, null=True)\n\n future_start = models.DateTimeField(blank=True, null=True)\n future_stop = models.DateTimeField(blank=True, null=True)\n\n pub_key = models.OneToOneField(Key, related_name='voting', blank=True, null=True, on_delete=models.SET_NULL)\n auths = models.ManyToManyField(Auth, related_name='votings')\n\n tally = JSONField(blank=True, null=True)\n postproc = JSONField(blank=True, null=True)\n\n file = models.FileField(blank=True)\n\n def read_file(self):\n import warnings\n warnings.filterwarnings(\"ignore\")\n text_buffer = self.file.open(\"rb\")\n auths = []\n voting_desc = \"\"\n voting_name = \"\"\n question_desc = \"\"\n question_type = \"\"\n options = []\n future_start = None\n future_stop = None\n voting:Voting = None\n question:Question = None\n lines = text_buffer.readlines()\n for line in lines:\n line = line.decode(\"utf-8\")\n if line.find(\"auths\") != -1:\n 
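# each line of the uploaded file is \"key:value\"; for auths the value is expected to look like\n # [(name1,http://auth1:8000),(name2,http://auth2:8001)] (the names/urls here are made-up examples)\n 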
auths_list = line.split(\":\",1)[1].replace(\"[(\",\"\").replace(\")]\",\"\").strip().split(\"),(\")\n for auth_str in auths_list:\n try:\n auth_split = auth_str.split(\",\")\n auth = Auth(name=auth_split[0].strip(), url=auth_split[1].strip())\n auths.append(auth)\n except:\n raise ValidationError(\"You need to add a valid auth\")\n if line.find(\"question_desc\") != -1:\n question_desc = line.split(\":\",1)[1].strip()\n if line.find(\"voting_desc\") != -1:\n voting_desc = line.split(\":\",1)[1].strip()\n if line.find(\"options\") != -1:\n options_str_list = line.split(\":\",1)[1].replace(\"[(\",\"\").replace(\")]\",\"\").strip().split(\"),(\")\n for option_str in options_str_list:\n option = option_str.split(\",\")\n options.append(option)\n if line.find(\"voting_name\") != -1:\n voting_name = line.split(\":\",1)[1].strip()\n if line.find(\"question_type\") != -1:\n question_type = line.split(\":\",1)[1].strip()\n if line.find(\"future_start\") != -1:\n future_start_str = line.split(\":\",1)[1].strip()\n future_start = datetime.strptime(future_start_str, \"%Y-%m-%d %H:%M:%S\")\n if line.find(\"future_stop\") != -1:\n future_end_str = line.split(\":\",1)[1]\n future_stop = datetime.strptime(future_end_str.strip(), \"%Y-%m-%d %H:%M:%S\")\n text_buffer.close()\n question = Question(desc=question_desc,type=question_type)\n try:\n question.full_clean()\n question.save()\n except:\n raise ValidationError(\"You need to add a question\")\n \n for option in options:\n new_option = QuestionOption(question=question, number=int(option[0]), option=option[1])\n new_option.save()\n self.question = question\n self.desc = voting_desc\n self.name = voting_name\n if self.name == \"\":\n raise ValidationError(\"You need to add a name\")\n self.future_start=future_start\n self.future_stop = future_stop\n self.save()\n for auth in auths:\n try:\n auth.save()\n auth.full_clean()\n self.auths.add(auth)\n except:\n raise ValidationError(\"You need to add a valid auth\")\n \n\n def create_pubkey(self):\n if self.pub_key or not self.auths.count():\n return\n\n auth = self.auths.first()\n data = {\n \"voting\": self.id,\n \"auths\": [ {\"name\": a.name, \"url\": a.url} for a in self.auths.all() ],\n }\n key = mods.post('mixnet', baseurl=auth.url, json=data)\n pk = Key(p=key[\"p\"], g=key[\"g\"], y=key[\"y\"])\n pk.save()\n self.pub_key = pk\n self.save()\n\n def get_votes(self, token=''):\n # getting votes from store\n votes = mods.get('store', params={'voting_id': self.id}, HTTP_AUTHORIZATION='Token ' + token)\n # anon votes\n return [[i['a'], i['b']] for i in votes]\n\n def tally_votes(self, token=''):\n '''\n The tally is a shuffle and then a decrypt\n '''\n\n votes = self.get_votes(token)\n\n auth = self.auths.first()\n shuffle_url = \"/shuffle/{}/\".format(self.id)\n decrypt_url = \"/decrypt/{}/\".format(self.id)\n auths = [{\"name\": a.name, \"url\": a.url} for a in self.auths.all()]\n\n # first, we do the shuffle\n data = { \"msgs\": votes }\n response = mods.post('mixnet', entry_point=shuffle_url, baseurl=auth.url, json=data,\n response=True)\n if response.status_code != 200:\n # TODO: manage error\n pass\n\n # then, we can decrypt that\n data = {\"msgs\": response.json()}\n response = mods.post('mixnet', entry_point=decrypt_url, baseurl=auth.url, json=data,\n response=True)\n\n if response.status_code != 200:\n # TODO: manage error\n pass\n\n self.tally = response.json()\n self.save()\n\n self.do_postproc()\n\n def do_postproc(self):\n tally = self.tally\n options = self.question.options.all()\n\n opts = []\n for 
opt in options:\n if isinstance(tally, list):\n votes = tally.count(opt.number)\n else:\n votes = 0\n opts.append({\n 'option': opt.option,\n 'number': opt.number,\n 'votes': votes,\n 'borda': '',\n })\n \n data = { 'type': self.postproc_type, 'seats': self.number_seats, 'options': opts }\n postp = mods.post('postproc', json=data)\n\n self.postproc = postp\n self.save()\n\n def save_file(self):\n if self.postproc:\n file_name = \"[\" + str(self.id) + \"]\" + self.name + \".txt\"\n path = \"voting/files/\" + file_name\n file = open(path, \"w\")\n file.write(\"Id: \" + str(self.id) + \"\\n\")\n file.write(\"Nombre: \" + self.name + \"\\n\")\n file.write(\"Tipo de votación: \" + self.question.type+ \"\\n\")\n if self.desc:\n file.write(\"Descripción: \" + self.desc + \"\\n\")\n file.write(\"Fecha de inicio: \" + self.start_date.strftime('%d/%m/%y %H:%M:%S') + \"\\n\")\n file.write(\"Fecha de fin: \" + self.end_date.strftime('%d/%m/%y %H:%M:%S') + \"\\n\\n\")\n file.write(\"Pregunta: \" + str(self.question) + \"\\n\")\n file.write(\"Resultado: \\n\")\n for opt in self.postproc:\n file.write(\" - Opción: \" + str(opt.get('option')))\n file.write(\" Puntuación: \" + str(opt.get('postproc')))\n file.write(\" Votos: \" + str(opt.get('votes')) + \"\\n\")\n file.close()\n self.file = path\n self.save() \n \n \n\n def __str__(self):\n return self.name\n\nclass VotingFromFile(models.Model):\n voting = models.ForeignKey(Voting(), related_name='voting', on_delete=models.CASCADE, blank=True)\n file_voting = models.FileField()\n def clean(self):\n v = Voting(file=self.file_voting)\n v.read_file()\n self.voting = v\n\ndef update_votings():\n \n fecha_hora = timezone.now()\n votaciones = list(Voting.objects.all())\n try:\n for v in votaciones :\n if(v.future_start <= fecha_hora):\n v.start_date = v.future_start\n if(v.future_stop <= fecha_hora):\n v.end_date = v.future_stop\n v.save()\n except:\n print(\"UPDATING PROCESS HAD AN ERROR\")\n\n","repo_name":"pabramber/decide-part-chiclana","sub_path":"decide/voting/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11128,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"21286518055","text":"import Constantes\n\nclass codewriter():\n def __init__(self, nome_saida):\n self.codigo = Constantes.CODIGO\n self.tabela_simbolos = Constantes.TABELA_SIMBOLOS\n\n self.nome_saida = nome_saida\n self.arquivo_saida = open(self.nome_saida, 'a', encoding='UTF-8')\n\n\n def writeArithmetic(self, comando, ind):\n \"\"\"Escreve os comandos aritméticos no arquivo de saída.\"\"\"\n\n if comando in ['eq','lt','gt']:\n code = self.codigo[comando].format(ind, ind, ind, ind)\n else:\n code= self.codigo[comando]\n self.arquivo_saida.write(code)\n\n\n def writePushPop(self, comando, segmento, indice):\n \"\"\"Escreve os comandos POP/PUSH no arquivo de saída.\"\"\"\n\n if comando == 'push':\n if segmento in ['local', 'argument', 'this', 'that']:\n code = self.codigo['push_basico'].format(segmento, indice, indice, self.tabela_simbolos[segmento])\n self.arquivo_saida.write(code)\n \n elif segmento == 'constant':\n code = self.codigo['push_const'].format(indice, indice)\n self.arquivo_saida.write(code)\n\n elif segmento == 'static':\n code = self.codigo['push_static'].format(indice, indice)\n self.arquivo_saida.write(code)\n \n elif segmento == 'temp':\n code = self.codigo['push_temp'].format(indice, indice)\n self.arquivo_saida.write(code)\n\n elif segmento == 'pointer':\n code = 
self.codigo['push_pointer'].format(indice, self.tabela_simbolos[str(indice)])\n self.arquivo_saida.write(code)\n\n\n elif comando == 'pop':\n if segmento in ['local', 'argument', 'this', 'that']:\n code = self.codigo['pop_basico'].format(segmento, indice, indice, self.tabela_simbolos[segmento])\n self.arquivo_saida.write(code)\n\n elif segmento == 'static':\n code = self.codigo['pop_static'].format(indice, indice)\n self.arquivo_saida.write(code)\n\n elif segmento == 'temp':\n code = self.codigo['pop_temp'].format(indice, indice)\n self.arquivo_saida.write(code)\n\n elif segmento == 'pointer':\n code = self.codigo['pop_pointer'].format(indice, self.tabela_simbolos[str(indice)])\n self.arquivo_saida.write(code)\n\n\n def close(self):\n \"\"\"Fecha o arquivo de saída.\"\"\"\n self.arquivo_saida.close()","repo_name":"Stefanyvitoria/Nand2Tetris","sub_path":"project07/CodeWriter.py","file_name":"CodeWriter.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15384771097","text":"#Creación de las visualizaciones\n\n\"1. Exploración de los datos--------------------------------------------------------------\"\n#Graficos de pie\ndef Conexion_DB_():\n import pandas as pd\n import psycopg2\n import os\n from dotenv import load_dotenv\n # path to env file\n env_path='Informacion.env'\n # load env \n load_dotenv(dotenv_path=env_path)\n # extract env variables\n DBUSER=os.getenv('DBUSER')\n DBPASSWORD=os.getenv('DBPASSWORD')\n DBHOST=os.getenv('DBHOST')\n DBPORT=os.getenv('DBPORT')\n DBNAME=os.getenv('DBNAME')\n #connect to DB\n engine = psycopg2.connect(\n dbname=DBNAME,\n user=DBUSER,\n password=DBPASSWORD,\n host=DBHOST,\n port=DBPORT\n )\n \n cursor = engine.cursor()\n \n query = \"SELECT * FROM mytable;\"\n \n cursor.execute(query)\n data=cursor.fetchall()\n df=pd.DataFrame(data,columns=['id','age', 'sex', 'cpt', 'pressure', 'chol', 'sugar', 'ecg', 'maxbpm',\n 'angina', 'oldpeak', 'slope', 'flourosopy', 'thal', 'diagnosis'])\n datos=df.drop(\"id\", axis=1)\n return datos\n\ndef crear_visualizaciones(datos):\n #############################################################################################\n #Creación de las visualizaciones\n import numpy as np\n import matplotlib.pyplot as plt\n \n \"1. 
Exploración de los datos--------------------------------------------------------------\"\n #Graficos de pie\n \n \n #Género\n fig, ax = plt.subplots()\n etiquetas = [\"Mujeres\",\"Hombres\"]\n valores = [datos.loc[(datos['sex'] == 0)].shape[0],(datos[\"sex\"]==1).sum()]\n colores=[\"#D7F47C\", \"#81E2DF\"]\n ax.pie(valores, labels = etiquetas ,colors=colores, autopct='%1.1f%%', textprops = {'fontsize': 14})\n plt.title(\"Sexo de la muestra\", fontsize = 18)\n fig.subplots_adjust(top=0.9,bottom=0.01,right=0.9)\n plt.savefig('Exploracion1.png')\n \n \n #Age \n fig, ax = plt.subplots()\n etiquetas = [\"Joven Adulto\",\"Adultos\",\"Adultos Mayores\",\"Tercera Edad\"]\n valores = [(datos[\"age\"]==1).sum(),(datos[\"age\"]==2).sum(),(datos[\"age\"]==3).sum(),(datos[\"age\"]==4).sum()]\n colores=[\"#CFEFFC\", \"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\"]\n ax.pie(valores, labels = etiquetas ,colors=colores, autopct='%1.1f%%', textprops = {'fontsize': 14})\n plt.title(\"Edad de la muestra\", fontsize = 18)\n fig.subplots_adjust(top=0.9,bottom=0.01,left=0.008)\n plt.savefig('Exploracion2.png')\n \n \n #\"Chol\"\n fig, ax = plt.subplots()\n etiquetas = [\"Deseable\",\"Elevado\",\"Muy Elevado\"]\n valores = [(datos[\"chol\"]==1).sum(),(datos[\"chol\"]==2).sum(),(datos[\"chol\"]==3).sum()]\n colores=[\"#87CEFA\", \"#81E2DF\",\"#C1E9FC\"]\n ax.pie(valores, labels = etiquetas ,colors=colores, autopct='%1.1f%%', textprops = {'fontsize': 14})\n plt.title(\"Colesterol sérico de la muestra\", fontsize = 18)\n fig.subplots_adjust(top=0.9,bottom=0.01,left=0.008)\n plt.savefig('Exploracion4.png')\n \n \n #\"Thalach\"\n fig, ax = plt.subplots()\n etiquetas = [\"Reposo\",\"Ej Aerobico\",\"Ej Intenso\"]\n valores = [(datos[\"maxbpm\"]==1).sum(),(datos[\"maxbpm\"]==2).sum(),(datos[\"maxbpm\"]==3).sum()]\n colores=[\"#D7F47C\", \"#12B687\",\"#5EC160\"]\n ax.pie(valores, labels = etiquetas ,colors=colores, autopct='%1.1f%%', textprops = {'fontsize': 14})\n plt.title(\"Frecuencia cardíaca máxima de la muestra\", fontsize = 18)\n fig.subplots_adjust(top=0.9,bottom=0.01,left=0.01)\n plt.savefig('Exploracion5.png')\n \n \n \n #Genero por edad\n #Mujeres\n M_Jovenes = datos.loc[(datos['age'] == 1) & (datos['sex'] == 0)].shape[0]\n M_Adultos = datos.loc[(datos['age'] == 2) & (datos['sex'] == 0)].shape[0]\n M_AdultosMay = datos.loc[(datos['age'] == 3) & (datos['sex'] == 0)].shape[0]\n M_Tercera = datos.loc[(datos['age'] == 4) & (datos['sex'] == 0)].shape[0]\n \n y = [M_Jovenes, M_Adultos, M_AdultosMay,M_Tercera]\n x = ['1','2','3','4']\n # crear gráfica de barras\n fig, ax = plt.subplots()\n fig.subplots_adjust(top=1)\n #ax.bar(x, y, color=[\"#D7F47C\", \"#12B687\",\"#5EC160\",\"#90E0AE\",\"#CFEFFC\", \"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\"],label=\"Adultos\")\n ax.bar(x, y, color=[\"#D7F47C\", \"#12B687\",\"#5EC160\",\"#90E0AE\"],label=\"Adultos\")\n ax.bar(x, y, color=[\"#12B687\", \"#12B687\",\"#5EC160\",\"#90E0AE\"],label=\"Adultos Mayores\")\n ax.bar(x, y, color=[\"#5EC160\", \"#12B687\",\"#5EC160\",\"#90E0AE\"],label=\"Tercera Edad\")\n ax.bar(x, y, color=[\"#90E0AE\", \"#12B687\",\"#5EC160\",\"#90E0AE\"],label=\"Jovenes\")\n ax.bar(x, y, color=[\"#D7F47C\", \"#12B687\",\"#5EC160\",\"#90E0AE\"])\n \n # quitar los bordes del gráfico y los valores del eje y\n plt.gca().spines['top'].set_visible(False)\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['left'].set_visible(False)\n plt.gca().set_yticks([])\n \n for i, v in enumerate(y):\n plt.text(i, v + 0.8, str(v), color='black', ha='center')\n \n # agregar leyenda\n 
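# (sketch) the stacked ax.bar calls below exist only to register one legend entry per colour;\n # an equivalent matplotlib idiom is building the handles explicitly, e.g.\n # ax.legend(handles=[matplotlib.patches.Patch(color=c, label=l)\n # for c, l in zip([\"#D7F47C\", \"#12B687\", \"#5EC160\", \"#90E0AE\"], [\"Jovenes\", \"Adultos\", \"Adultos Mayores\", \"Tercera Edad\"])])\n 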
ax.legend(loc=\"upper center\", bbox_to_anchor=[0.5,-0.1], ncol=2, fontsize= 14)\n ax.set_title('Edad de las mujeres de la muestra', fontsize = 18)\n fig.subplots_adjust(top=0.9,bottom=0.3)\n plt.savefig('Exploracion7.png')\n \n \n \n #Hombres\n H_Jovenes = datos.loc[(datos['age'] == 1) & (datos['sex'] == 1)].shape[0]\n H_Adultos = datos.loc[(datos['age'] == 2) & (datos['sex'] == 1)].shape[0]\n H_AdultosMay = datos.loc[(datos['age'] == 3) & (datos['sex'] == 1)].shape[0]\n H_Tercera = datos.loc[(datos['age'] == 4) & (datos['sex'] == 1)].shape[0]\n \n y = [H_Jovenes,H_Adultos,H_AdultosMay,H_Tercera]\n x = ['1','2','3','4']\n \n # crear gráfica de barras\n fig, ax = plt.subplots()\n fig.subplots_adjust(top=1)\n \n #ax.bar(x, y, color=[\"#D7F47C\", \"#12B687\",\"#5EC160\",\"#90E0AE\",\"#CFEFFC\", \"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\"],label=\"Adultos\")\n ax.bar(x, y, color=[\"#CFEFFC\", \"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\"],label=\"Adultos\")\n ax.bar(x, y, color=[\"#8AD6F4\", \"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\"],label=\"Adultos Mayores\")\n ax.bar(x, y, color=[\"#3EAEF4\", \"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\"],label=\"Tercera Edad\")\n ax.bar(x, y, color=[\"#81E2DF\", \"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\"],label=\"Jovenes\")\n ax.bar(x, y, color=[\"#CFEFFC\", \"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\"])\n \n # quitar los bordes del gráfico y los valores del eje y\n plt.gca().spines['top'].set_visible(False)\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['left'].set_visible(False)\n plt.gca().set_yticks([])\n \n for i, v in enumerate(y):\n plt.text(i, v + .8, str(v), color='black', ha='center')\n \n # agregar leyenda\n ax.legend(loc=\"upper center\", bbox_to_anchor=[0.5,-0.1], ncol=2, fontsize= 14)\n ax.set_title('Edad de los hombres de la muestra', fontsize = 18)\n fig.subplots_adjust(top=0.9,bottom=0.3)\n plt.savefig('Exploracion8.png')\n \n \n \n \n \n \n ###########################################################################\n \n \n \n \n import seaborn as sns\n Correlacion= datos.corr()\n fig, ax = plt.subplots(figsize=(15,15))\n matriz_redondeada=np.round(Correlacion, decimals=2)\n sns.heatmap(matriz_redondeada, annot=True, cmap='coolwarm', linewidths=1, ax=ax)\n fig.subplots_adjust(top=0.98,bottom=0.05,right=0.95)\n ax.set_xticklabels(ax.get_xticklabels(), fontsize=14)\n ax.set_yticklabels(ax.get_yticklabels(), fontsize=14)\n fig.savefig(\"tabla_de_correlacion.png\")\n \n \n \n \n \n ###################################################################################################\n \n \n \n \n \n \"3. 
Quienes son más propensos a tener la enfermedad\"\n # Diagramas de los Enfermos\n \n \"--------------------------PRIMER GRÁFICO: SEXO-------------------------------\"\n Mujeres_enfermas = datos.loc[(datos['sex'] == 0) & (datos['diagnosis'] == 1)].shape[0]\n Hombres_enfermos = datos.loc[(datos['sex'] == 1) & (datos['diagnosis'] == 1)].shape[0]\n \n Mujeres_sanas = datos.loc[(datos['sex'] == 0) & (datos['diagnosis'] == 0)].shape[0]\n Hombres_sanos = datos.loc[(datos['sex'] == 1) & (datos['diagnosis'] == 0)].shape[0]\n \n # crear lista con datos y nombres de cada barra\n y = [Hombres_enfermos, Mujeres_enfermas, Hombres_sanos,Mujeres_sanas]\n x = ['Enfermos', 'Enfermas','Sanos', 'Sanas']\n \n # crear gráfica de barras\n fig, ax = plt.subplots()\n fig.subplots_adjust(top=1)\n ax.bar(x, y, color=[\"#81E2DF\",\"#D7F47C\",\"#DAF6F5\", \"#F3FCD8\"],label=\"Hombres\")\n ax.bar(x, y, color=[\"#D7F47C\",\"#D7F47C\",\"#DAF6F5\", \"#F3FCD8\"],label=\"Mujeres\")\n ax.bar(x, y, color=[\"#81E2DF\",\"#D7F47C\",\"#DAF6F5\", \"#F3FCD8\"])\n \n # quitar los bordes del gráfico y los valores del eje y\n plt.gca().spines['top'].set_visible(False)\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['left'].set_visible(False)\n plt.gca().set_yticks([])\n \n for i, v in enumerate(y):\n plt.text(i, v + 3, str(v), color='black', ha='center')\n \n # agregar leyenda\n ax.legend(loc=\"upper center\", bbox_to_anchor=[0.5,-0.1], ncol=2, fontsize= 14)\n ax.set_title('Cantidad de personas enfermas y sanas por sexo', fontsize = 18)\n ax.set_xticks(x)\n ax.set_xticklabels([' Enfermos','',' Sanos',''])\n fig.subplots_adjust(top=0.9,bottom=0.2,left=0.08)\n plt.savefig('Propension1.png')\n \n \n \n \"--------------------------SEGUNDO GRÁFICO: EDAD-------------------------------\"\n Jovenes_enfermas = datos.loc[(datos['age'] == 1) & (datos['diagnosis'] == 1)].shape[0]\n Adultos_enfermos = datos.loc[(datos['age'] == 2) & (datos['diagnosis'] == 1)].shape[0]\n AdultosMay_enfermos = datos.loc[(datos['age'] == 3) & (datos['diagnosis'] == 1)].shape[0]\n Tercera_enfermos = datos.loc[(datos['age'] == 4) & (datos['diagnosis'] == 1)].shape[0]\n \n \n Jovenes_sanos = datos.loc[(datos['age'] == 1) & (datos['diagnosis'] == 0)].shape[0]\n Adultos_sanos = datos.loc[(datos['age'] == 2) & (datos['diagnosis'] == 0)].shape[0]\n AdultosMay_sanos = datos.loc[(datos['age'] == 3) & (datos['diagnosis'] == 0)].shape[0]\n Tercera_sanos = datos.loc[(datos['age'] == 4) & (datos['diagnosis'] == 0)].shape[0]\n \n # crear lista con datos y nombres de cada barra\n y = [Jovenes_enfermas, Adultos_enfermos, AdultosMay_enfermos,Tercera_enfermos,\n Jovenes_sanos,Adultos_sanos,AdultosMay_sanos,Tercera_sanos]\n x = ['1','2','3','4','5','6','7','8']\n \n # crear gráfica de barras\n fig, ax = plt.subplots()\n fig.subplots_adjust(top=1)\n #\"#CFEFFC\", \"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\"\n ax.bar(x, y, color=[\"#99EEF9\", \"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\",\"#DBF9FD\",\"#CFEFFC\",\"#B3E0FB\",\"#DAF6F5\"],label=\"Jovenes\")\n ax.bar(x, y, color=[\"#8AD6F4\", \"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\",\"#DFF4FD\",\"#8BBDF5\",\"#CDE9DC\",\"#D8EACC\"],label=\"Adultos\")\n ax.bar(x, y, color=[\"#3EAEF4\", \"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\",\"#E9FEB4\",\"#EEF8E4\",\"#CDE9DC\",\"#D8EACC\"],label=\"Adultos Mayores\")\n ax.bar(x, y, color=[\"#81E2DF\", \"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\",\"#E9FEB4\",\"#EEF8E4\",\"#CDE9DC\",\"#D8EACC\"],label=\"Tercera Edad\")\n ax.bar(x, y, color=[\"#99EEF9\", 
\"#8AD6F4\",\"#3EAEF4\",\"#81E2DF\",\"#DBF9FD\",\"#CFEFFC\",\"#B3E0FB\",\"#DAF6F5\"])\n \n # quitar los bordes del gráfico y los valores del eje y\n plt.gca().spines['top'].set_visible(False)\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['left'].set_visible(False)\n plt.gca().set_yticks([])\n \n for i, v in enumerate(y):\n plt.text(i, v + 1, str(v), color='black', ha='center')\n \n # agregar leyenda\n ax.legend(loc=\"upper center\", bbox_to_anchor=[0.5,-0.1], ncol=2, fontsize= 14)\n ax.set_title('Cantidad de personas enfermas y sanas por edad', fontsize = 18)\n ax.set_xticks(x)\n ax.set_xticklabels([' Enfermos','','','',' Sanos','','',''])\n fig.subplots_adjust(top=0.9,bottom=0.3)\n plt.savefig('Propension2.png')\n \n \n \n \"------------------------------TERCER GRÁFICO-----------------------------------\"\n pressure_J1 = datos.loc[(datos['age'] == 1) & (datos['pressure'] == 1)].shape[0]\n pressure_A1 = datos.loc[(datos['age'] == 2) & (datos['pressure'] == 1)].shape[0]\n pressure_AM1 = datos.loc[(datos['age'] == 3) & (datos['pressure'] == 1)].shape[0]\n pressure_T1 = datos.loc[(datos['age'] == 4) & (datos['pressure'] == 1)].shape[0]\n \n \n pressure_J2 = datos.loc[(datos['age'] == 1) & (datos['pressure'] == 2)].shape[0]\n pressure_A2 = datos.loc[(datos['age'] == 2) & (datos['pressure'] == 2)].shape[0]\n pressure_AM2 = datos.loc[(datos['age'] == 3) & (datos['pressure'] == 2)].shape[0]\n pressure_T2 = datos.loc[(datos['age'] == 4) & (datos['pressure'] == 2)].shape[0]\n \n pressure_J3 = datos.loc[(datos['age'] == 1) & (datos['pressure'] == 3)].shape[0]\n pressure_A3 = datos.loc[(datos['age'] == 2) & (datos['pressure'] == 3)].shape[0]\n pressure_AM3 = datos.loc[(datos['age'] == 3) & (datos['pressure'] == 3)].shape[0]\n pressure_T3 = datos.loc[(datos['age'] == 4) & (datos['pressure'] == 3)].shape[0]\n \n pressure_J4 = datos.loc[(datos['age'] == 1) & (datos['pressure'] == 4)].shape[0]\n pressure_A4 = datos.loc[(datos['age'] == 2) & (datos['pressure'] == 4)].shape[0]\n pressure_AM4 = datos.loc[(datos['age'] == 3) & (datos['pressure'] == 4)].shape[0]\n pressure_T4 = datos.loc[(datos['age'] == 4) & (datos['pressure'] == 4)].shape[0]\n \n pressure_J5 = datos.loc[(datos['age'] == 1) & (datos['pressure'] == 5)].shape[0]\n pressure_A5 = datos.loc[(datos['age'] == 2) & (datos['pressure'] == 5)].shape[0]\n pressure_AM5 = datos.loc[(datos['age'] == 3) & (datos['pressure'] == 5)].shape[0]\n pressure_T5 = datos.loc[(datos['age'] == 4) & (datos['pressure'] == 5)].shape[0]\n \n y1 = [pressure_J1, pressure_A1, pressure_AM1,pressure_T1]\n y2 = [pressure_J2, pressure_A2, pressure_AM2,pressure_T2]\n y3 = [pressure_J3, pressure_A3, pressure_AM3,pressure_T3]\n y4 = [pressure_J4, pressure_A4, pressure_AM4,pressure_T4]\n y5 = [pressure_J5, pressure_A5, pressure_AM5,pressure_T5]\n \n x1 = ['1','2','3','4']\n x2 = ['5','6','7','8']\n x3 = ['9','10','11','12']\n x4 = ['13','14','15','16']\n x5 = ['17','18','19','20']\n \n \n # Gráfico de líneas\n fig, ax = plt.subplots()\n ax.plot(x1, y1, marker = \"o\", label = \"Presión normal \" ,color=\"#CFEFFC\", linewidth=3)\n ax.plot(x2, y2, marker = \"o\", label = \"Prehipertensión\",color=\"#3EAEF4\", linewidth=3)\n ax.plot(x3, y3, marker = \"o\", label = \"Hipertensión E1\",color=\"#8AD6F4\", linewidth=3)\n ax.plot(x4, y4, marker = \"o\", label = \"Hipertensión E2\",color=\"#81E2DF\", linewidth=3)\n ax.plot(x5, y5, marker = \"o\", label = \"Crisis Hipertensiva\",color=\"#0070C0\", linewidth=3)\n \n # quitar los bordes del gráfico y los valores del eje y\n 
plt.gca().spines['top'].set_visible(False)\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['left'].set_visible(False)\n plt.gca().set_yticks([])\n ax.set_xticks([])\n plt.title(\"Presión arterial según la edad\", fontsize = 18)\n #descripcion=\"El primer punto corresponde a los Jovenes, \\nel segundo a los Adultos, el tercero a los Adultos Mayores.\\nPor ultimo los Aducltos de la Tercera Edad\"\n \n plt.ylim(bottom=0)\n ax.legend(loc=\"upper center\", bbox_to_anchor=[0.5,-0.02], ncol=2, fontsize= 14)\n fig.subplots_adjust(top=0.9,bottom=0.3)\n plt.savefig('Propension3.png')\n \n \n \"------------------------------CUARTA GRÁFICA-----------------------------------\"\n chol_J1 = datos.loc[(datos['age'] == 1) & (datos['chol'] == 1)].shape[0]\n chol_A1 = datos.loc[(datos['age'] == 2) & (datos['chol'] == 1)].shape[0]\n chol_AM1 = datos.loc[(datos['age'] == 3) & (datos['chol'] == 1)].shape[0]\n chol_T1 = datos.loc[(datos['age'] == 4) & (datos['chol'] == 1)].shape[0]\n \n chol_J2 = datos.loc[(datos['age'] == 1) & (datos['chol'] == 2)].shape[0]\n chol_A2 = datos.loc[(datos['age'] == 2) & (datos['chol'] == 2)].shape[0]\n chol_AM2 = datos.loc[(datos['age'] == 3) & (datos['chol'] == 2)].shape[0]\n chol_T2 = datos.loc[(datos['age'] == 4) & (datos['chol'] == 2)].shape[0]\n \n chol_J3 = datos.loc[(datos['age'] == 1) & (datos['chol'] == 3)].shape[0]\n chol_A3 = datos.loc[(datos['age'] == 2) & (datos['chol'] == 3)].shape[0]\n chol_AM3 = datos.loc[(datos['age'] == 3) & (datos['chol'] == 3)].shape[0]\n chol_T3 = datos.loc[(datos['age'] == 4) & (datos['chol'] == 3)].shape[0]\n \n y1 = [chol_J1, chol_A1, chol_AM1, chol_T1]\n y2 = [chol_J2, chol_A2, chol_AM2, chol_T2]\n y3 = [chol_J3, chol_A3, chol_AM3, chol_T3]\n \n x1 = ['1','2','3','4']\n x2 = ['5','6','7','8']\n x3 = ['9','10','11','12']\n \n # Gráfico de líneas\n fig, ax = plt.subplots()\n ax.plot(x1, y1, marker = \"o\", label = \"Deseable \" ,color=\"#D7F47C\", linewidth=3)\n ax.plot(x2, y2, marker = \"o\", label = \"Elevado\",color=\"#12B687\", linewidth=3)\n ax.plot(x3, y3, marker = \"o\", label = \"Muy Elevado\",color=\"#90E0AE\", linewidth=3)\n \n # quitar los bordes del gráfico y los valores del eje y\n plt.gca().spines['top'].set_visible(False)\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['left'].set_visible(False)\n plt.gca().set_yticks([])\n ax.set_xticks([])\n plt.title(\"Colesterol según la edad\", fontsize = 18)\n #descripcion=\"El primer punto corresponde a los Jovenes, \\nel segundo a los Adultos, el tercero a los Adultos Mayores.\\nPor ultimo los Aducltos de la Tercera Edad\"\n \n plt.ylim(bottom=0)\n ax.legend(loc=\"upper center\", bbox_to_anchor=[0.5,-0.02], ncol=3, fontsize= 14)\n plt.savefig('Propension4.png')\n \n \n \"------------------------------QUINTO GRÁFICO-----------------------------------\"\n maxbpm_J1 = datos.loc[(datos['age'] == 1) & (datos['maxbpm'] == 1)].shape[0]\n maxbpm_A1 = datos.loc[(datos['age'] == 2) & (datos['maxbpm'] == 1)].shape[0]\n maxbpm_AM1 = datos.loc[(datos['age'] == 3) & (datos['maxbpm'] == 1)].shape[0]\n maxbpm_T1 = datos.loc[(datos['age'] == 4) & (datos['maxbpm'] == 1)].shape[0]\n \n maxbpm_J2 = datos.loc[(datos['age'] == 1) & (datos['maxbpm'] == 2)].shape[0]\n maxbpm_A2 = datos.loc[(datos['age'] == 2) & (datos['maxbpm'] == 2)].shape[0]\n maxbpm_AM2 = datos.loc[(datos['age'] == 3) & (datos['maxbpm'] == 2)].shape[0]\n maxbpm_T2 = datos.loc[(datos['age'] == 4) & (datos['maxbpm'] == 2)].shape[0]\n \n maxbpm_J3 = datos.loc[(datos['age'] == 1) & (datos['maxbpm'] == 
3)].shape[0]\n maxbpm_A3 = datos.loc[(datos['age'] == 2) & (datos['maxbpm'] == 3)].shape[0]\n maxbpm_AM3 = datos.loc[(datos['age'] == 3) & (datos['maxbpm'] == 3)].shape[0]\n maxbpm_T3 = datos.loc[(datos['age'] == 4) & (datos['maxbpm'] == 3)].shape[0]\n \n y1 = [maxbpm_J1, maxbpm_A1, maxbpm_AM1,maxbpm_T1]\n y2 = [maxbpm_J2, maxbpm_A2, maxbpm_AM2,maxbpm_T2]\n y3 = [maxbpm_J3, maxbpm_A3, maxbpm_AM3,maxbpm_T3]\n \n x1 = ['1','2','3','4']\n x2 = ['5','6','7','8']\n x3 = ['9','10','11','12']\n \n # Gráfico de líneas\n fig, ax = plt.subplots()\n ax.plot(x1, y1, marker = \"o\", label = \"Deseable \" ,color=\"#99EEF9\", linewidth=3)\n ax.plot(x2, y2, marker = \"o\", label = \"Elevado\",color=\"#8AD6F4\", linewidth=3)\n ax.plot(x3, y3, marker = \"o\", label = \"Muy Elevado\",color=\"#3EAEF4\", linewidth=3)\n \n # quitar los bordes del gráfico y los valores del eje y\n plt.gca().spines['top'].set_visible(False)\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['left'].set_visible(False)\n plt.gca().set_yticks([])\n ax.set_xticks([])\n plt.title(\"Frecuencia cardíaca según la edad\", fontsize = 18)\n #descripcion=\"El primer punto corresponde a los Jovenes, \\nel segundo a los Adultos, el tercero a los Adultos Mayores.\\nPor ultimo los Aducltos de la Tercera Edad\"\n \n plt.ylim(bottom=0)\n ax.legend(loc=\"upper center\", bbox_to_anchor=[0.5,-0.02], ncol=3, fontsize= 14)\n plt.savefig('Propension5.png')\n \n \n \n \n \n \"------------------------------SEXTO GRÁFICO-----------------------------------\"\n # conteos para oldpeak == 1 (\"Normal\"); y1 más abajo depende de estas variables\n pressure_J1 = datos.loc[(datos['age'] == 1) & (datos['oldpeak'] == 1)].shape[0]\n pressure_A1 = datos.loc[(datos['age'] == 2) & (datos['oldpeak'] == 1)].shape[0]\n pressure_AM1 = datos.loc[(datos['age'] == 3) & (datos['oldpeak'] == 1)].shape[0]\n pressure_T1 = datos.loc[(datos['age'] == 4) & (datos['oldpeak'] == 1)].shape[0]\n \n pressure_J2 = datos.loc[(datos['age'] == 1) & (datos['oldpeak'] == 2)].shape[0]\n pressure_A2 = datos.loc[(datos['age'] == 2) & (datos['oldpeak'] == 2)].shape[0]\n pressure_AM2 = datos.loc[(datos['age'] == 3) & (datos['oldpeak'] == 2)].shape[0]\n pressure_T2 = datos.loc[(datos['age'] == 4) & (datos['oldpeak'] == 2)].shape[0]\n \n pressure_J3 = datos.loc[(datos['age'] == 1) & (datos['oldpeak'] == 3)].shape[0]\n pressure_A3 = datos.loc[(datos['age'] == 2) & (datos['oldpeak'] == 3)].shape[0]\n pressure_AM3 = datos.loc[(datos['age'] == 3) & (datos['oldpeak'] == 3)].shape[0]\n pressure_T3 = datos.loc[(datos['age'] == 4) & (datos['oldpeak'] == 3)].shape[0]\n \n pressure_J4 = datos.loc[(datos['age'] == 1) & (datos['oldpeak'] == 4)].shape[0]\n pressure_A4 = datos.loc[(datos['age'] == 2) & (datos['oldpeak'] == 4)].shape[0]\n pressure_AM4 = datos.loc[(datos['age'] == 3) & (datos['oldpeak'] == 4)].shape[0]\n pressure_T4 = datos.loc[(datos['age'] == 4) & (datos['oldpeak'] == 4)].shape[0]\n \n y1 = [pressure_J1, pressure_A1, pressure_AM1,pressure_T1]\n y2 = [pressure_J2, pressure_A2, pressure_AM2,pressure_T2]\n y3 = [pressure_J3, pressure_A3, pressure_AM3,pressure_T3]\n y4 = [pressure_J4, pressure_A4, pressure_AM4,pressure_T4]\n \n x1 = ['1','2','3','4']\n x2 = ['5','6','7','8']\n x3 = ['9','10','11','12']\n x4 = ['13','14','15','16']\n \n \n # Gráfico de líneas\n fig, ax = plt.subplots()\n ax.plot(x1, y1, marker = \"o\", label = \"Normal \" ,color=\"#D7F47C\", linewidth=3)\n ax.plot(x2, y2, marker = \"o\", label = \"Ligeramente Elevado\",color=\"#12B687\", linewidth=3)\n ax.plot(x3, y3, marker = \"o\", label = \"Moderadamente Elevado\",color=\"#5EC160\", linewidth=3)\n ax.plot(x4, y4, marker = \"o\", label = \"Altamente Elevado\",color=\"#90E0AE\", linewidth=3)\n \n # quitar los bordes del gráfico y los valores del eje y\n plt.gca().spines['top'].set_visible(False)\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['left'].set_visible(False)\n plt.gca().set_yticks([])\n 
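# (sketch) this spine/tick-hiding boilerplate repeats for every chart; a small helper such as\n # def ocultar_bordes(ax):\n #     for lado in ('top', 'right', 'left'):\n #         ax.spines[lado].set_visible(False)\n #     ax.set_yticks([])\n # would keep each figure block focused on its data (helper name is hypothetical).\n 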
ax.set_xticks([])\n plt.title(\"Depresión ST según la edad\", fontsize = 18)\n #descripcion=\"El primer punto corresponde a los Jovenes, \\nel segundo a los Adultos, el tercero a los Adultos Mayores.\\nPor ultimo los Aducltos de la Tercera Edad\"\n \n plt.ylim(bottom=0)\n ax.legend(loc=\"upper center\", bbox_to_anchor=[0.5,-0.02], ncol=2, fontsize= 14)\n fig.subplots_adjust(top=0.9,bottom=0.3)\n plt.savefig('Propension6.png')\n \n \n ############\n return None\n\ndef estimar(radio1,radio2, radio3, dropdown1, dropdown2, dropdown3, dropdown4, dropdown5, dropdown6, dropdown7, dropdown8, dropdown9, dropdown10):\n from pgmpy.inference import VariableElimination\n from pgmpy.readwrite import BIFReader\n\n Sex=9\n if radio1 == 'Hombre':\n Sex=\"1\"\n elif radio1 == 'Mujer':\n Sex=\"0\"\n \n Exang=9\n if radio2 == 'Si':\n Exang=\"1\"\n elif radio2 == 'No':\n Exang=\"0\"\n \n \n Fbs=9\n if radio3 == 'Si':\n Fbs=\"1\"\n elif radio3 == 'No':\n Fbs=\"0\"\n \n \n edad = 9\n if dropdown1 == \"Entre 29 y 39 años\":\n edad = \"1\"\n elif dropdown1 == \"Entre 40 y 54 años\":\n edad = \"2\"\n elif dropdown1 == \"Entre 55 y 64 años\":\n edad = \"3\"\n elif dropdown1 == \"Entre 65 y 79 años\":\n edad =\"4\"\n \n CP = 9\n if dropdown2 == 'Angina típica':\n CP = \"1\"\n elif dropdown2 == 'Angina atípica':\n CP = \"2\"\n elif dropdown2 == 'Dolor no anginoso':\n CP = \"3\"\n elif dropdown2 == 'Asintomático':\n CP = \"4\"\n \n Trestbps =9\n \n if dropdown3 == 'Entre 94 y 120':\n Trestbps = \"1\"\n elif dropdown3 == 'Entre 121 y 129':\n Trestbps = \"2\"\n elif dropdown3 == 'Entre 130 y 139':\n Trestbps = \"3\"\n elif dropdown3 == 'Entre 140 y 180':\n Trestbps = \"4\"\n elif dropdown3 == 'Entre 181 y 210':\n Trestbps = \"5\"\n \n Chol =9\n \n if dropdown4 == 'Deseable':\n Chol = \"1\"\n elif dropdown4 == 'Elevado':\n Chol = \"2\"\n elif dropdown4 == 'Muy Elevado':\n Chol = \"3\"\n \n ecg=9\n if dropdown5 == 'Normal':\n ecg = \"0\"\n elif dropdown5 =='Anormalidad de la onda ST-T':\n ecg = \"1\"\n elif dropdown5 =='Hipertrofia ventricular':\n ecg = \"2\"\n \n maxbpm=9 \n if dropdown6 =='Entre 71 y 139':\n maxbpm=\"1\"\n elif dropdown6 =='Entre 140 y 169':\n maxbpm=\"2\"\n elif dropdown6 =='Entre 170 y 210':\n maxbpm=\"3\"\n \n slope = 9\n if dropdown7 =='Pendiente ascendente':\n slope=\"1\"\n elif dropdown7 =='Plano':\n slope=\"2\"\n elif dropdown7 =='Pendiente descendente':\n slope=\"3\"\n\n ca=9\n if dropdown8 =='1':\n ca=\"1\"\n elif dropdown8 =='2':\n ca=\"2\"\n elif dropdown8 =='3':\n ca=\"3\"\n \n Oldpeak=9\n if dropdown9 == 'Normal':\n Oldpeak=\"1\"\n elif dropdown9 == 'Lig. Elevada':\n Oldpeak=\"2\"\n elif dropdown9 == 'Mod. Elevada':\n Oldpeak=\"3\"\n elif dropdown9 == 'Alt. 
Elevada':\n Oldpeak=\"4\"\n \n \n Thal=9\n if dropdown10 == 'Normal':\n Thal=\"3\"\n elif dropdown10 == 'Defecto Fijo':\n Thal=\"6\"\n elif dropdown10 == 'Defecto Reversible':\n Thal=\"7\"\n \n modelo = BIFReader(\"Modelo.bif\").get_model()\n inferencia = VariableElimination(modelo)\n evidencia = {}\n \n if Sex !=9: \n evidencia[\"sex\"]= Sex\n if Exang !=9: \n evidencia[\"angina\"]= Exang\n if edad !=9: \n evidencia[\"age\"]= edad\n if Fbs !=9: \n evidencia[\"sugar\"]= Fbs\n if CP !=9: \n evidencia[\"cpt\"]= CP\n if Trestbps !=9: \n evidencia[\"pressure\"]= Trestbps\n if Chol !=9: \n evidencia[\"chol\"]= Chol\n if ecg !=9: \n evidencia[\"ecg\"]= ecg\n if maxbpm !=9: \n evidencia[\"maxbpm\"]= maxbpm\n if slope !=9: \n evidencia[\"slope\"]= slope\n if ca !=9: \n evidencia[\"flourosopy\"]= ca\n if Oldpeak !=9: \n evidencia[\"oldpeak\"]= Oldpeak\n if Thal !=9: \n evidencia[\"thal\"]= Thal\n \n resultado = inferencia.query(['diagnosis'],evidence=evidencia).values\n return resultado\n\n","repo_name":"Sebas19812002/Proyecto_2","sub_path":"Funciones.py","file_name":"Funciones.py","file_ext":"py","file_size_in_byte":26276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30659416862","text":"from genericpath import isfile\nfrom os import makedirs\n\nimport yaml\nfrom ruamel import yaml\n\nplayfield_folder_path_format = \"{}/Playfields/{}\"\nplayfield_file_path_format = \"{}/playfield.yaml\"\n\n\ndef write_playfields(playfield_dict: dict, output_scenario_path: str):\n for k, v in playfield_dict.items():\n playfield_folder = playfield_folder_path_format.format(output_scenario_path, k)\n makedirs(playfield_folder, exist_ok=True)\n playfield_file_path = playfield_file_path_format.format(playfield_folder)\n with open(playfield_file_path, 'w') as outfile:\n yaml.dump(v, outfile, default_flow_style=False)\n\n\ndef get_playfield(playfield_path:str) -> map:\n if not isfile(playfield_path):\n return None\n\n with open(playfield_path, \"rb\") as input_stream:\n data = input_stream.read()\n\n contents = data.decode(\"utf-8\")\n\n safe_contents = contents.replace(\"\\t\", \" \")\n doc = yaml.safe_load(safe_contents)\n return doc\n\nplayfield_path_format = \"{}/Playfields/{}/playfield.yaml\"\n","repo_name":"lostinplace/egs-scenario-updater","sub_path":"utils/playfield_manager.py","file_name":"playfield_manager.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"27550314932","text":"import sys, copy\nfrom collections import deque\ninput = sys.stdin.readline\nsys.setrecursionlimit(300000)\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, 1, -1]\n\nqueue = deque()\n\n#빙산 개수\ndef bfs(x,y):\n queue.append((x,y))\n visited[x][y] = True\n while queue:\n x, y = queue.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < n and 0 <= ny < m and not visited[nx][ny] and g[nx][ny] != 0:\n visited[nx][ny] = True\n queue.append((nx,ny))\n return 1\n\n#바닷물에 녹는\ndef melt():\n g2 = copy.deepcopy(g)\n for i in range(n):\n for j in range(m):\n if g[i][j] != 0:\n for i2 in range(4):\n nx = i + dx[i2]\n ny = j + dy[i2]\n if 0 <= nx < n and 0 <= ny < m:\n if g[nx][ny] == 0: g2[i][j] -= 1\n if g2[i][j] == 0: break\n return g2\n\nn, m = map(int, input().split())\nyear = 0\ng = []\n\nfor _ in range(n):\n g.append(list(map(int, input().split())))\n\nwhile True:\n visited = [[False] * m for _ in range(n)]\n c = 0\n\n for i in range(n):\n for j in range(m):\n if not 
visited[i][j] and g[i][j] != 0:\n c += bfs(i, j)\n\n if c >= 2:\n print(year)\n break\n if c == 0:\n print(0)\n break\n\n g = melt()\n year += 1\n","repo_name":"lee-yujinn/Algorithm","sub_path":"백준/DFS:BFS/2573_빙산.py","file_name":"2573_빙산.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"71316939132","text":"import sys\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QMenu\nfrom PyQt5.QtGui import QFont, QIcon\nfrom PyQt5.QtCore import Qt\n\nclass MainForm(QMainWindow) :\n def __init__(self) :\n super(MainForm, self).__init__()\n self.setWindowTitle(\"QMenu()\")\n self.setWindowIcon(QIcon('icon.jpg'))\n self.setGeometry(200, 200, 500, 500)\n \n def contextMenuEvent(self, event) :\n contextmenu = QMenu(self)\n newaction = contextmenu.addAction(\"New\")\n openaction = contextmenu.addAction(\"Open\")\n quitaction = contextmenu.addAction(\"Quit\")\n\n action = contextmenu.exec_(self.mapToGlobal(event.pos())) ## mapToGlobal => window içinden context menu açılır, mapFromGlobal => pencere dışında context menu açılır\n if action == quitaction :\n self.close()\n \n \n \n\ndef window() :\n app = QApplication(sys.argv)\n win = MainForm()\n win.show()\n sys.exit(app.exec_()) # x ile kapatılabilme\n\nwindow()\n","repo_name":"bm-snnsmsk/my_workshop","sub_path":"python/002_pyqt5/QMenu_context_menu.py","file_name":"QMenu_context_menu.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27298646529","text":"\"\"\"Messaging Framework for Python\"\"\"\nfrom __future__ import absolute_import\n\nVERSION = (2, 1, 7)\n__version__ = \".\".join(map(str, VERSION[0:3])) + \"\".join(VERSION[3:])\n__author__ = \"Ask Solem\"\n__contact__ = \"ask@celeryproject.org\"\n__homepage__ = \"http://github.com/ask/kombu/\"\n__docformat__ = \"restructuredtext en\"\n\n# -eof meta-\n\nimport os\nimport sys\n\nif sys.version_info < (2, 5): # pragma: no cover\n if sys.version_info >= (2, 4):\n raise Exception(\n \"Python 2.4 is not supported by this version. 
\"\n \"Please use Kombu versions 1.x.\")\n else:\n raise Exception(\"Kombu requires Python versions 2.5 or later.\")\n\n# Lazy loading.\n# - See werkzeug/__init__.py for the rationale behind this.\nfrom types import ModuleType\n\nall_by_module = {\n \"kombu.connection\": [\"BrokerConnection\", \"Connection\"],\n \"kombu.entity\": [\"Exchange\", \"Queue\"],\n \"kombu.messaging\": [\"Consumer\", \"Producer\"],\n \"kombu.pools\": [\"connections\", \"producers\"],\n}\n\nobject_origins = {}\nfor module, items in all_by_module.iteritems():\n for item in items:\n object_origins[item] = module\n\n\nclass module(ModuleType):\n\n def __getattr__(self, name):\n if name in object_origins:\n module = __import__(object_origins[name], None, None, [name])\n for extra_name in all_by_module[module.__name__]:\n setattr(self, extra_name, getattr(module, extra_name))\n return getattr(module, name)\n return ModuleType.__getattribute__(self, name)\n\n def __dir__(self):\n result = list(new_module.__all__)\n result.extend((\"__file__\", \"__path__\", \"__doc__\", \"__all__\",\n \"__docformat__\", \"__name__\", \"__path__\", \"VERSION\",\n \"__package__\", \"__version__\", \"__author__\",\n \"__contact__\", \"__homepage__\", \"__docformat__\"))\n return result\n\n# 2.5 does not define __package__\ntry:\n package = __package__\nexcept NameError:\n package = \"kombu\"\n\n# keep a reference to this module so that it's not garbage collected\nold_module = sys.modules[__name__]\n\nnew_module = sys.modules[__name__] = module(__name__)\nnew_module.__dict__.update({\n \"__file__\": __file__,\n \"__path__\": __path__,\n \"__doc__\": __doc__,\n \"__all__\": tuple(object_origins),\n \"__version__\": __version__,\n \"__author__\": __author__,\n \"__contact__\": __contact__,\n \"__homepage__\": __homepage__,\n \"__docformat__\": __docformat__,\n \"__package__\": package,\n \"VERSION\": VERSION})\n\nif os.environ.get(\"KOMBU_LOG_DEBUG\"):\n os.environ.update(KOMBU_LOG_CHANNEL=\"1\", KOMBU_LOG_CONNECTION=\"1\")\n from .utils import debug\n debug.setup_logging()\n","repo_name":"mozilla/make.mozilla.org","sub_path":"vendor-local/lib/python/kombu/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"78"} +{"seq_id":"1910754782","text":"import cmath\nimport math\nimport numpy\nimport pygame\n\nwindow_width = 2.5\nwindow_height = 2.5\npixels_width = 512\npixels_height = 512\nrow_width = 3\ncol_width = 3\nshow_axes = False\n\n\ndef f(z):\n return cmath.cos(z)\n\n\ndef show(image):\n screen = pygame.display.get_surface()\n screen.blit(image, (0, 0))\n pygame.display.flip()\n while 1:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n raise SystemExit\n\n\ndef getAngle(x, y):\n pi = math.pi\n angle: float\n hypotenuse = getDistance(x, y)\n\n if y == 0 and x >= 0:\n angle = 0\n elif y == 0 and x < 0:\n angle = pi\n elif x > 0:\n angle = math.asin(y / hypotenuse)\n elif y > 0:\n angle = pi - math.asin(y / hypotenuse)\n else:\n angle = -pi - math.asin(y / hypotenuse)\n\n return angle\n\n\ndef getPureColor(x, y):\n # x and y in terms of the xy-plane, not the array\n pi = math.pi\n tr, tg, tb = 0, 0, 0\n angle = getAngle(x, y)\n\n # -pi/3 to pi/3\n # the right third\n if -pi / 3 <= angle <= pi / 3:\n tr = 255\n if angle > 0:\n tg = 255*angle/(pi / 3)\n else:\n tb = 255 * -angle / (pi / 3)\n\n # pi/3 to pi\n # top left third\n if angle > math.pi/3:\n if angle < 2*pi / 3:\n tr = 255 - 255*(angle - pi/3) / 
(pi/3)\n tg = 255\n if angle > 2*pi / 3:\n tb = 255*(angle - 2*pi/3) / (pi/3)\n\n # -pi/3 to -pi\n # bottom left third\n if angle < -pi/3:\n if angle > -2*pi / 3:\n tr = 255*(angle + 2*pi/3) / (pi/3)\n if angle < -2*pi / 3:\n tg = 255 - 255*(angle + pi) / (pi/3)\n tb = 255\n\n return tr, tg, tb\n\n\ndef getDistance(x, y):\n return math.sqrt(math.pow(x, 2) + math.pow(y, 2))\n\n\ndef getColorMultiplier(x, y) -> float:\n distance = getDistance(x, y)\n maxDist = getDistance(window_width, window_height)\n # higher -> smaller spot around zeroes\n mult = 20\n return (mult*distance / maxDist) / (1 + (mult*distance / maxDist))\n\n\ndef getColor(x, y):\n pure_color_r = getPureColor(x, y)[0]\n pure_color_g = getPureColor(x, y)[1]\n pure_color_b = getPureColor(x, y)[2]\n\n multiplier = getColorMultiplier(x, y)\n\n color_r = pure_color_r * multiplier\n color_g = pure_color_g * multiplier\n color_b = pure_color_b * multiplier\n\n return int(color_r), int(color_g), int(color_b)\n\n\ndef main():\n pygame.init()\n\n pygame.display.set_mode((pixels_width, pixels_height))\n surface = pygame.Surface((pixels_width, pixels_height))\n\n pygame.display.flip()\n\n # Create the PixelArray.\n ar = pygame.PixelArray(surface)\n\n # graph some complex function\n xcoords = numpy.arange(-window_width/2, window_width/2,\n window_width/pixels_width)\n ycoords = numpy.arange(-window_height/2, window_height/2,\n window_height/pixels_height)\n for x in range(0, pixels_width, col_width):\n for y in range(0, pixels_height, row_width):\n xc = xcoords[x]\n yc = ycoords[y]\n z = xc + yc*1j\n if not getDistance(xc, yc) == 0:\n coords = [f(z).real, f(z).imag]\n fx = coords[0]\n fy = coords[1]\n else:\n fx = 0\n fy = 0\n r, g, b = getColor(fx, fy)\n for num1 in range(col_width):\n for num2 in range(row_width):\n if x+num1 < pixels_width and y+num2+1 < pixels_height:\n ar[x+num1, pixels_height-1-(y+num2)] = (r, g, b)\n\n # draw axes\n if show_axes:\n for x in range(pixels_width):\n ar[x, pixels_height//2] = (0, 0, 0)\n ar[x, pixels_height // 2 + 1] = (0, 0, 0)\n for y in range(pixels_height):\n ar[pixels_width//2, y] = (0, 0, 0)\n ar[pixels_width // 2 + 1, y] = (0, 0, 0)\n del ar\n show(surface)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nickLayman/Personal-Small-Projects","sub_path":"Python/MTH-402_complex-analysis/2D_grapher.py","file_name":"2D_grapher.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5102671181","text":"import scipy.stats as st\n\ndef knownSD(listMeasurement, standardDeviation):\n numMeasurement = len(listMeasurement)\n sumMeasurement = 0\n for measurement in listMeasurement:\n sumMeasurement+= measurement\n meanMeasurement = sumMeasurement/numMeasurement\n\n rangeBelow = meanMeasurement-1.959963984540054*standardDeviation/pow(numMeasurement, 0.5)\n rangeAbove = meanMeasurement+1.959963984540054*standardDeviation/pow(numMeasurement, 0.5)\n print(\"***************************************************\\n\")\n print(\"RESULT:\",rangeBelow, \" -> \",rangeAbove)\n print(\"RESULT:\",round(rangeBelow, 2), \" -> \",round(rangeAbove, 2))\n print(\"\\n***************************************************\")\n\ndef unknownSD(listMeasurement, confidentLevel = 0.95):\n numMeasurement = len(listMeasurement)\n sumMeasurement = 0\n for measurement in listMeasurement:\n sumMeasurement+= measurement\n meanMeasurement = sumMeasurement/numMeasurement\n #calculate S\n S2 = 0\n for measurement in listMeasurement:\n S2 += 
pow((measurement-meanMeasurement), 2)\n    S2/=(numMeasurement-1)\n    S = pow(S2, 0.5)\n\n    #calculate the two-sided t-distribution critical value\n    T_distribution = abs(st.t.ppf((1 - confidentLevel)/2, numMeasurement-1)) #95%\n\n    #calculate 95% range\n    rangeBelow = meanMeasurement-T_distribution*S/pow(numMeasurement, 0.5)\n    rangeAbove = meanMeasurement+T_distribution*S/pow(numMeasurement, 0.5)\n    print(\"***************************************************\\n\")\n    print(\"RESULT:\",rangeBelow, \" -> \",rangeAbove)\n    print(\"RESULT:\",round(rangeBelow, 2), \" -> \",round(rangeAbove, 2))\n    print(\"Unbiased Variance =\", round(S*S, 2), S*S)\n    print(\"Unbiased Standard Deviation =\", round(S, 2), S)\n    print(\"\\n***************************************************\")\n\n\ndef __MAIN__():\n    listMeasurement = [666.1426, 681.1522, 635.557, 655.4282, 689.601, 672.7978, 660.1482]\n    standardDeviation = -1\n    #listMeasurement = []\n    if(len(listMeasurement)==0):\n        numMeasurement = int(input(\"numMeasurement:\"))\n        for i in range(0,numMeasurement):\n            a = float(input(\"Result measurement number \"+str(i)+\": \"))\n            listMeasurement.append(a)\n    if(standardDeviation == 0):\n        standardDeviation = float(input(\"standardDeviation (-1 if unknown):\"))\n    if( standardDeviation>=0 ):\n        knownSD(listMeasurement, standardDeviation)\n    else:\n        unknownSD(listMeasurement, 0.95)\n\n__MAIN__()\n\n\n\n\n\n\n\n","repo_name":"danganhvu1998/myINIAD","sub_path":"code/iniadStatistic/95PercentResultRange.py","file_name":"95PercentResultRange.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2123649446","text":"import numpy as np\nimport matplotlib.pyplot as plot\nimport pandas as pd\n\narquivo = r'C:/Users/dsadm/Downloads/dados_seguros/dados_pop.csv'\ndados_originais = pd.read_csv(arquivo, header=1)\n\ndados = dados_originais.to_dict('list')\n\nq1 = np.percentile(dados[\"População\"], 25, method=\"averaged_inverted_cdf\")\nq3 = np.percentile(dados[\"População\"], 75, method=\"averaged_inverted_cdf\")\ndq = q3 - q1\n\nlim_inf = np.fmax(min(dados[\"População\"]), q1 - 1.5*dq)\nlim_sup = np.fmin(max(dados[\"População\"]), q3 + 1.5*dq)\n\ndiagrama = plot.boxplot(dados[\"População\"], labels=['Brasil Inteiro'], positions=[1])\n\ndados_sudeste = {'Município': [], 'População': []}\n\nfor x in range(int(len(dados['Município']))):\n    if dados['Estado'][x] == 'SP' or dados['Estado'][x] == 'RJ' or dados['Estado'][x] == 'MG' or dados['Estado'][x] == 'ES':\n        dados_sudeste['Município'].append(dados['Município'][x])\n        dados_sudeste['População'].append(dados['População'][x])\n\nq1 = np.percentile(dados_sudeste[\"População\"], 25, method=\"averaged_inverted_cdf\")\nq3 = np.percentile(dados_sudeste[\"População\"], 75, method=\"averaged_inverted_cdf\")\ndq = q3 - q1\n\nlim_inf = np.fmax(min(dados_sudeste[\"População\"]), q1 - 1.5*dq)\nlim_sup = np.fmin(max(dados_sudeste[\"População\"]), q3 + 1.5*dq)\n\ndiagrama = plot.boxplot(dados_sudeste[\"População\"], labels=['Sudeste Apenas'], positions=[2])\n\n# tupla_exemplo = [('campinas', 1000), ('piracicaba', 100)]\ncontador = range(len(dados['Município']))\ntupla_nordeste = list(zip()) \n\nestados_nordeste = ['BA', 'SE', 'AL', 'CE', 'RN', 'MA', 'PE', 'PI', 'PB']\n\nfor i in contador:\n    if(dados['Estado'][i] in estados_nordeste):\n        t = (dados['População'][i], dados['Município'][i])\n        tupla_nordeste.append(t)\n\ntupla_nordeste.sort()\n\nprint(tupla_nordeste[0], tupla_nordeste[1], tupla_nordeste[2])\n\nplot.title(\"Box Plot 
dos Estados\")\nplot.ylabel(\"População\")\nplot.show()","repo_name":"ThiagoMargoni/Senai-University-Files","sub_path":"2023.1/Data Science/Aula 4/csv_ex2.py","file_name":"csv_ex2.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2303279577","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# PYTHON_ARGCOMPLETE_OK\n\nimport argparse\nimport os\nimport sys\nfrom functools import partial\nfrom typing import NamedTuple, Type, Optional\n\nimport argcomplete\n\nfrom benji.exception import InternalError\nfrom benji.io.factory import IOFactory\nfrom benji.storage.factory import StorageFactory\n\n\nclass _ExceptionMapping(NamedTuple):\n exception: Type[BaseException]\n exit_code: int\n include_stacktrace: bool\n\n\ndef completion(shell: str) -> None:\n print(argcomplete.shellcode(sys.argv[0], shell=shell))\n\n\ndef integer_range(minimum: int, maximum: int, arg: str) -> Optional[int]:\n if arg is None:\n return None\n\n try:\n value = int(arg)\n except ValueError as err:\n raise argparse.ArgumentTypeError(str(err))\n\n if value < minimum or (maximum is not None and value > maximum):\n raise argparse.ArgumentTypeError('Expected a value between {} and {}, got {}.'.format(minimum, maximum, value))\n\n return value\n\n\ndef main():\n if sys.hexversion < 0x030605F0:\n # We're using features introduced with Python 3.6. In addition Python versions before 3.6.5 have some\n # shortcomings in the concurrent.futures implementation which lead to an excessive memory usage.\n raise InternalError('Benji only supports Python 3.6.5 or above.')\n\n enable_experimental = os.getenv('BENJI_EXPERIMENTAL', default='0') == '1'\n\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, allow_abbrev=False)\n\n parser.add_argument('-c', '--config-file', default=None, type=str, help='Specify a non-default configuration file')\n parser.add_argument('-m',\n '--machine-output',\n action='store_true',\n default=False,\n help='Enable machine-readable JSON output')\n parser.add_argument('--log-level',\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],\n default='INFO',\n help='Only log messages of this level or above on the console')\n parser.add_argument('--no-color',\n action='store_true',\n default=False,\n help='Disable colorization of console logging')\n\n subparsers_root = parser.add_subparsers(title='commands')\n\n # BACKUP\n p = subparsers_root.add_parser('backup', help='Perform a backup')\n p.add_argument('-u',\n '--uid',\n dest='version_uid',\n default=None,\n help='Unique ID of created version (will be generated automatically if not specified)')\n p.add_argument('-s', '--snapshot', default='', help='Snapshot name (e.g. 
the name of the RBD snapshot)')\n p.add_argument('-r', '--rbd-hints', default=None, help='Hints in rbd diff JSON format')\n p.add_argument('-f', '--base-version', dest='base_version_uid', default=None, help='Base version UID')\n p.add_argument('-b', '--block-size', type=int, default=None, help='Block size in bytes')\n p.add_argument('-l',\n '--label',\n action='append',\n dest='labels',\n metavar='label',\n default=None,\n help='Labels for this version (can be repeated)')\n p.add_argument('-S', '--storage', default='', help='Destination storage (if unspecified the default is used)')\n p.add_argument('source', help='Source URL')\n p.add_argument('volume', help='Volume name')\n p.set_defaults(func='backup')\n\n # BATCH-DEEP-SCRUB\n p = subparsers_root.add_parser('batch-deep-scrub',\n help='Check data and metadata integrity of multiple versions at once',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n p.add_argument('-p',\n '--block-percentage',\n type=partial(integer_range, 1, 100),\n default=100,\n help='Check only a certain percentage of blocks')\n p.add_argument('-P',\n '--version-percentage',\n type=partial(integer_range, 1, 100),\n default=100,\n help='Check only a certain percentage of versions')\n p.add_argument('-g', '--group_label', default=None, help='Label to find related versions')\n p.add_argument('filter_expression', nargs='?', default=None, help='Version filter expression')\n p.set_defaults(func='batch_deep_scrub')\n\n # BATCH-SCRUB\n p = subparsers_root.add_parser('batch-scrub',\n help='Check block existence and metadata integrity of multiple versions at once',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n p.add_argument('-p',\n '--block-percentage',\n type=partial(integer_range, 1, 100),\n default=100,\n help='Check only a certain percentage of blocks')\n p.add_argument('-P',\n '--version-percentage',\n type=partial(integer_range, 1, 100),\n default=100,\n help='Check only a certain percentage of versions')\n p.add_argument('-g', '--group_label', default=None, help='Label to find related versions')\n p.add_argument('filter_expression', nargs='?', default=None, help='Version filter expression')\n p.set_defaults(func='batch_scrub')\n\n # CLEANUP\n p = subparsers_root.add_parser('cleanup', help='Cleanup no longer referenced blocks')\n p.add_argument('--override-lock', action='store_true', help='Override and release any held lock (dangerous)')\n p.set_defaults(func='cleanup')\n\n # COMPLETION\n p = subparsers_root.add_parser('completion', help='Emit autocompletion script')\n p.add_argument('shell', choices=['bash', 'tcsh'], help='Shell')\n p.set_defaults(func='completion')\n\n # DATABASE-INIT\n p = subparsers_root.add_parser('database-init',\n help='Initialize the database (will not delete existing tables or data)')\n p.set_defaults(func='database_init')\n\n # DATABASE-MIGRATE\n p = subparsers_root.add_parser('database-migrate', help='Migrate an existing database to a new schema revision')\n p.set_defaults(func='database_migrate')\n\n # DEEP-SCRUB\n p = subparsers_root.add_parser('deep-scrub',\n help='Check a version\\'s data and metadata integrity',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n p.add_argument('-s', '--source', default=None, help='Additionally compare version against source URL')\n p.add_argument('-p',\n '--block-percentage',\n type=partial(integer_range, 1, 100),\n default=100,\n help='Check only a certain percentage of blocks')\n p.add_argument('version_uid', help='Version UID')\n p.set_defaults(func='deep_scrub')\n\n # 
ENFORCE\n    p = subparsers_root.add_parser('enforce', help=\"Enforce a retention policy\")\n    p.add_argument('--dry-run', action='store_true', help='Only show which versions would be removed')\n    p.add_argument('-k', '--keep-metadata-backup', action='store_true', help='Keep version metadata backup')\n    p.add_argument('-g', '--group_label', default=None, help='Label to find related versions to remove')\n    p.add_argument('rules_spec', help='Retention rules specification')\n    p.add_argument('filter_expression', nargs='?', default=None, help='Version filter expression')\n    p.set_defaults(func='enforce_retention_policy')\n\n    # LABEL\n    p = subparsers_root.add_parser('label', help='Add labels to a version')\n    p.add_argument('version_uid')\n    p.add_argument('labels', nargs='+')\n    p.set_defaults(func='label')\n\n    # LS\n    p = subparsers_root.add_parser('ls', help='List versions')\n    p.add_argument('filter_expression', nargs='?', default=None, help='Version filter expression')\n    p.add_argument('-l', '--include-labels', action='store_true', help='Include labels in output')\n    p.add_argument('-s', '--include-stats', action='store_true', help='Include statistics in output')\n    p.set_defaults(func='ls')\n\n    # METADATA-BACKUP\n    p = subparsers_root.add_parser('metadata-backup', help='Back up the metadata of one or more versions')\n    p.add_argument('filter_expression', help=\"Version filter expression\")\n    p.add_argument('-f', '--force', action='store_true', help='Overwrite existing metadata backups')\n    p.set_defaults(func='metadata_backup')\n\n    # METADATA EXPORT\n    p = subparsers_root.add_parser('metadata-export',\n                                   help='Export the metadata of one or more versions to a file or standard output')\n    p.add_argument('filter_expression', nargs='?', default=None, help=\"Version filter expression\")\n    p.add_argument('-f', '--force', action='store_true', help='Overwrite an existing output file')\n    p.add_argument('-o', '--output-file', default=None, help='Output file (standard output if missing)')\n    p.set_defaults(func='metadata_export')\n\n    # METADATA-IMPORT\n    p = subparsers_root.add_parser('metadata-import',\n                                   help='Import the metadata of one or more versions from a file or standard input')\n    p.add_argument('-i', '--input-file', default=None, help='Input file (standard input if missing)')\n    p.set_defaults(func='metadata_import')\n\n    # METADATA-LS\n    p = subparsers_root.add_parser('metadata-ls', help='List the version metadata backup')\n    p.add_argument('-S', '--storage', default=None, help='Source storage (if unspecified the default is used)')\n    p.set_defaults(func='metadata_ls')\n\n    # METADATA-RESTORE\n    p = subparsers_root.add_parser('metadata-restore', help='Restore the metadata of one or more versions')\n    p.add_argument('-S', '--storage', default=None, help='Source storage (if unspecified the default is used)')\n    p.add_argument('version_uids', metavar='VERSION_UID', nargs='+', help=\"Version UID\")\n    p.set_defaults(func='metadata_restore')\n\n    # NBD\n    p = subparsers_root.add_parser('nbd',\n                                   help='Start an NBD server',\n                                   formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    p.add_argument('-a', '--bind-address', default='127.0.0.1', help='Bind to the specified IP address')\n    p.add_argument('-p', '--bind-port', default=10809, help='Bind to the specified port')\n    p.add_argument('-r', '--read-only', action='store_true', default=False, help='NBD device is read-only')\n    p.add_argument('-d', '--discard-changes', action='store_true', default=False, help='Discard changes to NBD device after disconnecting. 
Don\\'t create new version.')\n p.set_defaults(func='nbd')\n\n # PROTECT\n p = subparsers_root.add_parser('protect', help='Protect one or more versions')\n p.add_argument('version_uids', metavar='version_uid', nargs='+', help=\"Version UID\")\n p.set_defaults(func='protect')\n\n # RESTORE\n p = subparsers_root.add_parser('restore', help='Restore a backup')\n p.add_argument('-s', '--sparse', action='store_true', help='Restore only existing blocks')\n p.add_argument('-f', '--force', action='store_true', help='Overwrite an existing file, device or image')\n p.add_argument('-d', '--database-less', action='store_true', help='Restore without requiring the database')\n p.add_argument('-S', '--storage', default=None, help='Source storage (if unspecified the default is used)')\n p.add_argument('version_uid', help='Version UID to restore')\n p.add_argument('destination', help='Destination URL')\n p.set_defaults(func='restore')\n\n # RM\n p = subparsers_root.add_parser('rm', help='Remove one or more versions')\n p.add_argument('-f', '--force', action='store_true', help='Force removal (overrides protection of recent versions)')\n p.add_argument('-k', '--keep-metadata-backup', action='store_true', help='Keep version metadata backup')\n p.add_argument('--override-lock', action='store_true', help='Override and release any held locks (dangerous)')\n p.add_argument('version_uids', metavar='version_uid', nargs='+', help='Version UID')\n p.set_defaults(func='rm')\n\n # SCRUB\n p = subparsers_root.add_parser('scrub',\n help='Check a version\\'s block existence and metadata integrity',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n p.add_argument('-p',\n '--block-percentage',\n type=partial(integer_range, 1, 100),\n default=100,\n help='Check only a certain percentage of blocks')\n p.add_argument('version_uid', help='Version UID')\n p.set_defaults(func='scrub')\n\n # STORAGE-STATS\n p = subparsers_root.add_parser('storage-stats', help='Show storage statistics')\n p.add_argument('storage_name', nargs='?', default=None, help='Storage')\n p.set_defaults(func='storage_stats')\n\n # UNPROTECT\n p = subparsers_root.add_parser('unprotect', help='Unprotect one or more versions')\n p.add_argument('version_uids', metavar='version_uid', nargs='+', help='Version UID')\n p.set_defaults(func='unprotect')\n\n # VERSION-INFO\n p = subparsers_root.add_parser('version-info', help='Program version information')\n p.set_defaults(func='version_info')\n\n # REST-API\n if enable_experimental:\n p = subparsers_root.add_parser('rest-api', help='Start REST API server')\n p.set_defaults(func='rest_api')\n p.add_argument('-a', '--bind-address', default='127.0.0.1', help='Bind to the specified IP address')\n p.add_argument('-p', '--bind-port', default=8080, type=int, help='Bind to the specified port')\n p.add_argument('--threads', default=1, type=int, help='Number of worker threads')\n\n # DU\n p = subparsers_root.add_parser('storage-usage', help='Provide storage usage statistics')\n p.add_argument('filter_expression', nargs='?', default=None, help='Version filter expression')\n p.set_defaults(func='storage_usage')\n\n argcomplete.autocomplete(parser)\n args = parser.parse_args()\n\n if not hasattr(args, 'func'):\n parser.print_usage()\n sys.exit(os.EX_USAGE)\n\n if args.func == 'completion':\n completion(args.shell)\n sys.exit(os.EX_OK)\n\n from benji.config import Config\n from benji.logging import logger, setup_logging\n if args.config_file is not None and args.config_file != '':\n try:\n cfg = open(args.config_file, 'r', 
encoding='utf-8').read()\n except FileNotFoundError:\n logger.error('File {} not found.'.format(args.config_file))\n sys.exit(os.EX_USAGE)\n config = Config(ad_hoc_config=cfg)\n else:\n config = Config()\n\n console_formatter = 'console-colored'\n if args.machine_output:\n console_formatter = 'json'\n elif args.no_color:\n console_formatter = 'console-plain'\n\n setup_logging(logfile=config.get('logFile', types=(str, type(None))),\n console_level=args.log_level,\n console_formatter=console_formatter)\n\n IOFactory.initialize(config)\n StorageFactory.initialize(config)\n\n import benji.commands\n commands = benji.commands.Commands(args.machine_output, config)\n func = getattr(commands, args.func)\n\n # Pass over to function\n func_args = dict(args._get_kwargs())\n del func_args['config_file']\n del func_args['func']\n del func_args['log_level']\n del func_args['machine_output']\n del func_args['no_color']\n\n # From most specific to least specific\n # yapf: disable\n exception_mappings = [\n _ExceptionMapping(exception=benji.exception.UsageError, exit_code=os.EX_USAGE, include_stacktrace=False),\n _ExceptionMapping(exception=benji.exception.AlreadyLocked, exit_code=os.EX_NOPERM, include_stacktrace=False),\n _ExceptionMapping(exception=benji.exception.InternalError, exit_code=os.EX_SOFTWARE, include_stacktrace=True),\n _ExceptionMapping(exception=benji.exception.ConfigurationError, exit_code=os.EX_CONFIG, include_stacktrace=False),\n _ExceptionMapping(exception=benji.exception.InputDataError, exit_code=os.EX_DATAERR, include_stacktrace=False),\n _ExceptionMapping(exception=benji.exception.ScrubbingError, exit_code=os.EX_DATAERR, include_stacktrace=False),\n _ExceptionMapping(exception=PermissionError, exit_code=os.EX_NOPERM, include_stacktrace=False),\n _ExceptionMapping(exception=FileExistsError, exit_code=os.EX_CANTCREAT, include_stacktrace=False),\n _ExceptionMapping(exception=FileNotFoundError, exit_code=os.EX_NOINPUT, include_stacktrace=False),\n _ExceptionMapping(exception=EOFError, exit_code=os.EX_IOERR, include_stacktrace=True),\n _ExceptionMapping(exception=IOError, exit_code=os.EX_IOERR, include_stacktrace=True),\n _ExceptionMapping(exception=OSError, exit_code=os.EX_OSERR, include_stacktrace=True),\n _ExceptionMapping(exception=ConnectionError, exit_code=os.EX_IOERR, include_stacktrace=True),\n _ExceptionMapping(exception=LookupError, exit_code=os.EX_NOINPUT, include_stacktrace=True),\n _ExceptionMapping(exception=KeyboardInterrupt, exit_code=os.EX_NOINPUT, include_stacktrace=False),\n _ExceptionMapping(exception=BaseException, exit_code=os.EX_SOFTWARE, include_stacktrace=True),\n ]\n # yapf: enable\n\n try:\n logger.debug('commands.{0}(**{1!r})'.format(args.func, func_args))\n func(**func_args)\n sys.exit(os.EX_OK)\n except SystemExit:\n raise\n except BaseException as exception:\n for case in exception_mappings:\n if isinstance(exception, case.exception):\n message = str(exception)\n if message:\n message = '{}: {}'.format(exception.__class__.__name__, message)\n else:\n message = '{} exception occurred.'.format(exception.__class__.__name__)\n if case.include_stacktrace:\n logger.error(message, exc_info=True)\n else:\n logger.debug(message, exc_info=True)\n logger.error(message)\n sys.exit(case.exit_code)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"elemental-lf/benji","sub_path":"src/benji/scripts/benji.py","file_name":"benji.py","file_ext":"py","file_size_in_byte":18402,"program_lang":"python","lang":"en","doc_type":"code","stars":131,"dataset":"github-code","pt":"78"} +{"seq_id":"72420086333","text":"n = int(input('Enter Number:-'))\ns = 0\ni = 1\n\nwhile(i<=n):\n if(i%2==0):\n s = s + i**2\n i = i + 1\n \nprint('Sum of Square of all even numbers till {} is {}'.format(n,s))","repo_name":"VaibhavNGithub/SeleniumPractice","sub_path":"EvenNumbers.py","file_name":"EvenNumbers.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20944723961","text":"# %%\nimport numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\n\n# %%\neof_result = xr.open_dataset('/work/mh0033/m300883/Tel_MMLE/data/MPI_GE_onepct/EOF_result/plev_50000_decade_temporal_ens_eof_result.nc')\n# %%\npc = eof_result['pc']\n# %%\nNAO = pc.sel(mode='NAO')\n\n# %%\nNAO_first10 = NAO.isel(time=slice(0, 10))\nNAO_last10 = NAO.isel(time=slice(-10, None))\n\n# %%\nNAO_first10 = NAO_first10.stack(realization=('ens', 'time'))\nNAO_last10 = NAO_last10.stack(realization=('ens', 'time'))\n\n# %%\n\n\n#%%\nfirst_pc = NAO_first10.to_dataframe(name = 'NAO')['NAO'].reset_index()['NAO']\nlast_pc = NAO_last10.to_dataframe(name = 'NAO')['NAO'].reset_index()['NAO']\n#%%\n# Fit an AR1 model to the data\nmodel_first = sm.tsa.ARIMA(first_pc, order=(1, 0, 0)).fit()\nmodel_last = sm.tsa.ARIMA(last_pc, order=(1, 0, 0)).fit()\n\n# %%\n# Generate 9000 realizations of model_first, each 1000 long\nn_realizations = 5000\nn_obs = 1000\nsimulations = np.empty((n_realizations, n_obs))\nfor i in range(n_realizations):\n simulations[i, :] = model_first.simulate(nsimulations=n_obs)\n\n# Print the shape of the simulations array\nprint(f\"The shape of the simulations array is {simulations.shape}.\")\n# %%\n# count the number of values above 1.5 in simulations along the second axis\ncounts = (simulations > 1.5).sum(axis=1)\n# %%\n# calculate the 5th and 95th percentiles of counts\npercentiles = np.percentile(counts, [5, 95])\n# %%\nimport numpy as np\nimport statsmodels.api as sm\n\n# Generate some sample data\nnp.random.seed(123)\nnobs = 100\nx = np.random.normal(size=nobs)\ny = np.zeros(nobs)\nfor i in range(3, nobs):\n y[i] = 0.5*y[i-1] - 0.2*y[i-2] + 0.1*y[i-3] + x[i] + np.random.standard_t(3)\n\n# Fit an AR model of order 3 with t-distributed noise\nmodel = sm.tsa.ar_model.AutoReg(y, lags=3, trend='c', method='mle', dist='t', df=3).fit()\n\n# Print the model summary\nprint(model.summary())\n# %%\n","repo_name":"liuquan18/Tel_MMLE","sub_path":"script/7new_standard/AR1_sig.py","file_name":"AR1_sig.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12456194934","text":"import argparse\nfrom config import Config\nfrom worker import Worker\n\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='a quick setup tool for flask and react project')\n parser.add_argument('name', help='The name of the project')\n parser.add_argument('-p', '--path', dest='path', help='The path of the project', default='.')\n args = parser.parse_args()\n\n config = Config(name=args.name, path=args.path)\n w = Worker(config=config)\n w.work()\n\nif __name__ == '__main__':\n\n 
main()\n","repo_name":"ripitrust/flask_react","sub_path":"flask_react/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"30804305776","text":"\"\"\"Compute unrealized gains.\n\nThe configuration for this plugin is a single string, the name of the subaccount\nto add to post the unrealized gains to, like this:\n\n plugin \"beancount.plugins.unrealized\" \"Unrealized\"\n\nIf you don't specify a name for the subaccount (the configuration value is\noptional), by default it inserts the unrealized gains in the same account that\nis being adjusted.\n\nModified by Jason Chu to post new transactions every month\n\"\"\"\n__author__ = \"Martin Blais \"\n\nimport collections\nimport datetime\n\nfrom beancount.core.number import ZERO\nfrom beancount.core import data\nfrom beancount.core import account\nfrom beancount.core import getters\nfrom beancount.core import amount\nfrom beancount.core import flags\nfrom beancount.ops import holdings\nfrom beancount.ops import prices\nfrom beancount.ops import summarize\nfrom beancount.parser import options\nfrom beancount.utils import date_utils\n\n\n__plugins__ = ('add_unrealized_gains',)\n\n\nUnrealizedError = collections.namedtuple('UnrealizedError', 'source message entry')\n\n\nONEDAY = datetime.timedelta(days=1)\n\n\ndef matching_unrealized_transaction(entry, account, cost_currency, prev_currency):\n return (any(posting.account == account for posting in entry.postings) and\n entry.postings[0].units.currency == cost_currency and\n entry.meta['prev_currency'] == prev_currency)\n\n\ndef find_previous_unrealized_transaction(entries, account, cost_currency, prev_currency, include_clear=False):\n for entry in reversed(entries):\n if matching_unrealized_transaction(entry, account, cost_currency, prev_currency):\n if not include_clear and entry.narration.startswith('Clear unrealized'):\n return None\n return entry\n return None\n\n\ndef add_unrealized_gains_at_date(entries, unrealized_entries, income_account_type,\n price_map, date, meta, subaccount):\n \"\"\"Insert/remove entries for unrealized capital gains \n\n This function takes a list of entries and a date and creates a set of unrealized gains\n transactions, negating previous unrealized gains transactions within the same account.\n\n Args:\n entries: A list of data directives.\n unrealized_entries: A list of previously generated unrealized transactions.\n income_account_type: The income account type.\n price_map: A price map returned by prices.build_price_map.\n date: The effective date to generate the unrealized transactions for.\n meta: meta.\n subaccount: A string, and optional the name of a subaccount to create\n under an account to book the unrealized gain. 
If this is left to its\n default value, the gain is booked directly in the same account.\n Returns:\n A list of newly created unrealized transactions and a list of errors.\n \"\"\"\n errors = []\n\n entries_truncated = summarize.truncate(entries, date + ONEDAY)\n\n holdings_list = holdings.get_final_holdings(entries_truncated, price_map=price_map, date=date)\n\n # Group positions by (account, cost, cost_currency).\n holdings_list = holdings.aggregate_holdings_by(\n holdings_list, lambda h: (h.account, h.currency, h.cost_currency))\n\n holdings_with_currencies = set()\n\n # Create transactions to account for each position.\n new_entries = []\n for index, holding in enumerate(holdings_list):\n if (holding.currency == holding.cost_currency or\n holding.cost_currency is None):\n continue\n\n # Note: since we're only considering positions held at cost, the\n # transaction that created the position *must* have created at least one\n # price point for that commodity, so we never expect for a price not to\n # be available, which is reasonable.\n if holding.price_number is None:\n # An entry without a price might indicate that this is a holding\n # resulting from leaked cost basis. {0ed05c502e63, b/16}\n if holding.number:\n errors.append(\n UnrealizedError(meta,\n \"A valid price for {h.currency}/{h.cost_currency} \"\n \"could not be found\".format(h=holding), None))\n continue\n\n # Compute the PnL; if there is no profit or loss, we create a\n # corresponding entry anyway.\n pnl = holding.market_value - holding.book_value\n if holding.number == ZERO:\n # If the number of units sum to zero, the holdings should have been\n # zero.\n errors.append(\n UnrealizedError(\n meta,\n \"Number of units of {} in {} in holdings sum to zero \"\n \"for account {} and should not\".format(\n holding.currency, holding.cost_currency, holding.account),\n None))\n continue\n\n # Compute the name of the accounts and add the requested subaccount name\n # if requested.\n asset_account = holding.account\n income_account = account.join(income_account_type,\n account.sans_root(holding.account))\n if subaccount:\n asset_account = account.join(asset_account, subaccount)\n income_account = account.join(income_account, subaccount)\n\n holdings_with_currencies.add((holding.account, holding.cost_currency, holding.currency))\n\n # Find the previous unrealized gain entry to negate and decide if we\n # should create a new posting.\n latest_unrealized_entry = find_previous_unrealized_transaction(unrealized_entries, asset_account, holding.cost_currency, holding.currency)\n\n # Don't create a new transaction if our last one hasn't changed.\n if (latest_unrealized_entry and\n pnl == latest_unrealized_entry.postings[0].units.number):\n continue\n\n # Don't bother creating a blank unrealized transaction if none existed\n if pnl == ZERO and not latest_unrealized_entry:\n continue\n\n relative_pnl = pnl\n if latest_unrealized_entry:\n relative_pnl = pnl - latest_unrealized_entry.postings[0].units.number\n\n # Create a new transaction to account for this difference in gain.\n gain_loss_str = \"gain\" if relative_pnl > ZERO else \"loss\"\n narration = (\"Unrealized {} for {h.number} units of {h.currency} \"\n \"(price: {h.price_number:.4f} {h.cost_currency} as of {h.price_date}, \"\n \"average cost: {h.cost_number:.4f} {h.cost_currency})\").format(\n gain_loss_str, h=holding)\n entry = data.Transaction(data.new_metadata(meta[\"filename\"], lineno=1000 + index,\n kvlist={'prev_currency': holding.currency}), date,\n flags.FLAG_UNREALIZED, 
None, narration, set(), set(), [])\n\n        # Book this as income, converting the account name to be the same, but as income.\n        # Note: this is a rather convenient but arbitrary choice--maybe it would be best to\n        # let the user decide to what account to book it, but I don't have a nice way to let the\n        # user specify this.\n        #\n        # Note: we never set a price because we don't want these to end up in Conversions.\n        entry.postings.extend([\n            data.Posting(\n                asset_account,\n                amount.Amount(pnl, holding.cost_currency),\n                None,\n                None,\n                None,\n                None),\n            data.Posting(\n                income_account,\n                amount.Amount(-pnl, holding.cost_currency),\n                None,\n                None,\n                None,\n                None)\n        ])\n        if latest_unrealized_entry:\n            for posting in latest_unrealized_entry.postings[:2]:\n                entry.postings.append(\n                    data.Posting(\n                        posting.account,\n                        -posting.units,\n                        None,\n                        None,\n                        None,\n                        None))\n\n        new_entries.append(entry)\n\n    return new_entries, holdings_with_currencies, errors\n\n\ndef add_unrealized_gains(entries, options_map, subaccount=None):\n    \"\"\"Insert entries for unrealized capital gains.\n\n    This function inserts entries that represent unrealized gains, at the end of\n    the available history. It returns a new list of entries, with the new gains\n    inserted. It replaces the account type with an entry in an income account.\n    Optionally, it can book the gain in a subaccount of the original and income\n    accounts.\n\n    Args:\n      entries: A list of data directives.\n      options_map: A dict of options, that conforms to beancount.parser.options.\n      subaccount: A string, optionally the name of a subaccount to create\n        under an account to book the unrealized gain. If this is left to its\n        default value, the gain is booked directly in the same account.\n    Returns:\n      A list of entries, which includes the new unrealized capital gains entries\n      at the end, and a list of errors. 
The new list of entries is still sorted.\n \"\"\"\n errors = []\n meta = data.new_metadata('', 0)\n\n account_types = options.get_account_types(options_map)\n\n # Assert the subaccount name is in valid format.\n if subaccount:\n validation_account = account.join(account_types.assets, subaccount)\n if not account.is_valid(validation_account):\n errors.append(\n UnrealizedError(meta,\n \"Invalid subaccount name: '{}'\".format(subaccount),\n None))\n return entries, errors\n\n if not entries:\n return (entries, errors)\n\n # Group positions by (account, cost, cost_currency).\n price_map = prices.build_price_map(entries)\n\n new_entries = []\n\n # Start at the first month after our first transaction\n date = date_utils.next_month(entries[0].date)\n last_month = date_utils.next_month(entries[-1].date)\n last_holdings_with_currencies = None\n while date <= last_month:\n date_entries, holdings_with_currencies, date_errors = add_unrealized_gains_at_date(\n entries, new_entries, account_types.income, price_map, date, meta,\n subaccount)\n new_entries.extend(date_entries)\n errors.extend(date_errors)\n\n if last_holdings_with_currencies:\n for account_, cost_currency, currency in last_holdings_with_currencies - holdings_with_currencies:\n # Create a negation transaction specifically to mark that all gains have been realized\n if subaccount:\n account_ = account.join(account_, subaccount)\n\n latest_unrealized_entry = find_previous_unrealized_transaction(new_entries, account_, cost_currency, currency)\n if not latest_unrealized_entry:\n continue\n entry = data.Transaction(data.new_metadata(meta[\"filename\"], lineno=999,\n kvlist={'prev_currency': currency}), date,\n flags.FLAG_UNREALIZED, None, 'Clear unrealized gains/losses of {}'.format(currency), set(), set(), [])\n\n # Negate the previous transaction because of unrealized gains are now 0\n for posting in latest_unrealized_entry.postings[:2]:\n entry.postings.append(\n data.Posting(\n posting.account,\n -posting.units,\n None,\n None,\n None,\n None))\n new_entries.append(entry)\n\n\n last_holdings_with_currencies = holdings_with_currencies\n date = date_utils.next_month(date)\n\n # Ensure that the accounts we're going to use to book the postings exist, by\n # creating open entries for those that we generated that weren't already\n # existing accounts.\n new_accounts = {posting.account\n for entry in new_entries\n for posting in entry.postings}\n open_entries = getters.get_account_open_close(entries)\n new_open_entries = []\n for index, account_ in enumerate(sorted(new_accounts)):\n if account_ not in open_entries:\n meta = data.new_metadata(meta[\"filename\"], index)\n open_entry = data.Open(meta, new_entries[0].date, account_, None, None)\n new_open_entries.append(open_entry)\n\n return (entries + new_open_entries + new_entries, errors)\n\n\ndef get_unrealized_entries(entries):\n \"\"\"Return entries automatically created for unrealized gains.\n\n Args:\n entries: A list of directives.\n Returns:\n A list of directives, all of which are in the original list.\n \"\"\"\n return [entry\n for entry in entries\n if (isinstance(entry, data.Transaction) and\n entry.flag == flags.FLAG_UNREALIZED)]\n","repo_name":"xentac/beancount-plugins-xentac","sub_path":"beancount_plugins_xentac/plugins/unrealized_periodic.py","file_name":"unrealized_periodic.py","file_ext":"py","file_size_in_byte":13173,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"30394833972","text":"import 
sys\n\nsys.path.append('../')\n\nfrom typing import List\nimport torch\nimport torchvision\nfrom common.utils import *\nfrom components.model_component import ModelSegmentation\nfrom components.muxer_component import SourceMuxer\nfrom components.outer_component import DisplayComponent\nfrom components.painter_component import Tiler, MaskPainter\nfrom components.reader_component import *\nfrom components.handler_component import Filter\nfrom pipeline import Pipeline\n\nSEM_CLASSES = [\n    '__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',\n    'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',\n    'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'\n]\n\n\ndef get_usb_cam(path: str, name: str) -> CamReader:\n    return CamReader(path, name)\n\n\ndef get_videofile_reader(path: str, name: str) -> VideoReader:\n    return VideoReader(path, name)\n\n\ndef get_muxer(readers: List[ReaderBase]) -> SourceMuxer:\n    muxer = SourceMuxer('muxer', max_batch_size=1)\n    for reader in readers:\n        muxer.add_source(reader)\n    return muxer\n\n\ndef get_segmentation_model(name: str, model: torch.nn.Module, sources: List[ReaderBase], classes: List[str],\n                           transforms: list = None,\n                           confidence = .8) -> ModelSegmentation:\n    model_segm = ModelSegmentation(name, model)\n    model_segm.set_labels(classes)\n    for src in sources:\n        model_segm.add_source(src.get_name())\n    model_segm.set_transforms(transforms)\n    model_segm.set_confidence(conf=confidence)\n    return model_segm\n\n\ndef get_tiler(name: str, tiler_size: tuple, frame_size: tuple = (640, 1280)) -> Tiler:\n    tiler = Tiler(name=name, tiler_size=tiler_size)\n    tiler.set_size(frame_size)\n    return tiler\n\n\nif __name__ == '__main__':\n    model = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=True)\n    pipeline = Pipeline()\n\n    readers = []\n    usb_srcs = get_cam_srcs()\n    for usb_src in usb_srcs:\n        readers.append(CamReader(usb_src, usb_src))\n\n    name = None\n    file_srcs = get_video_file_srcs()\n    for i_file_srcs in range(len(file_srcs)):\n        name = f'{file_srcs[i_file_srcs]}_{i_file_srcs}'\n        readers.append(VideoReader(file_srcs[i_file_srcs], name))\n\n    name = None\n    file_srcs = get_img_srcs()\n    for i_file_srcs in range(len(file_srcs)):\n        name = f'{file_srcs[i_file_srcs]}_{i_file_srcs}'\n        readers.append(ImageReader(file_srcs[i_file_srcs], name))\n\n    muxer = get_muxer(readers)\n    model_segm = get_segmentation_model('detection', model, sources=readers, classes=SEM_CLASSES,\n                                        confidence=get_confidence())\n\n    model_segm.set_transforms([torchvision.transforms.Resize((240, 320))])\n    mask_painter = MaskPainter('mask_painter')\n\n    filter_masks = Filter('mask_filter', ['person'])\n    tiler = get_tiler('tiler', tiler_size=get_tsize(), frame_size=get_fsize())\n\n    outer = DisplayComponent('display')\n    pipeline.set_device(get_device())\n    pipeline.add_all([muxer, model_segm, filter_masks, mask_painter, tiler, outer])\n    pipeline.compile()\n    pipeline.run()\n    pipeline.close()\n","repo_name":"MLFreelib/cvflow","sub_path":"examples/segmentation_example.py","file_name":"segmentation_example.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"10090794543","text":"#!/usr/bin/env python3\n\nfrom tkinter import Tk, Frame #importing only necessary stuff.\n\ndef keyrelease(e):\n    print('The key was released: ', repr(e.char))\n\nroot = Tk()\nf = Frame(root, width=100, height=100)\nf.bind(\"<KeyRelease>\", 
keyrelease)\nf.pack()\nroot.mainloop()\n","repo_name":"scjurgen/curses-smf-player","sub_path":"minikeyboard.py","file_name":"minikeyboard.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13328704502","text":"import os\nfrom io import StringIO\n\nfrom django.conf import settings\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.core.management import call_command\nfrom django.test import TestCase\n\nfrom bsyncviewer.models.bedes_models import BedesEnumeration, BedesTerm\nfrom bsyncviewer.models.schema import Schema\nfrom bsyncviewer.models.use_case import UseCase\n\n# For reset test only\nDEFAULT_SCHEMA_VERSION = settings.DEFAULT_SCHEMA_VERSION\n\n# Use a custom version that is not an actual version to prevent overwriting saved BEDES mappings\nTEST_SCHEMA_VERSION = '0.0.1'\n\n\nclass TestCommand(TestCase):\n\n def test_reset_schema_command(self):\n print('TESTING RESET SCHEMA COMMAND')\n out = StringIO()\n call_command('reset_schema', schema_version=DEFAULT_SCHEMA_VERSION, stdout=out)\n\n # was a schema created?\n schemas = Schema.objects.all()\n self.assertGreater(schemas.count(), 0)\n\n # clean-up\n for schema in schemas:\n schema.delete()\n\n\nclass TestCommandWithSchema(TestCase):\n\n def setUp(self):\n self.schema = Schema.objects.filter(version=TEST_SCHEMA_VERSION).first()\n if not self.schema:\n # add schema file - make sure to create a copy since the version will be deleted if\n # the schema is deleted\n sf = os.path.join(os.path.dirname(__file__), 'data', 'test_schema.xsd')\n file = open(sf, 'rb')\n simple_uploaded_file = SimpleUploadedFile(file.name, file.read())\n\n self.schema = Schema(\n name='Version {}'.format(TEST_SCHEMA_VERSION),\n version=TEST_SCHEMA_VERSION,\n schema_file=simple_uploaded_file\n )\n self.schema.save() # Calling save also processes the schema and generates the template\n\n def test_create_use_case_command(self):\n print('TESTING CREATE USE CASE COMMAND')\n\n out = StringIO()\n call_command('create_use_case', schema_version=TEST_SCHEMA_VERSION, stdout=out)\n\n # assert that a use case was created\n use_cases = UseCase.objects.all().count()\n print(\"USE CASES: {}\".format(use_cases))\n self.assertGreater(use_cases, 0)\n\n def test_bedes_command(self):\n # The schema must exist before this command is called.\n print('TESTING BEDES COMMAND')\n\n # create the CSV files\n out = StringIO()\n call_command('bedes', schema_version=TEST_SCHEMA_VERSION, bedes_version='v2.2', stdout=out)\n\n # add to database\n call_command('bedes', schema_version=TEST_SCHEMA_VERSION, bedes_version='v2.2', save_to_db=True, stdout=out)\n\n # check that there are items in bedes models\n bterms = BedesTerm.objects.all().count()\n benums = BedesEnumeration.objects.all().count()\n print(\"BEDES TERMS: {}, BEDES ENUMS: {}\".format(bterms, benums))\n self.assertGreater(bterms, 0)\n self.assertGreater(benums, 0)\n\n def tearDown(self):\n # clean-up files on disk\n if self.schema and self.schema.id is not None:\n self.schema.delete()\n","repo_name":"BuildingSync/BuildingSync-website","sub_path":"bsyncviewer/tests/test_command.py","file_name":"test_command.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"15018847708","text":"import io\r\nfrom collections import OrderedDict\r\nimport numpy as np\r\nimport torch\r\n\r\ndef load_vectors(fname):\r\n fin = 
io.open(fname, 'r', encoding='utf-8', newline='\\n', errors='ignore')\r\n    n, d = fin.readline().split()\r\n    data = {}\r\n    for line in fin:\r\n        tokens = line.rstrip().split(' ')\r\n        data[tokens[0]] = tokens[1:]\r\n        if(len(data) > 40000):\r\n            break\r\n    return data\r\n\r\ndef build_vocab(all_tokens):\r\n    \r\n    char_v = OrderedDict()\r\n    char_v['<pad>'] = 0\r\n    char_v['<unk>'] = 1\r\n    char_idx = 2\r\n    for tokens in all_tokens:\r\n        for tok in tokens:\r\n            for c in tok:\r\n                if c not in char_v:\r\n                    char_v[c] = char_idx\r\n                    char_idx += 1\r\n\r\n    return char_v\r\n\r\ndef load_char_embedding(embed_file, vocab, word_embed_dim):\r\n    \r\n    embed_matrix = list()\r\n    embed_matrix.append(np.zeros(word_embed_dim, dtype=np.float32)) ## FOR PAD\r\n    embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim)) ## FOR UNK\r\n\r\n    #for i in range(2, len(vocab)):\r\n    for k in vocab.keys():\r\n        if k == '<pad>' or k == '<unk>':\r\n            continue\r\n        if k in embed_file:\r\n            vec = embed_file[k]\r\n            embed_matrix.append(vec)\r\n        else:\r\n            embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))\r\n\r\n    return embed_matrix\r\n\r\n\r\n\r\n\r\ndef get_embed_matrix_and_vocab(val_dataset_raw, train_dataset_raw, tokenizer):\r\n    \"\"\"\r\n    Build the character vocabulary and embedding matrix for the given datasets\r\n    \"\"\"\r\n    eng_vectors = load_vectors('wiki-news-300d-1M.vec')\r\n    all_tokens = val_dataset_raw['input_ids'] + (train_dataset_raw['input_ids']) \r\n    all_ = []\r\n    for tok in all_tokens:\r\n        all_.append(tokenizer.convert_ids_to_tokens(tok, skip_special_tokens=True))\r\n    char_vocab = build_vocab(all_)\r\n    embed_matrix = load_char_embedding(eng_vectors, char_vocab, 300)\r\n\r\n\r\n    temp_emb = embed_matrix\r\n\r\n    new_temp_emb = np.zeros(shape = (len(embed_matrix), 300))\r\n\r\n    for i in range(len(temp_emb)):\r\n        \r\n        vec = temp_emb[i]\r\n        if type(vec) == list:\r\n            print('changing list to numpy array')\r\n            new_vec = np.zeros(300)\r\n            for j in range(len(vec)):\r\n                new_vec[j] = float(vec[j])\r\n\r\n            new_temp_emb[i] = new_vec\r\n        else:\r\n            new_temp_emb[i] = vec\r\n\r\n    embed_matrix = new_temp_emb\r\n\r\n    embed_matrix = torch.tensor(embed_matrix)\r\n    embed_matrix = embed_matrix.to('cuda')\r\n\r\n    return char_vocab, embed_matrix\r\n\r\n","repo_name":"nitkannen/CABACE-AAAI-22","sub_path":"utils/character_utils.py","file_name":"character_utils.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"31953516811","text":"# Making colored paper (s4, 69.35%)\r\n# source : https://www.acmicpc.net/problem/2630\r\n# keyword : implementation, hash\r\n# return : print the counts of white and blue pieces after cutting the square of side N\r\n\r\n\"\"\"\r\n1. Problem\r\n- Quarter the square repeatedly until every piece is a single color\r\n- Count the numbers of white and blue pieces\r\n\r\n2. Input\r\n- Side length N of the whole sheet (N=[2, 4, 8, 16, 32, 64, 128])\r\n- Color information of the paper (0=white, 1=blue)\r\n\r\n3. 
Logic\r\n- Recursive function\r\n- Scan row by row (w=False, b=False)\r\n  - Set w=True when white is found, b=True when blue is found\r\n- After scanning, if both w and b are True, divide into quadrants\r\n- After scanning, if only one of w, b is True, count it and stop\r\n\"\"\"\r\n\r\n\r\nimport sys\r\nsys.setrecursionlimit(10000)\r\ninput = sys.stdin.readline\r\nN = int(input())\r\n\r\nglobal b_cnt, w_cnt\r\nb_cnt, w_cnt = 0, 0\r\npaper = [[*map(int, input().split())] for _ in range(N)]\r\n\r\nmove = [(0, 0), (0, 1), (1, 0), (1, 1)]\r\ndef cut(y, x, l):\r\n    global w_cnt, b_cnt\r\n    w, b = False, False\r\n    for r in paper[y:y+l]:\r\n        for c in r[x:x+l]:\r\n            if c == 0: w = True\r\n            else: b = True\r\n    if w and b:\r\n        for i in range(4):\r\n            ny, nx = y+(move[i][0]*l//2), x+(move[i][1]*l//2)\r\n            cut(ny, nx, l//2)\r\n        return\r\n    if not (w and b):\r\n        if w: w_cnt += 1\r\n        else: b_cnt += 1\r\n\r\ncut(0, 0, N)\r\nprint(w_cnt)\r\nprint(b_cnt)\r\n\r\n\r\n\"\"\"\r\nTest case\r\n\r\n8\r\n1 1 0 0 0 0 1 1\r\n1 1 0 0 0 0 1 1\r\n0 0 0 0 1 1 0 0\r\n0 0 0 0 1 1 0 0\r\n1 0 0 0 1 1 1 1\r\n0 1 0 0 1 1 1 1\r\n0 0 1 1 1 1 1 1\r\n0 0 1 1 1 1 1 1\r\n--- expected output\r\n9\r\n7\r\n\"\"\"","repo_name":"hanna-joo/Self_Coding","sub_path":"python/08_etc/implement_230822_b2630.py","file_name":"implement_230822_b2630.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33142372939","text":"import jwt\nfrom chalice import Blueprint, AuthResponse\nfrom chalicelib.model.constants import JwtConstants\nfrom chalicelib.dao.auth import AuthDao\nfrom chalicelib.routes.constants import ROUTES_WITH_REQUIRED_AUTH_ALL\nfrom chalicelib import logger\nauthorizer_blueprint = Blueprint(__name__)\n\n\nclass DenyAuthResponse(AuthResponse):\n    \"\"\"Generate explicit deny policy for lambda authorizer\n\n    This is required because we define a ResourcePolicy for API Gateway with an explicit\n    Allow for certain IP ranges. This conflicts with the unauthorized response from the lambda authorizer, which is\n    also an Allow (an Allow with an empty path list, which results in a Deny). 
Refer to Table A in the AWS doc\n\n    @link https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-authorization-flow.html\n\n    As a result, the request is processed by API Gateway even if the authorizer returns Allow []\n    \"\"\"\n\n    def _generate_policy(self, request):\n        allowed_resources = self._generate_allowed_resources(request)\n        return {\n            'Version': '2012-10-17',\n            'Statement': [\n                {\n                    'Action': 'execute-api:Invoke',\n                    'Effect': 'Deny',\n                    'Resource': allowed_resources,\n                }\n            ]\n        }\n\n\n@authorizer_blueprint.authorizer()\ndef lambda_authorizer(auth_request):\n    \"\"\"Lambda authorizer for secured routes\n\n    :param auth_request: this is an AWS token authorizer; API Gateway will provide only the header with the token\n    :return: chalice AuthResponse object\n    \"\"\"\n    try:\n        token = auth_request.token\n        payload = jwt.decode(token, verify=False)\n        auth_dao = AuthDao()\n        existing_user = auth_dao.get_user_by_id(payload['sub'])\n        secret = existing_user.secret\n\n        jwt.decode(token, secret, algorithms=['HS256'], options={'require': ['exp', 'iss', 'sub']},\n                   audience=JwtConstants.AUD.value)\n\n        return AuthResponse(routes=ROUTES_WITH_REQUIRED_AUTH_ALL, principal_id='user')\n    except Exception as e:\n        logger.exception(e)\n        return DenyAuthResponse(routes=ROUTES_WITH_REQUIRED_AUTH_ALL, principal_id='user')\n\n","repo_name":"stokilo/slawomirstec.com","sub_path":"web-api/chalicelib/routes/authorizer.py","file_name":"authorizer.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29938846690","text":"#!/usr/bin/env python\n\nfrom asap3.analysis.rdf import RadialDistributionFunction\nfrom ase.io import read\nimport sys\nimport progressbar\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.ndimage import gaussian_filter1d as gf\nfrom cmocean import cm\n\nfont = {'family' : 'CMU Serif',\n#        'weight' : 'light',\n        'size'   : 36}\n\nplt.rc('font', **font)\nplt.rc('text', usetex=True)\n\ntraj = read(sys.argv[1], index='0:200')\ntraj_og = read(sys.argv[2], index='0:200')\n\nrMax = 15.0\nnBins = 1000\nx = np.linspace(0, rMax, nBins)\nbar = progressbar.ProgressBar()\n\nfig, ax = plt.subplots(1, 1, figsize=(20,10))\n\nRDFobj = None\nfor atoms in bar(traj):\n    atoms.cell = [400, 400, 400]\n    if RDFobj is None:\n        RDFobj = RadialDistributionFunction(atoms, rMax, nBins)\n    else:\n        RDFobj.atoms = atoms  # Fool RDFobj to use the new atoms\n    RDFobj.update()         # Collect data\nrdf_PP = RDFobj.get_rdf(elements=(15, 15))\nrdf_PN = RDFobj.get_rdf(elements=(15, 7))\nrdf_NN = RDFobj.get_rdf(elements=(7, 7))\n\nbar = progressbar.ProgressBar()\nRDFobj = None\nfor atoms in bar(traj_og):\n    atoms.cell = [400, 400, 400]\n    if RDFobj is None:\n        RDFobj = RadialDistributionFunction(atoms, rMax, nBins)\n    else:\n        RDFobj.atoms = atoms  # Fool RDFobj to use the new atoms\n    RDFobj.update()         # Collect data\nrdf_og = RDFobj.get_rdf()\nax.plot(x, gf(rdf_PP + 1, 5) / gf(rdf_og + 1, 5), '-', linewidth=4, color=cm.balance(0.75), label='p-p doped')\nax.plot(x, gf(rdf_PN + 1, 5) / gf(rdf_og + 1, 5), '--', linewidth=4, color=cm.algae(0.45), label='p-n doped')\nax.plot(x, gf(rdf_NN + 1, 5) / gf(rdf_og + 1, 5), '-.', linewidth=4, color=cm.balance(0.25), label='n-n doped')\nax.legend()\nax.grid(linestyle='--', linewidth=0.5)\nax.set_xlabel('Distance [\\\\AA]')\nax.set_ylabel('$g(r) / 
g_{CC}(r)$')\nplt.tight_layout()\nplt.savefig('doping_g_r.pdf')\nplt.show()\n","repo_name":"PierreTDarancet/QuantumTransduction","sub_path":"analysis/rdfFromBin.py","file_name":"rdfFromBin.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36493359805","text":"# basicShapes.py, Mason Corey\nimport turtle\nimport math\nt = turtle.Turtle() #Initialize turtle t\nturtle.screensize(canvwidth = 1920, canvheight = 1080) #Set proper screen size\nt.shape(\"turtle\")\nt.width(4) #Set size of lines created by turtle\n\ndef drawRectangle_1(): #Draws a rectangle\n \n t.color('green', 'yellow')\n t.seth(0) \n t.begin_fill() \n t.forward(50) \n t.left(90) \n t.forward(100) \n t.left(90)\n t.forward(50)\n t.left(90)\n t.forward(100)\n t.left(90) \n t.end_fill() \n\ndef drawRectangle_2(): #Draws a rectangle in a different way\n\n x1 = t.xcor()\n y1 = t.ycor()\n\n x2 = x1 + 50\n y2 = y1\n\n x3 = x2\n y3 = y2 + 100\n\n x4 = x1\n y4 = y1 + 100\n\n \n t.color(\"green\", \"yellow\")\n t.begin_fill()\n t.goto(x2, y2)\n t.goto(x3, y3)\n t.goto(x4, y4)\n t.goto(x1, y1)\n \n t.end_fill()\n\ndef drawRectangle_3(): #Draws a rectangle in a different way\n\n x1 = t.xcor()\n y1 = t.ycor()\n\n fourCorners = [(x1 + 50, y1), (x1 + 50, y1 + 100), (x1, y1 + 100), (x1, y1)]\n \n t.color(\"green\", \"yellow\")\n t.begin_fill()\n \n t.goto(fourCorners[0][0], fourCorners[0][1])\n t.goto(fourCorners[1][0], fourCorners[1][1])\n t.goto(fourCorners[2][0], fourCorners[2][1])\n t.goto(fourCorners[3][0], fourCorners[3][1])\n\n t.end_fill()\n\ndef drawRectangle(width, height, tilt, penColor, fillColor): #Draws a rectangle in a different way\n\n t.color(penColor, fillColor)\n t.seth(tilt)\n t.begin_fill()\n t.forward(width)\n t.left(90)\n t.forward(height)\n t.left(90)\n t.forward(width)\n t.left(90)\n t.forward(height)\n t.end_fill()\n t.seth(0)\n\ndef drawTwoRectangles(): #Draws two rectangles\n \n drawRectangle( 50, 100, 0, \"red\", \"\") \n\n t.seth(0)\n t.up() \n t.forward(100) \n t.down()\n\n drawRectangle( 100, 150, 22, \"green\", \"yellow\")\n\ndef drawTriangle(base, height, penColor, fillColor): #Draws a triangle\n a = base/2\n b = height\n c = math.sqrt(a**2 + b**2)\n angle1 = 180-(math.degrees(math.atan(b/a)))\n angle2 = 180-(2*(math.degrees(math.atan(a/b))))\n t.color(penColor, fillColor)\n t.begin_fill()\n t.seth(0)\n t.forward(base)\n t.left(angle1)\n t.forward(c)\n t.left(angle2)\n t.forward(c)\n t.end_fill()\n t.seth(0)\n\ndef drawTriangleTop(base, height, tilt, penColor, fillColor): #Draws a triangle with a different orientation\n startAngle = 2*math.degrees(math.atan(base/(2*height)))\n angle1 = (180-startAngle)/2\n sideLen = math.sqrt(((base**2)/4)+height**2)\n t.color(penColor, fillColor)\n t.begin_fill()\n t.seth(tilt)\n t.right(startAngle/2)\n t.forward(sideLen)\n t.left(180-angle1)\n t.forward(base)\n t.left(180-angle1)\n t.forward(sideLen)\n t.seth(tilt)\n t.end_fill()\n \n\ndef drawTwoTriangles(): #Draws two triangles of different colors\n\n t.up()\n t.forward(100)\n t.down()\n \n drawTriangle(50, 100, 'green', 'blue')\n\n t.seth(0)\n t.up()\n t.forward(60)\n t.down()\n\n drawTriangle(60, 80, 'purple', 'red')\n\nif __name__ ==\"__main__\":\n\n pass\n","repo_name":"themason2011/Python-Projects_Misc","sub_path":"basic_shapes.py","file_name":"basic_shapes.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} 
+{"seq_id":"70406161851","text":"import math\n\n\nclass TicTacToe:\n def __init__(self):\n self.board = [\" \" for _ in range(9)] # [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"]\n self.current_winner = None\n\n def print_board(self):\n # prints the board at current state of the game\n board = [[self.board[i], self.board[i + 1], self.board[i + 2]] for i in range(0, 9, 3)]\n for row in board:\n result = \"| \"\n for i in row:\n result += f\"{i} | \"\n print(result)\n\n @staticmethod\n def print_board_nums():\n # prints the board with index at each spot\n board = [[i, i + 1, i + 2] for i in range(0, 9, 3)]\n for row in board:\n result = \"| \"\n for i in row:\n result += f\"{i} | \"\n print(result)\n\n def available_moves(self):\n return [i for (i, spot) in enumerate(self.board) if spot == \" \"]\n\n def empty_squares(self):\n return \" \" in self.board\n\n def num_empty_squares(self):\n return self.board.count(\" \")\n\n def make_move(self, square, letter):\n # if square is empty, reassign board at that square to be letter and return True, otherwise False\n if self.board[square] == \" \":\n self.board[square] = letter\n if self.winner(square, letter):\n self.current_winner = letter\n return True\n else:\n return False\n\n def winner(self, square, letter):\n # checks to see if winner based on the move the player made onto the square\n # check row\n row_idx = square // 3\n target_row = self.board[row_idx * 3: row_idx * 3 + 3]\n if all([spot == letter for spot in target_row]):\n return True\n\n #check col\n col_idx = square // 3\n target_col = [self.board[i] for i in range(0,9) if (i - col_idx) % 3 == 0]\n if all([spot == letter for spot in target_col]):\n return True\n\n #check diagonal [0,2,4,6,8]\n if square % 2 == 0:\n diagonal1 = [self.board[i] for i in [0, 4, 8]]\n diagonal2 = [self.board[i] for i in [2,4,6]]\n if all([spot == letter for spot in diagonal1]) or all([spot == letter for spot in diagonal2]):\n return True\n #if after checking row, col and diagonal and still fail:\n return False\n\n\n\n\n\n\n","repo_name":"gloria-cheung/python_projects","sub_path":"tic_tac_toe/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32329347406","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 16 22:42:27 2021\r\n\r\n@author: 91798\r\n\"\"\"\r\n#VITAP assignment-1\r\n#School: SCOPE \r\n#Semester: Fall Sem 2021-22\r\n#Subject: Problem Solving using Python \r\n#Subject Code: CSE1012\r\n# This code is to Calculate the Sum of Natural Numbers Using While Loop\r\n\r\nnum1 = int(input(\"Enter the first number :-\"))\r\nnum2 = int(input(\"Enter the second number :- \"))\r\n\r\nwhile num1 and num2 >= 1 :\r\n break\r\nelse :\r\n print(\"the given number is not a natural number\")\r\n\r\nadd = num1+num2\r\n\r\nprint (\"The sum of first and second number is \", add )\r\n ","repo_name":"AgentRatz/VITAP-FRESHERS-CSE1002-AS1","sub_path":"#To check sum of two numbers using a while loop.py","file_name":"#To check sum of two numbers using a while loop.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2973283327","text":"from django.test import TestCase, Client\nfrom django.contrib.auth import get_user_model\nfrom posts.models import Post, Group\nfrom django.urls import reverse\nfrom http import HTTPStatus\nfrom django.core.cache import cache\nimport 
shutil\nimport tempfile\nfrom django.conf import settings\n\nUser = get_user_model()\n\nTEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\n\nclass StaticURLTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.PostAuthor = User.objects.create_user(username='PostAuthor')\n cls.RandomUser = User.objects.create_user(username='RandomUser')\n\n cls.group = Group.objects.create(\n title='Тестовый заголовок',\n slug='test-slug',\n description='Тестовое описание'\n )\n\n cls.post = Post.objects.create(\n text='Test',\n author=cls.PostAuthor,\n pk=1\n )\n\n def setUp(self):\n self.guest = Client()\n self.post_author = Client()\n self.post_author.force_login(self.PostAuthor)\n self.random_user = Client()\n self.random_user.force_login(self.RandomUser)\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)\n cache.clear()\n\n def test_urls_guest(self):\n '''Тест страниц доступных гостю'''\n guest_allowed_urls = (\n reverse('posts:index'),\n reverse('posts:group_list', kwargs={'slug': self.group.slug}),\n reverse('posts:profile', kwargs={'username': self.RandomUser}),\n reverse('posts:post_detail', kwargs={'post_id': self.post.pk}),\n )\n guest_redirect_urls = (\n reverse('posts:post_create'),\n reverse(\n 'posts:post_edit',\n kwargs={'post_id': self.post.pk}),\n reverse(\n 'posts:profile_follow',\n kwargs={'username': self.PostAuthor.username}),\n reverse(\n 'posts:profile_unfollow',\n kwargs={'username': self.PostAuthor.username}),\n reverse('posts:follow_index'),\n )\n\n for url in guest_allowed_urls:\n with self.subTest(url=url):\n cache.clear()\n response = self.guest.get(url)\n self.assertEqual(response.status_code, HTTPStatus.OK)\n for url in guest_redirect_urls:\n with self.subTest(url=url):\n cache.clear()\n response = self.guest.get(url)\n self.assertEqual(response.status_code, HTTPStatus.FOUND)\n\n def test_urls_random_user(self):\n '''Тест страниц доступных авторизованному пользователю'''\n random_user_allowed_urls = (\n reverse('posts:index'),\n reverse('posts:group_list', kwargs={'slug': self.group.slug}),\n reverse('posts:profile', kwargs={'username': self.RandomUser}),\n reverse('posts:post_detail', kwargs={'post_id': self.post.pk}),\n reverse('posts:post_create'),\n reverse('posts:follow_index'),\n\n )\n random_user_redirect_urls = (\n reverse('posts:post_edit', kwargs={'post_id': self.post.pk}),\n reverse(\n 'posts:profile_follow',\n kwargs={'username': self.PostAuthor.username}),\n reverse(\n 'posts:profile_unfollow',\n kwargs={'username': self.PostAuthor.username}),\n )\n for url in random_user_allowed_urls:\n with self.subTest(url=url):\n cache.clear()\n response = self.random_user.get(url)\n self.assertEqual(response.status_code, HTTPStatus.OK)\n for url in random_user_redirect_urls:\n with self.subTest(url=url):\n cache.clear()\n response = self.random_user.get(url)\n self.assertEqual(response.status_code, HTTPStatus.FOUND)\n\n def test_urls_post_author(self):\n '''Тест страниц доступных авторизованному пользователю автору'''\n post_author_allowed_urls = (\n reverse('posts:index'),\n reverse('posts:group_list', kwargs={'slug': self.group.slug}),\n reverse('posts:profile', kwargs={'username': self.RandomUser}),\n reverse('posts:post_detail', kwargs={'post_id': self.post.pk}),\n reverse('posts:post_create'),\n reverse('posts:post_edit', kwargs={'post_id': self.post.pk}),\n reverse('posts:follow_index'),\n )\n post_author_redirect_urls = {\n reverse(\n 
'posts:profile_follow',\n kwargs={'username': self.PostAuthor.username}),\n reverse(\n 'posts:profile_unfollow',\n kwargs={'username': self.PostAuthor.username}),\n }\n for url in post_author_allowed_urls:\n with self.subTest(url=url):\n cache.clear()\n response = self.post_author.get(url)\n self.assertEqual(response.status_code, HTTPStatus.OK)\n for url in post_author_redirect_urls:\n with self.subTest(url=url):\n cache.clear()\n response = self.random_user.get(url)\n self.assertEqual(response.status_code, HTTPStatus.FOUND)\n\n def test_edit_guest_redirect(self):\n '''\n Гость будет перенаправлен на /auth/login/ с последующим\n перенаправлением на карточку поста\n '''\n response = self.guest.get(\n reverse('posts:post_edit', kwargs={'post_id': self.post.pk}),\n follow=True)\n self.assertRedirects(\n response, ('/auth/login/?next=/posts/1/edit/'))\n\n def test_edit_random_user(self):\n '''Не автор будет перенаправлени на карточку поста'''\n response = self.random_user.get(\n reverse('posts:post_edit', kwargs={'post_id': self.post.pk}),\n follow=True)\n self.assertRedirects(\n response,\n reverse('posts:post_detail', kwargs={'post_id': self.post.pk})\n )\n\n def test_unexisting_page(self):\n '''Тест несуществующей ссылки'''\n response = self.random_user.get('/some_randome_url/')\n self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)\n\n def test_urls_uses_correct_templates(self):\n templates_url = {\n reverse(\n viewname='posts:index'\n ): 'posts/index.html',\n reverse(\n viewname='posts:group_list',\n kwargs={'slug': self.group.slug}\n ): 'posts/group_list.html',\n reverse(\n viewname='posts:profile',\n kwargs={'username': self.RandomUser}\n ): 'posts/profile.html',\n reverse(\n viewname='posts:post_detail',\n kwargs={'post_id': self.post.pk}\n ): 'posts/post_detail.html',\n reverse(\n viewname='posts:post_create'\n ): 'posts/create_post.html',\n reverse(\n viewname='posts:post_edit',\n kwargs={'post_id': self.post.pk}\n ): 'posts/create_post.html',\n '/some-random-url/': 'core/404.html',\n\n }\n for url, template in templates_url.items():\n with self.subTest(url=url):\n cache.clear()\n response = self.post_author.get(url)\n self.assertTemplateUsed(response, template)\n","repo_name":"malahovskiy/hw05_final","sub_path":"yatube/posts/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":7807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29026640728","text":"import os\nimport os.path\nimport importlib.resources\nimport json\nfrom types import SimpleNamespace\nfrom platformdirs import PlatformDirs\n\n# theser are the URLs into the open data portals\n# update when new datasets are released\n__urls = {\n \"demographics\": {\n \"url\": \"https://data.cityofnewyork.us/resource/vmmu-wj3w.csv?$limit=1000000\",\n \"filename\":\"school-demographics.csv\",\n \"desc\": \"School demographic data from NYC Open Data Portal\"\n },\n \"school_geo\": {\n \"url\": \"https://data.cityofnewyork.us/resource/a3nt-yts4.geojson?$limit=1000000\",\n \"filename\":\"school-zipcodes.geojson\",\n \"desc\": \"location points with zipcodes\"\n },\n \"school_locations\": {\n \"url\": \"https://data.cityofnewyork.us/resource/wg9x-4ke6.csv?$limit=1000000\",\n \"filename\":\"school-locations.csv\",\n \"desc\": \"x,y geolocations and location meta data\"\n },\n \"district_geo\": {\n \"url\": \"https://data.cityofnewyork.us/api/geospatial/r8nu-ymqj?method=export&format=GeoJSON\",\n \"filename\":\"district-shapes.geojson\",\n 
\"desc\": \"shape file for school districts\"\n },\n \"charter_ela\": {\n \"url\": \"https://data.cityofnewyork.us/resource/sgjd-xi99.csv?$limit=1000000\",\n \"filename\":\"charter-ela.csv\",\n \"desc\": \"charter school 3-8 ELA results\"\n },\n \"charter_math\": {\n \"url\": \"https://data.cityofnewyork.us/resource/3xsw-bpuy.csv?$limit=1000000\",\n \"filename\":\"charter-math.csv\",\n \"desc\": \"charter school 3-8 math results\"\n },\n \"nyc_ela\": {\n \"url\": \"https://data.cityofnewyork.us/api/views/hvdr-xc2s/files/4db8f0e7-0150-4302-bed7-529c89efa225?download=true&filename=school-ela-results-2013-2019-(public).xlsx\",\n \"filename\":\"nyc-ela.csv\",\n \"desc\": \"excel file with non-charter school ELA test results with demographic categories\"\n },\n \"nyc_math\": {\n \"url\": \"https://data.cityofnewyork.us/api/views/365g-7jtb/files/17910cb0-8a62-4037-84b5-f0b4c2b3f71f?download=true&filename=school-math-results-2013-2019-(public).xlsx\",\n \"filename\":\"nyc-math.csv\",\n \"desc\": \"excel file with non-charter school math test results with demographic categories\"\n },\n \"nyc_regents\": {\n \"url\": \"https://data.cityofnewyork.us/api/views/2h3w-9uj9/files/ae520e30-953f-47a0-9654-c77900232236?download=true&filename=2014-15-to-2018-19-nyc-regents-overall-and-by-category---public%20(1).xlsx\",\n \"filename\":\"nyc-regents.csv\",\n \"desc\": \"regents exam results\"\n },\n \"nysed_math_ela\": {\n \"comment\":\"Each test year has its own URL, so this item doesn't match others in format\",\n \"url\":\"https://data.nysed.gov/downloads.php\",\n \"urls\": [\n \"https://data.nysed.gov/files/assessment/20-21/3-8-2020-21.zip\",\n \"https://data.nysed.gov/files/assessment/18-19/3-8-2018-19.zip\",\n \"https://data.nysed.gov/files/assessment/17-18/3-8-2017-18.zip\",\n \"https://data.nysed.gov/files/assessment/16-17/3-8-2016-17.zip\",\n \"https://data.nysed.gov/files/assessment/15-16/3-8-2015-16.zip\"\n ],\n \"filename\":\"nysed-exams.csv\",\n \"desc\": \"NYS grades 3-8 ELA and Math test in a .zip archive\"\n }\n}\n\ndirs = PlatformDirs(\"nycschools\", \"mxc\")\n\nconfig_file = \"nycschools.config\"\nconfig_paths = [\n os.path.join(dirs.site_config_dir, config_file),\n os.path.join(dirs.user_config_dir, config_file)\n]\n\ndata_paths = [\n dirs.site_data_dir,\n dirs.user_data_dir\n]\n\n\n\ndef read_urls():\n urls = {}\n for k,v in __urls.items():\n urls[k] = SimpleNamespace(**v)\n return urls\n\n\ndef get_config():\n\n for path in config_paths:\n if os.path.exists(path):\n with open(path,\"r\") as f:\n config = json.loads(f.read())\n config[\"urls\"] = read_urls()\n return SimpleNamespace(**config)\n\n # create a new config in the user space\n path = os.path.join(dirs.user_config_dir, config_file)\n os.makedirs(os.path.dirname(path))\n config = {\n \"config_file\": path,\n \"data_dir\": find_data_dir()\n }\n with open(path,\"w\") as f:\n f.write(json.dumps(config, indent=2))\n\n config[\"urls\"] = read_urls()\n return SimpleNamespace(**config)\n\n\ndef find_data_dir():\n \"\"\"Finds a writeable data directory\"\"\"\n # if ther's a local data dir with data files in it\n # use that first\n local = os.path.join(\".\", \"school-data\")\n if os.path.exists(local):\n files = os.listdir(path)\n # there must be at least one data file already in our list\n if len(files) > 0:\n urls = read_urls()\n datafiles = [url.filename for url in urls]\n for f in datafiles:\n if f in files:\n return local\n\n for path in data_paths:\n if check_write_edit_delete(path):\n return path\n\n if 
check_write_edit_delete(local):\n return local\n\n return None\n\n\ndef check_write_edit_delete(path):\n \"\"\"Checks if path can be a suitable data dir\"\"\"\n\n print(\"Checking data dir at:\", path)\n if os.path.exists(path):\n files = os.listdir(path)\n if len(files) > 0:\n # if the path exists and is not empty, assume it works\n return True\n else:\n try:\n print(\"trying to make\", path)\n os.makedirs(path)\n except:\n return False\n\n\n f = os.path.join(path, \".tmp\")\n msg = \"testing data dir\"\n with open(f, \"w\") as tmp:\n try:\n tmp.write(msg)\n except:\n return False\n\n with open(f, \"r\") as tmp:\n try:\n assert tmp.read() == msg\n except:\n return False\n\n with open(f, \"a\") as tmp:\n try:\n tmp.write(\"\\nmore data\")\n except:\n return False\n try:\n os.remove(f)\n except:\n return False\n\n return True\n\nconfig = get_config()\n","repo_name":"annan0609/nycschools","sub_path":"nycschools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"71050039291","text":"# Define import\n\n\n\nimport numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n\n\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom keras.models import Sequential\n\nfrom keras.layers import Dense, Dropout\n\nfrom keras.callbacks import EarlyStopping\n\nfrom keras.regularizers import l2\n\nfrom keras import optimizers\n\nimport xgboost as xgb\n\nfrom sklearn import datasets\n\n#from sklearn.cross_validation import train_test_split\n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.datasets import dump_svmlight_file\n\nfrom sklearn.externals import joblib\n\nfrom sklearn.metrics import precision_score\n\n\n\nimport matplotlib.pyplot as plt\n\n\n\n# Input data files are available in the \"../input/\" directory.\n\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\n\n\nimport os\n\nprint(os.listdir(\"../input\"))\n\n# Import data\n\ntrain_input = pd.read_csv(\"../input/train.csv\")\n\ntest_input = pd.read_csv(\"../input/test.csv\")\n\ntrain_input.head()\n#Training Data input(x) and output(y)\n\ntrain_x = train_input.drop(['ID_code', 'target'], axis = 1)\n\ntrain_y = train_input['target']\n\n\n\nX_train, X_test, y_train, y_test = train_test_split(train_x, train_y, test_size=0.2, random_state=42)\n\n\n\n# use DMatrix for xgbosot\n\ndtrain = xgb.DMatrix(X_train, label=y_train)\n\n#Test Data input(x)\n\ntest_x = test_input.drop(['ID_code'], axis = 1)\n\ndtest = xgb.DMatrix(X_test)\n# use svmlight file for xgboost\n\ndump_svmlight_file(X_train, y_train, 'dtrain.svm', zero_based=True)\n\ndump_svmlight_file(X_test, y_test, 'dtest.svm', zero_based=True)\n\ndtrain_svm = xgb.DMatrix('dtrain.svm')\n\ndtest_svm = xgb.DMatrix('dtest.svm')\n#standardized input\n\nss = StandardScaler()\n\ntrain_x_scaled = ss.fit_transform(train_x)\n\ntest_x_scaled = ss.transform(test_x)\n#Label encoded output\n\nencoder = LabelEncoder()\n\nencoder.fit(train_y)\n\ntrain_y_encoded = encoder.transform(train_y)\n#Definining the NN model\n\nmodel = Sequential()\n\nmodel.add(Dense(200, activation='relu', kernel_initializer='normal', kernel_regularizer=l2(0.001)))\n\nmodel.add(Dropout(0.4))\n\nmodel.add(Dense(50, activation='relu', kernel_regularizer=l2(0.001)))\n\nmodel.add(Dense(1, activation='sigmoid'))\n#Defining 
optimizer\n\n_opt= 'adam'\n\n_loss = 'binary_crossentropy'\n\n#complie model\n\nmodel.compile(loss=_loss, optimizer=_opt, metrics=['accuracy'])\n# Early stopping \n\n#from keras.callbacks import EarlyStopping\n\n_es_monitor = 'val_loss'\n\n_es_patience = 10\n\nes = EarlyStopping(monitor=_es_monitor, mode='min', verbose=1, patience=_es_patience)\n#batch size and number of epchos \n\n_batch_size = 1\n\n_epochs = 100\n#Train model\n\nhistory = model.fit(train_x_scaled, train_y_encoded, validation_split=0.20,\n\n epochs=_epochs, batch_size = len(train_x_scaled), verbose=1, callbacks=[es])\n#Evaluate Model's accuracy\n\nmetrics = model.evaluate(train_x_scaled, train_y_encoded)\n\nprint(\"\\n%s: %.2f%%\" % (model.metrics_names[1], metrics[1]*100))\n# Plot accuracy - Training vs Validation\n\nimport matplotlib.pyplot as plt\n\nplt.plot(history.history['acc'], label='train')\n\nplt.plot(history.history['val_acc'], label='test')\n\nplt.title('Accuracy - Training vs Validation')\n\nplt.ylabel('Accuracy')\n\nplt.xlabel('Epoch')\n\nplt.legend(['Train', 'Test'], loc='lower right')\n\nplt.show()\n\n# Plot loss - Training vs Validation\n\nimport matplotlib.pyplot as plt\n\n\n\nplt.plot(history.history['loss'], label='train loss')\n\nplt.plot(history.history['val_loss'], label='test loss')\n\nplt.title('Loss - Training vs Validation')\n\nplt.ylabel('Loss')\n\nplt.xlabel('Epoch')\n\nplt.legend(['Train', 'Test'], loc='upper right')\n\nplt.show()\n# set xgboost params\n\nparam = {\n\n 'max_depth': 3, # the maximum depth of each tree\n\n 'eta': 0.3, # the training step for each iteration\n\n 'silent': 1, # logging mode - quiet\n\n 'objective': 'multi:softprob', # error evaluation for multiclass training\n\n 'num_class': 3} # the number of classes that exist in this datset\n\nnum_round = 20 # the number of training iterations\n\n\n\n#-------------numpy array------------------\n\n# training and testing - numpy matrices\n\nbst = xgb.train(param, dtrain, num_round)\n\npreds = bst.predict(dtest)\n# extracting most confident predictions\n\nbest_preds = np.asarray([np.argmax(line) for line in preds])\n\nprint (\"Numpy array precision:\", precision_score(y_test, best_preds, average='macro'))\n\n# Model predict on Test data\n\nimport numpy\n\nfrom numpy import array\n\nfrom numpy import argmax\n\n\n\npredict1 = model.predict(test_x_scaled)\n\ndtest2 = xgb.DMatrix(test_x)\n\npredict2 = bst.predict(dtest2)\n\npredict_ary1 = array(predict1)\n\npredict_ary2 = array(predict2)\n\nsummed = predict_ary1 + predict_ary2\n\n\n\nresult = argmax(summed, axis=1)\n\nresult = pd.DataFrame({\"ID_code\": pd.read_csv(\"../input/test.csv\")['ID_code'], \"target\": summed[:,0]})\n\nprint(result.head())\n\n\n\nresult.to_csv(\"submission.Arnab.Apr102019.1.csv\", index=False)","repo_name":"aorursy/new-nb-1","sub_path":"arnabdan_nn-dropouts-early-stopping-xgboost-ensemble.py","file_name":"arnabdan_nn-dropouts-early-stopping-xgboost-ensemble.py","file_ext":"py","file_size_in_byte":4959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1301152355","text":"import logging\nfrom typing import Optional\n\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import issparse\n\nfrom perturbationx.io import RelationTranslator\nfrom .preprocess_network import infer_node_type, enumerate_nodes, remove_invalid_graph_elements, \\\n infer_edge_attributes, infer_metadata\nfrom ..matrices import generate_boundary_laplacian\n\n__all__ = [\"format_dataset\", 
\"prune_network_dataset\", \"infer_graph_attributes\"]\n\n\ndef format_dataset(dataset: pd.DataFrame, computing_statistics=True):\n if not isinstance(dataset, pd.DataFrame):\n raise ValueError(\"Dataset is not a pandas.DataFrame.\")\n if any(col not in dataset.columns for col in [\"nodeID\", \"logFC\"]):\n raise ValueError(\"Dataset does not contain columns 'nodeID' and 'logFC'.\")\n\n if computing_statistics and \"stderr\" not in dataset.columns:\n if 't' in dataset.columns:\n dataset[\"stderr\"] = np.divide(dataset[\"logFC\"].to_numpy(), dataset['t'].to_numpy())\n else:\n raise ValueError(\"Dataset does not contain columns 'stderr' or 't'.\")\n\n reduced_dataset = dataset[[\"nodeID\", \"logFC\", \"stderr\"]] \\\n if computing_statistics \\\n else dataset[[\"nodeID\", \"logFC\"]]\n\n return reduced_dataset\n\n\ndef remove_opposing_edges(adjacency: np.ndarray, dataset: pd.DataFrame, minimum_amplitude=1.):\n dataset_sign = np.sign(dataset[\"logFC\"].to_numpy())\n dataset_mask = np.logical_and(\n np.abs(dataset[\"logFC\"].to_numpy()) >= minimum_amplitude,\n dataset[\"logFC\"].to_numpy() != 0.\n )\n\n # mask edges with a different sign than the dataset\n adjacency_mask = adjacency * dataset_sign[np.newaxis, :] < 0.\n adjacency_mask *= dataset_mask[np.newaxis, :]\n\n # remove edges with a different sign than the dataset\n adjacency_pruned = adjacency.copy()\n adjacency_pruned[adjacency_mask] = 0\n return adjacency_pruned\n\n\ndef prune_network_dataset(graph: nx.DiGraph, adj_b: np.ndarray, dataset: pd.DataFrame, dataset_id,\n missing_value_pruning_mode=\"nullify\", opposing_value_pruning_mode=None,\n opposing_value_minimum_amplitude=1., boundary_edge_minimum=6, verbose=True):\n if missing_value_pruning_mode not in [\"remove\", \"nullify\"]:\n raise ValueError(\"Invalid missing value pruning mode. Must be one of 'remove' or 'nullify'.\")\n if opposing_value_pruning_mode is not None and opposing_value_pruning_mode not in [\"remove\", \"nullify\"]:\n raise ValueError(\"Invalid opposing value pruning mode. 
Must be either None or one of 'remove' or 'nullify'.\")\n if boundary_edge_minimum < 0:\n raise ValueError(\"Boundary edge minimum must be non-negative.\")\n if adj_b.ndim != 2:\n raise ValueError(\"Argument adjacency_boundary is not two-dimensional.\")\n\n dataset_pruned = dataset[~dataset[\"logFC\"].isna()]\n dataset_pruned = dataset_pruned[dataset_pruned[\"nodeID\"].isin(graph.nodes)]\n\n core_size = adj_b.shape[0]\n network_idx = np.array([graph.nodes[node_name][\"idx\"] - core_size\n for node_name in dataset_pruned[\"nodeID\"].values\n if node_name in graph.nodes])\n\n if network_idx.size == 0:\n raise ValueError(\"The dataset does not contain any boundary nodes.\")\n\n lap_b = adj_b\n\n if missing_value_pruning_mode == \"remove\":\n lap_b = lap_b[:, network_idx]\n if opposing_value_pruning_mode == \"remove\":\n lap_b = remove_opposing_edges(\n lap_b, dataset_pruned, minimum_amplitude=opposing_value_minimum_amplitude)\n\n lap_b = generate_boundary_laplacian(lap_b, boundary_edge_minimum)\n\n if missing_value_pruning_mode == \"nullify\":\n lap_b = lap_b[:, network_idx]\n if opposing_value_pruning_mode == \"nullify\":\n lap_b = remove_opposing_edges(\n lap_b, dataset_pruned, minimum_amplitude=opposing_value_minimum_amplitude)\n\n lap_b_pruned = - lap_b\n\n # Infer dataset-specific metadata\n outer_boundary_node_count = network_idx.size\n # Count non-zero elements per row\n if issparse(lap_b_pruned):\n non_zero_row_count = np.bincount(lap_b_pruned.nonzero()[0], minlength=lap_b_pruned.shape[0])\n else:\n non_zero_row_count = np.count_nonzero(lap_b_pruned, axis=1)\n boundary_edge_count = np.sum(non_zero_row_count)\n inner_boundary_node_count = np.count_nonzero(non_zero_row_count)\n\n if verbose:\n logging.info(\"boundary nodes matched with dataset: %d\" % outer_boundary_node_count)\n logging.info(\"boundary edges remaining: %d\" % boundary_edge_count)\n logging.info(\"core nodes with boundary edges remaining: %d\" % inner_boundary_node_count)\n\n graph.graph[\"dataset_\" + dataset_id] = {\n \"matched_outer_boundary_nodes\": int(outer_boundary_node_count),\n \"matched_boundary_edges\": int(boundary_edge_count),\n \"matched_inner_boundary_nodes\": int(inner_boundary_node_count)\n }\n\n return lap_b_pruned, dataset_pruned\n\n\ndef infer_graph_attributes(graph: nx.DiGraph, relation_translator: Optional[RelationTranslator] = None, verbose=True):\n # Quietly remove nodes without edges\n graph.remove_nodes_from(list(nx.isolates(graph)))\n\n # Partition core and boundary nodes\n boundary_nodes, core_nodes = infer_node_type(graph)\n if len(core_nodes) == 0:\n raise ValueError(\"The network does not contain any core nodes.\")\n if len(boundary_nodes) == 0:\n raise ValueError(\"The network does not contain any boundary nodes.\")\n\n # Compute node type and indices, add data to graph instance\n enumerate_nodes(graph, boundary_nodes, core_nodes)\n\n remove_invalid_graph_elements(graph)\n\n # Compute edge weight and interaction type\n infer_edge_attributes(graph, relation_translator)\n\n # Add stats to metadata\n infer_metadata(graph, verbose)\n\n return graph\n","repo_name":"mikethenut/BNPA","sub_path":"perturbationx/toponpa/preprocessing/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"5464072681","text":"from typing import Optional\nimport openai\nfrom langchain.prompts import (\n ChatPromptTemplate,\n MessagesPlaceholder,\n 
SystemMessagePromptTemplate,\n HumanMessagePromptTemplate,\n)\nfrom langchain.chains import ConversationChain\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.memory import ConversationBufferMemory\nfrom vocode import getenv\n\nfrom vocode.turn_based.agent.base_agent import BaseAgent\n\n\nclass ChatGPTAgent(BaseAgent):\n def __init__(\n self,\n system_prompt: str,\n api_key: Optional[str] = None,\n initial_message: Optional[str] = None,\n model_name: str = \"gpt-3.5-turbo\",\n temperature: float = 0.7,\n max_tokens: int = 100,\n memory: Optional[ConversationBufferMemory] = None,\n ):\n super().__init__(initial_message=initial_message)\n openai.api_key = getenv(\"OPENAI_API_KEY\", api_key)\n if not openai.api_key:\n raise ValueError(\"OpenAI API key not provided\")\n self.prompt = ChatPromptTemplate.from_messages(\n [\n SystemMessagePromptTemplate.from_template(system_prompt),\n MessagesPlaceholder(variable_name=\"history\"),\n HumanMessagePromptTemplate.from_template(\"{input}\"),\n ]\n )\n self.memory = memory if memory else ConversationBufferMemory(return_messages=True)\n if initial_message:\n self.memory.chat_memory.add_ai_message(initial_message)\n self.llm = ChatOpenAI( # type: ignore\n model_name=model_name,\n temperature=temperature,\n max_tokens=max_tokens,\n )\n self.conversation = ConversationChain(\n memory=self.memory, prompt=self.prompt, llm=self.llm\n )\n\n def respond(self, human_input: str):\n return self.conversation.predict(input=human_input)","repo_name":"vocodedev/vocode-python","sub_path":"vocode/turn_based/agent/chat_gpt_agent.py","file_name":"chat_gpt_agent.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":1836,"dataset":"github-code","pt":"78"} +{"seq_id":"73150804732","text":"from django.urls import path\n\nfrom .views import *\n\napp_name = 'accounts'\nurlpatterns = [\n path('login', AccountLoginView.as_view(), name='login'),\n path('logout', AccountLogoutView.as_view(), name='logout'),\n path('register', AccountRegisterView.as_view(), name='register'),\n]\n","repo_name":"jwear/price-tracking-watchlist","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"16453491956","text":"# load movie review text data\nfrom tensorflow.keras.datasets import imdb\n(train_input, train_target), (test_input, test_target) = imdb.load_data(num_words=500)\nprint(train_input.shape, test_input.shape) # sample count of train set & test set\nprint(len(train_input[0]), len(train_input[1])) # words count of 1st & 2nd samples\nprint(train_input[0]) # tonken list of 1st sample\nprint(train_target[:20]) # target data\n\n# divide train set with validation set\nfrom sklearn.model_selection import train_test_split\ntrain_input, val_input, train_target, val_target = train_test_split(\n train_input, train_target, test_size=0.2, random_state=42)\n# get words count of each review\nimport numpy as np\nlengths = np.array([len(x) for x in train_input])\nprint(np.mean(lengths), np.median(lengths))\n\nimport matplotlib.pyplot as plt\nplt.hist(lengths)\nplt.xlabel('length')\nplt.ylabel('frequency')\nplt.show()\n\n# truncate words count until 100\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\ntrain_seq = pad_sequences(train_input, maxlen=100)\nval_seq = pad_sequences(val_input, maxlen=100)\nprint(train_seq.shape) # sample count of new train set\nprint(train_seq[0]) # 
token list of 1st sample\nprint(train_seq[5]) # token list of 6st sample\n\n# transform train set by one-hot incoding\nfrom tensorflow import keras\n# train_oh = keras.utils.to_categorical(train_seq)\n# val_oh = keras.utils.to_categorical(val_seq)\n# print(train_oh.shape) # shape of train set\n# print(train_oh[0][0][:20]) # content of 1st sample\n\n# create Recurrent Nural Network by One-Hot incoding\n# model = keras.Sequential()\n# model.add(keras.layers.SimpleRNN(8, input_shape=(100, 500)))\n# model.add(keras.layers.Dense(1, activation='sigmoid'))\n# model.summary()\n\n# compile & train model\n# rmsprop = keras.optimizers.RMSprop(learning_rate=1e-4)\n# model.compile(optimizer=rmsprop, loss='binary_crossentropy',\n# metrics=['accuracy'])\n# checkpoint_cb = keras.callbacks.ModelCheckpoint('best-cnn-model.h5',\n# save_best_only=True)\n# early_stopping_cb = keras.callbacks.EarlyStopping(patience=2,\n# restore_best_weights=True)\n# history = model.fit(train_oh, train_target, epochs=100, batch_size=64,\n# validation_data=(val_oh, val_target),\n# callbacks=[checkpoint_cb, early_stopping_cb])\n\n# draw graph of train loss & validation loss\n# plt.plot(history.history['loss'])\n# plt.plot(history.history['val_loss'])\n# plt.xlabel('epoch')\n# plt.ylabel('loss')\n# plt.legend(['train', 'val'])\n# plt.show()\n\n# create Recurrent Nural Network by Word embedding\nmodel2 = keras.Sequential()\nmodel2.add(keras.layers.Embedding(500, 16, input_length=100))\nmodel2.add(keras.layers.SimpleRNN(8))\nmodel2.add(keras.layers.Dense(1, activation='sigmoid'))\nmodel2.summary()\n\n# compile & train model\nrmsprop = keras.optimizers.RMSprop(learning_rate=1e-4)\nmodel2.compile(optimizer=rmsprop, loss='binary_crossentropy',\n metrics=['accuracy'])\ncheckpoint_cb = keras.callbacks.ModelCheckpoint('best-embedding-model.h5',\n save_best_only=True)\nearly_stopping_cb = keras.callbacks.EarlyStopping(patience=3,\n restore_best_weights=True)\nhistory = model2.fit(train_seq, train_target, epochs=100, batch_size=64,\n validation_data=(val_seq, val_target),\n callbacks=[checkpoint_cb, early_stopping_cb])\n\n# draw graph of train loss & validation loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.xlabel('epoch')\nplt.ylabel('loss')\nplt.legend(['train', 'val'])\nplt.show()\n","repo_name":"DonggeunJung/PythonSamples","sub_path":"ml/MovieReview1.py","file_name":"MovieReview1.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72108262653","text":"from sklearn.feature_extraction import DictVectorizer\nimport csv\nfrom sklearn import tree\nfrom sklearn import preprocessing\nfrom sklearn.externals.six import StringIO\n\n# Read in the csv file and put features into list of dict and list of class label\n# 载入样本数据\nallElectronicsData = open(r'./AllElectronics.csv', 'r')\n# 第一行表头\nreader = csv.reader(allElectronicsData)\n# #headers = reader.next\nfor row in reader:\n headers = row\n break\n\nprint(headers)\n\nfeatureList = []\nlabelList = []\n\nfor row in reader:\n labelList.append(row[len(row)-1])\n rowDict = {}\n for i in range(1, len(row)-1):\n rowDict[headers[i]] = row[i]\n featureList.append(rowDict)\n\nprint(featureList)\n\n# Vetorize features\nvec = DictVectorizer()\ndummyX = vec.fit_transform(featureList) .toarray()\n\nprint(\"dummyX: \" + str(dummyX))\nprint(vec.get_feature_names())\n\nprint(\"labelList: \" + str(labelList))\n\n# vectorize class labels\nlb = 
preprocessing.LabelBinarizer()\ndummyY = lb.fit_transform(labelList)\nprint(\"dummyY: \" + str(dummyY))\n\n# Using decision tree for classification\n# clf = tree.DecisionTreeClassifier()\nclf = tree.DecisionTreeClassifier(criterion='entropy')\nclf = clf.fit(dummyX, dummyY)\nprint(\"clf: \" + str(clf))\n\n\n# Visualize model\nwith open(\"allElectronicInformationGainOri.dot\", 'w') as f:\n f = tree.export_graphviz(clf, feature_names=vec.get_feature_names(), out_file=f)\n\n#3 dot -Tpdf allElectronicInformationGainOri.dot -o tree.pdf\n\n# oneRowX = dummyX[0, :]\n# print(\"oneRowX: \" + str(oneRowX))\n#\n# newRowX = oneRowX\n# newRowX[0] = 1\n# newRowX[2] = 0\n# print(\"newRowX: \" + str(newRowX))\n#\n# predictedY = clf.predict([newRowX])\n# print(\"predictedY: \" + str(predictedY))\n\n\n","repo_name":"cxinping/Python","sub_path":"MachineLearning/决策树算法/AllElectronics.py","file_name":"AllElectronics.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"78"} +{"seq_id":"17647042197","text":"#2 계단 오르기 \n# https://www.acmicpc.net/problem/2579\n\nn = int(input()) \ns = [int(input()) for _ in range(n)]\n\ndp = [0]*(n) \n\nif len(s) <= 2 : \n print(sum(s))\n\nelse: \n dp[0] = s[0] \n dp[1] = s[0] + s[1] \n for i in range(2,n) : \n \n dp[i] = max(dp[i-3]+s[i-1]+s[i], dp[i-2]+s[i])\n\n print(dp[-1])\n\n","repo_name":"Techeer-3rd-gen-study/Algorithm-study","sub_path":"18주차_02.01_02.07/algorithm/2_계단오르기/홍다연.py","file_name":"홍다연.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"42869480260","text":"_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py'\nmodel = dict(\n type='CascadeRCNN',\n pretrained='open-mmlab://resnext101_64x4d',\n backbone=dict(\n type='ResNeXt',\n depth=101,\n groups=64,\n base_width=4,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n norm_cfg=dict(type='BN', requires_grad=True),\n style='pytorch'),\n roi_head=dict(\n bbox_head=[\n dict(\n type='Shared2FCBBoxHead',\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=1,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2]),\n reg_class_agnostic=True,\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0,\n loss_weight=1.0)),\n dict(\n type='Shared2FCBBoxHead',\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=1,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0., 0., 0., 0.],\n target_stds=[0.05, 0.05, 0.1, 0.1]),\n reg_class_agnostic=True,\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0,\n loss_weight=1.0)),\n dict(\n type='Shared2FCBBoxHead',\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=1,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0., 0., 0., 0.],\n target_stds=[0.033, 0.033, 0.067, 0.067]),\n reg_class_agnostic=True,\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))\n])\n)\n\ntest_cfg = dict(\n rpn=dict(\n nms_across_levels=False,\n nms_pre=12000,\n nms_post=2000,\n max_num=2000,\n nms_thr=0.7,\n min_bbox_size=0),\n rcnn=dict(\n score_thr=0.00,\n nms=dict(type='nms', 
iou_threshold=0.7),\n max_per_img=10000)\n)\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='Resize',\n img_scale=[(544, 304), (1306, 730)],\n multiscale_mode='range',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n #img_scale=(1088, 608),\n img_scale=(1306, 730),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\ndataset_type = 'CocoDataset'\ndata_root = '/raid/ljp/data/MOT/'\ndata = dict(\n samples_per_gpu=4,\n workers_per_gpu=2,\n train=dict(\n type=dataset_type,\n ann_file=data_root + 'MOT17/mot17half.json',\n img_prefix=data_root,\n pipeline=train_pipeline),\n val=dict(\n type=dataset_type,\n ann_file=data_root + 'MOT17/mot17val.json',\n img_prefix=data_root,\n pipeline=test_pipeline),\n test=dict(\n type=dataset_type,\n ann_file=data_root + 'MOT17/mot17val.json',\n img_prefix=data_root,\n pipeline=test_pipeline))\nevaluation = dict(interval=1, metric='bbox')\nload_from = 'http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth'\n","repo_name":"ljpadam/CGPS","sub_path":"configs/MOT/cascade_rcnn_x101_64x4d_fpn_20e_coco.py","file_name":"cascade_rcnn_x101_64x4d_fpn_20e_coco.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"78"} +{"seq_id":"38336698445","text":"from flask import Flask, render_template\nimport lightning\nimport os\nfrom collections import defaultdict\n\nsockPath = os.path.join(os.environ[\"HOME\"], \".lightning\", \"lightning-rpc\")\nl = lightning.LightningRpc(sockPath)\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef channels():\n peers = l.listpeers()[\"peers\"]\n channels = defaultdict(list)\n nodes = l.listnodes()[\"nodes\"]\n for peer in peers:\n aliases = [n[\"alias\"] for n in nodes if n[\"nodeid\"] == peer[\"id\"]]\n alias = aliases[0] if aliases else \"Unknown\"\n for channel in peer.get(\"channels\", []):\n channel[\"alias\"] = alias\n channels[channel[\"state\"]].append(channel)\n return render_template(\"channels.html\", channels=channels)\n\ndef main():\n app.run(port=8081, host=\"0.0.0.0\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mocmocamoc/ozone","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39317687582","text":"\nfrom django.urls import path, include\nfrom . 
import views\n\napp_name = 'user_app'\n\nurlpatterns = [\n path('', views.login_page, name=\"login_page\"),\n path('signup/', views.signup, name=\"signup\"),\n path('login_form/', views.login_form, name=\"login_form\"),\n path('register/', views.register, name=\"register\"),\n path('home/', views.home, name=\"home\"),\n path('logout/', views.logout_form, name=\"logout\"),\n]","repo_name":"Fawad2aria/Twitter","sub_path":"Twiter/user_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20137565252","text":"class Solution(object):\n # Time Limit Exceeded\n # def groupAnagrams(self, strs):\n # \"\"\"\n # :type strs: List[str]\n # :rtype: List[List[str]]\n # \"\"\"\n # res = []\n # while len(strs) != 0:\n # sub = self.findAnagrams(strs[0], strs)\n # for s in sub:\n # strs.remove(s)\n # res.append(sub)\n # return res\n #\n # def findAnagrams(self, tar, strs):\n # from collections import Counter\n # tarCounter = Counter(tar)\n # subAns = []\n # for s in strs:\n # sCounter = Counter(s)\n # if sCounter == tarCounter:\n # subAns.append(s)\n # return subAns\n\n def groupAnagrams(self, strs):\n \"\"\"\n If two strings are anagrams, then after sorted, they will be the same\n \"\"\"\n map = {}\n for s in strs:\n a = \"\".join(sorted(s))\n map[a] = map.get(a, []) + [s]\n return map.values()\n\n\nif __name__ == '__main__':\n arr = [\"eat\",\"tea\",\"tan\",\"ate\",\"nat\",\"bat\"]\n sol = Solution()\n print(sol.groupAnagrams(arr))\n","repo_name":"Rocky-Zhenxiang-Fang/LeetCode","sub_path":"Group Anagrams.py","file_name":"Group Anagrams.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23433912834","text":"from HW09_Gianna_Fazio import Repository, Student, Instructor\r\nfrom collections import defaultdict\r\nimport unittest\r\n\r\nclass RepositoryTest(unittest.TestCase):\r\n ''' contains all test cases for HW09 '''\r\n def test_instances(self):\r\n ''' Tests if data is read correctly from the files ''' \r\n repo = Repository(r\"C:\\Users\\Test\\Desktop\\Scripts\\Repository\")\r\n repo.read_files()\r\n\r\n # tests the number of students and instructors read from each file is correct\r\n self.assertEqual(len(repo._students), 10)\r\n self.assertEqual(len(repo._instructors), 6)\r\n \r\n # tests that the first student object was created correctly from reading the files\r\n student = Student()\r\n student._cwid = \"10103\"\r\n student._name = \"Baldwin, C\"\r\n student._major = \"SFEN\"\r\n student._course = defaultdict(str)\r\n student._course['SSW 567'] = 'A'\r\n student._course['SSW 564'] = 'A-'\r\n student._course['SSW 687'] = 'B'\r\n student._course['CS 501'] = 'B'\r\n self.assertEqual(str(repo._students[student._cwid]), str(student))\r\n \r\n # test that the first instructor object was created correctly from reading the files\r\n instructor = Instructor()\r\n instructor._cwid = \"98763\"\r\n instructor._name = \"Newton, I\"\r\n instructor._dept = \"SFEN\"\r\n instructor._course = defaultdict(int)\r\n instructor._course['SSW 555'] = 1\r\n instructor._course['SSW 689'] = 1\r\n self.assertEqual(str(repo._instructors[instructor._cwid]), str(instructor))\r\n \r\n def test_student_summary(self):\r\n ''' tests the creation of the student Pretty Table '''\r\n repo = Repository(r\"C:\\Users\\Test\\Desktop\\Scripts\\Repository\")\r\n print('/n')\r\n repo.read_files()\r\n \r\n expect 
= [('10103', 'Baldwin, C', ['CS 501', 'SSW 564', 'SSW 567', 'SSW 687']), \r\n ('10115', 'Wyatt, X', ['CS 545', 'SSW 564', 'SSW 567', 'SSW 687']), \r\n ('10172', 'Forbes, I', ['SSW 555', 'SSW 567']), \r\n ('10175', 'Erickson, D', ['SSW 564', 'SSW 567', 'SSW 687']), \r\n ('10183', 'Chapman, O', ['SSW 689']), \r\n ('11399', 'Cordova, I', ['SSW 540']), \r\n ('11461', 'Wright, U', ['SYS 611', 'SYS 750', 'SYS 800']), \r\n ('11658', 'Kelly, P', ['SSW 540']), \r\n ('11714', 'Morton, A', ['SYS 611', 'SYS 645']), \r\n ('11788', 'Fuller, E', ['SSW 540'])] \r\n \r\n self.assertEqual(repo.student_summary(),expect)\r\n\r\n def test_instructor_summary(self):\r\n ''' tests the creation of the instructor Pretty Table '''\r\n repo = Repository(r\"C:\\Users\\Test\\Desktop\\Scripts\\Repository\")\r\n print('/n')\r\n repo.read_files()\r\n\r\n expect = [('98760', 'Darwin, C', 'SYEN', 'SYS 800', 1), \r\n ('98760', 'Darwin, C', 'SYEN', 'SYS 750', 1), \r\n ('98760', 'Darwin, C', 'SYEN', 'SYS 611', 2), \r\n ('98760', 'Darwin, C', 'SYEN', 'SYS 645', 1), \r\n ('98763', 'Newton, I', 'SFEN', 'SSW 555', 1), \r\n ('98763', 'Newton, I', 'SFEN', 'SSW 689', 1), \r\n ('98764', 'Feynman, R', 'SFEN', 'SSW 564', 3), \r\n ('98764', 'Feynman, R', 'SFEN', 'SSW 687', 3), \r\n ('98764', 'Feynman, R', 'SFEN', 'CS 501', 1), \r\n ('98764', 'Feynman, R', 'SFEN', 'CS 545', 1), \r\n ('98765', 'Einstein, A', 'SFEN', 'SSW 567', 4), \r\n ('98765', 'Einstein, A', 'SFEN', 'SSW 540', 3)]\r\n \r\n self.assertEqual(repo.instructor_summary(),expect)\r\n \r\nif __name__ == '__main__':\r\n unittest.main(exit=True,verbosity=2)","repo_name":"giannafazio2/SSW810","sub_path":"HW09_Test_Gianna_Fazio.py","file_name":"HW09_Test_Gianna_Fazio.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15289055581","text":"import numpy as np\nimport signal2noise\nimport argparse\n\n# Argparse\nparser = argparse.ArgumentParser(prog=\"Signal-to-noise\")\nparser.add_argument(\"F\", help=\"File for measurements (admixture or PCA\")\nparser.add_argument(\"L\", help=\"Numeric labels file (1:K)\")\nargs = parser.parse_args()\n\nF = np.genfromtxt(args.F, dtype=np.float32)\nL = np.genfromtxt(args.L, dtype=np.int32)-1\nK = max(L)+1\nO = np.argsort(L)\nF = F[O,:]\nL = L[O]\n\nB = 0.0\nW = 0.0\nfor k in range(K):\n\tb_k = signal2noise.betweenDist(F, L, k)\n\tw_k = signal2noise.withinDist(F, L, k)\n\tB += b_k\n\tW += w_k\n\nprint((B/float(K))/(W/float(K)))\n","repo_name":"Rosemeis/HaploNet","sub_path":"scripts/signal2noise.py","file_name":"signal2noise.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"78"} +{"seq_id":"13204273171","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport tensorflow as tf\n\n# 声明一个先进先出的队列,队列中最多100个元素,类型为实数\nqueue = tf.FIFOQueue(100,\"float\")\n# 定义队列的入队操作\nenqueue_op = queue.enqueue([tf.random_normal([1])])\n\n# print(enqueue_op)\n'''\ndef random_normal(shape,\n mean=0.0,\n stddev=1.0,\n dtype=dtypes.float32,\n seed=None,\n name=None):\n'''\n# 使用tf.train.QueeuRunner 来创建多个线程运行队列的入队操作\n# tf.train.QueueRunner 的第一个参数给出了被操作的队列,[enqueue_op]*5,表示了需要启动5个线程,每个线程中运行的是enqueue_op\nqr = tf.train.QueueRunner(queue,[enqueue_op]*5)\n\n# 将定义过的QueueRunner加入TensorFlow计算图上指定的集合\n# tf.train.add_queue_runner函数没有指定集合,则加入默认集合tf.GrapgKeys.QUEUE_RUNNERS\n# 下面的函数就是刚刚定义的\n# qr加入默认的tf.GraphKeyers.QUEUE_RUNNERS集合\ntf.train.add_queue_runner(qr)\n# 定义出队操作\nout_tensor = 
queue.dequeue()\n\nwith tf.Session() as sess:\n # print(sess.run([tf.random_normal([1])] * 5))\n # print(sess.run(tf.random_normal([1])))\n # print(\"-------------------\")\n # 使用tf.train,Coordinator()来协同启动的线程\n coord = tf.train.Coordinator()\n # 使用tf.train.QueueRunner时,需要明确调用tf.train.start_queue_runners来启动所有线程。\n # 否则因为没有线程运行入队操作,当调用出队操作时,程序会一直等待入队操作被运行。\n # tf.train.start_queue_runners函数会默认启动tf.GraphKeys.QUEUE_RUNNERS集合中所有的QueueRunner。\n # 因为这个函数只支持启动指定集合中的QueueRunner,所以一般\n # 来说tf.train.add_queue_runners函数和tf.train.start_queue_runners函数会指定同一个集合\n threads = tf.train.start_queue_runners(sess=sess,coord=coord)\n # 获取队列中的取值\n for _ in range(3):\n print(sess.run(out_tensor)[0])\n # 使用tf.train.Coordinator来停止所有的线程\n coord.request_stop()\n coord.join(threads)\n'''\n以上程序将启动5个线程来执行行队列入队的操作,气质每一个线程都是将随机数写入队列\n于是在每次运行出队操作时,可以得到一个随机数。运行这段程序可以得到类似下面的结果:\n0.43106672\n-0.30390096\n1.5770454\n'''","repo_name":"myhuacai/deeplearning","sub_path":"TFdemo/7.3.1-2QueueRuuner.py","file_name":"7.3.1-2QueueRuuner.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9025054201","text":"#20209\nimport sys\nimport copy\ninput = sys.stdin.readline\n\n#스위치 누르는 모든 경우\ndef recursive(idx):\n if idx == k:\n pushSw(selectSw, cube) \n return\n for i in range(5):\n selectSw[idx] = i\n recursive(idx + 1)\n\n#ls : selectSw([0,0])\ndef pushSw(ls, cube2):\n global ans\n tmpCube = copy.deepcopy(cube2)\n # i번째 스위치(i+1만큼 값이 증가)\n for i in range(k):\n if ls[i] != 0:\n for j in sw[i]:\n tmpCube[j-1] = (tmpCube[j-1] + (i+1) * ls[i]) % 5\n #모두 같은 값인지 check\n flag = True\n tmp = tmpCube[0]\n for i in range(1, len(tmpCube)):\n if tmp != tmpCube[i]:\n flag = False\n if flag == False:\n return\n sum_ = 0\n for i in ls:\n sum_ += i\n ans = min(ans, sum_)\n \n\nn,k = map(int,input().split())\n\nselectSw = [0 for _ in range(k)]\n\ncube = list(map(int,input().split()))\nsw = []\nfor _ in range(k):\n tmp = list(map(int,input().split()))\n sw.append(tmp[1:])\n\nans = 2e9\n\nrecursive(0)\n\nprint(ans if ans != 2e9 else -1)\n","repo_name":"HiGeuni/Problem-Solving","sub_path":"BaekJoon/20209.py","file_name":"20209.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"26437348948","text":"import threading\nimport socket\n\n#Script para probar comportamineto del server al mandarle ciertos mensajes\n#Este es para mensajes de tiempo\nLOCALHOST = '127.0.0.1'\n#IPTested = str(input(\"Direccion a la que deseas hacer pruebas\"))\n#obtenemos la ip donde esta corriendo el programa para no tener que ingresarla manualmente\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.connect((\"8.8.8.8\", 80))\nIPTested = s.getsockname()[0]\nPORT = 65432 # Puerto de jugador, para enviar y recibir la lista de numeros\nBCKPORT = 65433 # Puerto para sincronizacion de BDD entre servidores\nTIMEPORT = 60900 #Puerto de sincronizacion de reloj\nELECPORT = 30400 #Puerto de Elecciones\n\nsock = socket.socket(socket.AF_INET , socket.SOCK_DGRAM) #Creacion de socket UDP para pruebas\nsock.bind((LOCALHOST,TIMEPORT))\nsock.settimeout(6)\n\nwhile True:\n #msg=str(input(\"Mensaje a enviar\"))\n #sock.sendto(msg.encode('utf-8'),(IPTested,ELECPORT))\n try:\n data , addr = sock.recvfrom(100)\n cmdArgs = data.decode('utf-8').split()\n print(cmdArgs)\n if(cmdArgs[0] == \"GTM\"): #si llega este mensaje\n #msg = str(clk1.clk.getTimeToNumber())#Mandar hora\n msg = \"43665\"\n 
sock.sendto(msg.encode('utf-8'),(addr))\n elif(cmdArgs[0] == \"CTM\"):#Si llega este mensaje\n print(\"Hora recibida \",cmdArgs[1] )\n #clk1.clk.setTimeFromNumber(int(cmdArgs[1]))#Ajustar reloj\n except socket.timeout as e:\n print(\"Timeout in listentime\")\n continue\n except IndexError:\n print(\"No data received\")\n continue\n","repo_name":"Izaird/Desarrollo-de-sistemas-distribuidos","sub_path":"Practica_06/TimeTestSock.py","file_name":"TimeTestSock.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7548179769","text":"# Importar librerías\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nfrom torch.utils.tensorboard import SummaryWriter\nfrom sklearn.model_selection import train_test_split\n\n# Verificar si GPU está disponible\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n# Definir la transformación de datos\ntransform = transforms.Compose(\n [\n transforms.Resize((512, 512)),\n transforms.ToTensor(),\n ]\n)\n\n# Ruta a tus datos\ndata_path = \"manzanitas\"\n\n# Crear un conjunto de datos\ndataset = datasets.ImageFolder(root=data_path, transform=transform)\n\n# Dividir el conjunto de datos en entrenamiento y prueba\ntrain_size = int(0.8 * len(dataset))\ntest_size = len(dataset) - train_size\ntrain_dataset, test_dataset = torch.utils.data.random_split(\n dataset, [train_size, test_size]\n)\n\n\n# Crear los dataloaders\ntrain_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)\ntest_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)\n\n\n# Instanciar el modelo\nclass CNN(nn.Module):\n def __init__(self):\n \"\"\"\n Constructor de la clase CNN.\n Inicializa los componentes de la red convolucional.\n \"\"\"\n super(CNN, self).__init__()\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)\n self.relu = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.flatten = nn.Flatten()\n self.fc1 = nn.Linear(16 * 256 * 256, 2) # 2 clases o 2 etiquetas\n\n def forward(self, x):\n \"\"\"\n Método forward de la clase CNN.\n Realiza la propagación hacia adelante de la entrada x a través de la red convolucional.\n\n Args:\n x (torch.Tensor): Tensor de entrada de la red convolucional.\n\n Returns:\n torch.Tensor: Tensor de salida de la red convolucional.\n \"\"\"\n x = self.conv1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.flatten(x)\n x = self.fc1(x)\n return x\n\n\n# Instanciar el modelo y moverlo a la GPU si está disponible\nmodel = CNN().to(device)\n\n# Definir la función de pérdida y el optimizador\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=0.01)\n\n# Configurar TensorBoard\nwriter = SummaryWriter()\n# Entrenar el modelo\nnum_epochs = 10\ndesired_accuracy = 0.95\n# debemos almacenar la precision exacta\nprecision_final = 0\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_loader):\n # Mover datos a la GPU si está disponible\n images, labels = images.to(device), labels.to(device)\n\n optimizer.zero_grad()\n outputs = model(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # Escribir pérdida en TensorBoard\n writer.add_scalar(\"Train/Loss\", loss.item(), epoch * len(train_loader) + i)\n\n # Evaluar el modelo en cada época\n model.eval()\n correct = 0\n total = 0\n\n with torch.no_grad():\n 
for images, labels in test_loader:\n # Mover datos a la GPU si está disponible\n images, labels = images.to(device), labels.to(device)\n\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n accuracy = correct / total\n\n # Escribir precisión en TensorBoard\n writer.add_scalar(\"Test/Accuracy\", accuracy, epoch)\n\n print(\n f\"Época {epoch + 1}/{num_epochs}, Precisión en el conjunto de prueba: {accuracy * 100:.2f}%\"\n )\n precision_final = accuracy\n # Verificar si se alcanza la precisión deseada\n if accuracy >= desired_accuracy:\n print(\n f\"Precisión minima deseada alcanzada ({desired_accuracy * 100:.2f}%). Deteniendo el entrenamiento. y con una precisión de {accuracy* 100:.2f}%\"\n )\n break\n\n# Cerrar el escritor de TensorBoard al final\nwriter.close()\n# Guardar el modelo\n# añadir fecha y hora con segundo exacto cuando se esta guardando\n# el modelo para que concuerde con lo de tensorboard lo mejor posible\nfrom datetime import datetime\n\n# Obtener la fecha y hora actuales\nnow = datetime.now()\n# Formatear la fecha y hora\nformatted_date_time = now.strftime(\"%Y-%m-%d_%H-%M-%S\")\n# Imprimir o usar la cadena formateada\nprint(formatted_date_time)\n# ademas agregaremos la precisión siendo el modelo + nombre + precision + fecha y hora\nnombre = f\"modelo_manzanas_{precision_final}_{formatted_date_time}\"\ntorch.save(model.state_dict(), f\"{nombre}.pth\")\nprint(f\"Nombre del modelo guardo como: {nombre}\")\n\n# Exportar el modelo a formato ONNX\n\nimport torch\n\n# Define el modelo y carga los pesos entrenados\nmodel = CNN()\nnombre_modelo = nombre\nmodel.load_state_dict(torch.load(f\"{nombre_modelo}.pth\"))\nmodel.eval()\n\n# Define un tensor de ejemplo con las dimensiones correctas\ndummy_input = torch.randn(1, 3, 512, 512)\n\n# Exporta el modelo a formato ONNX\ntorch.onnx.export(model, dummy_input, f\"{nombre_modelo}.onnx\", verbose=True)\nprint(f\"Modelo guardado exitosamente: {nombre_modelo}.onnx\")\n","repo_name":"Jairodaniel-17/TA_Microprocesadores_Manzanas_SD","sub_path":"codigos_modelo/version_final.py","file_name":"version_final.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9342124567","text":"from Labeler import SvmLabeler\nfrom TomogramAnalyzer import TomogramAnalyzer\nfrom CommonDataTypes import EulerAngle\n\ndef svm_eval(svm_and_templates, tomograms, test_list = None):\n \"\"\"\n Evaluate the tomograms the supplied SVM and templates (As returned from svm_train).\n :param svm_and_templates: SVM and templates as returned from svm_train.\n :param tomograms: Iterator of the tomograms to be evaluated.\n :param test_list: If not None, fills with analyzer objects for debugging\n :return A list of lists of the candidates for each tomogram.\n \"\"\"\n\n svm, tilts_templates = svm_and_templates\n EulerAngle.Tilts, templates = tilts_templates\n tomogram_candidates = []\n labeler = SvmLabeler(svm)\n\n for tomogram in tomograms:\n # Analyze the tomogram\n analyzer = TomogramAnalyzer(tomogram, templates, labeler)\n (candidates, feature_vectors, predicted_labels) = analyzer.analyze()\n\n # Add the candidates to the list of results\n tomogram_candidates.append(candidates)\n\n # save analyzer object for debugging\n if test_list is not None:\n test_list.append(analyzer)\n\n return 
tomogram_candidates\n","repo_name":"guyrom27/CryoEmSvm","sub_path":"src/SvmEval.py","file_name":"SvmEval.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70805377852","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\ndef normalize_suffix(suffix):\n '''Returns a normalized suffix, i.e. ensures it starts with a dot and\n doesn't starts or ends with whitespace characters'''\n value = suffix.strip()\n if len(value) and not value.startswith('.'):\n value = '.' + value\n return value\n\n# Variables from the build system\nAR = \"/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/ar\"\nAR_EXTRACT = \"$(AR) x\".replace('$(AR)', AR)\nDLL_PREFIX = \"lib\"\nLIB_PREFIX = \"lib\"\nOBJ_SUFFIX = normalize_suffix(\"o\")\nLIB_SUFFIX = normalize_suffix(\"a\")\nDLL_SUFFIX = normalize_suffix(\".dylib\")\nIMPORT_LIB_SUFFIX = normalize_suffix(\"\")\nLIBS_DESC_SUFFIX = normalize_suffix(\"desc\")\nEXPAND_LIBS_LIST_STYLE = \"list\"\nEXPAND_LIBS_ORDER_STYLE = \"\"\nLD_PRINT_ICF_SECTIONS = \"\"\n","repo_name":"play-co/spidermonkey-ios","sub_path":"js/src/config/expandlibs_config.py","file_name":"expandlibs_config.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"78"} +{"seq_id":"73930857212","text":"from ahk import AHK\n# import ahkpy\nimport math\nimport keyboard\nimport time\n\n\nahk = AHK()\n\n\n# time.sleep(1)\n# print(\"hi\")\nor_win = ahk.find_window(title='PETSCREEN') # Find the opened window\nor_win.always_on_top = 'On'\nor_win.set_always_on_top('On')\n\ntheta = 0\nclass specialWindow:\n def __init__(self,win):\n self.win = win\n self.x = self.win.get_position()[0]\n self.y = self.win.get_position()[1]\n self.speed = 20\n self.acceleration = 0\n self.dx = 0\n self.dy = 0\n self.width = win.get_position()[2]\n self.height = win.get_position()[3]\n self.hw=win.get_position()[2]/2\n self.hh = win.get_position()[3]/2\n # self.ddx = 0\n # self.ddy = 0\n def updatePos(self):\n # self.dx += self.ddx\n # self.dy += self.ddy\n self.x += self.dx\n self.y += self.dy\n #\"dx\",self.dx,\"dy\",self.dy)\n self.win.move(x=self.x,y=self.y,blocking=True)\n #print(self.x,self.y)\n def update(self):\n self.x = self.win.get_position()[0]\n self.y = self.win.get_position()[1]\n self.updatePos()\n #print(\"hi\")\n def moveTowards(self,tarx,tary):\n \n xChange = self.x+self.hw-tarx\n yChange = self.y+self.hh-tary\n otherChange = math.sqrt(xChange**2+yChange**2)\n if otherChange >100:\n self.acceleration +=.25\n elif otherChange >10:\n self.acceleration -=.05\n else:\n self.acceleration -=.5\n \n self.acceleration = min(1,self.acceleration)\n self.acceleration = max(0,self.acceleration)\n \n self.dx = -self.acceleration * self.speed * xChange/otherChange\n self.dy = -self.acceleration * self.speed * yChange/otherChange\n self.updatePos()\n #print(\"dx\",self.dx,\"dy\",self.day)\n \n\n\nwin = specialWindow(or_win)\n\nstate = 0\n\nttime = 0\nstableTarx = 0\nstableTary = 0\nwhile or_win.exists():\n \n ttime +=1\n\n \n\n if (ttime%10==0):\n #win.update()a\n ttime = 0\n if keyboard.is_pressed(\"a\"):\n state =2\n \n if keyboard.is_pressed(\"ctrl+alt+p\"):\n get_mouse_pos = ahk.get_mouse_position(coord_mode=\"Screen\")\n stableTarx = get_mouse_pos[0]\n 
stableTary = get_mouse_pos[1]\n state = 3\n # if state ==1:\n # win.move(x=ahk.get_mouse_position()[0]-win.get_position()[2]/2+math.cos(theta)*250, y=ahk.get_mouse_position()[1]-win.get_position()[3]/2+math.sin(theta)*250,blocking=False);\n # theta+=.07\n\n if state == 2:\n #print(\"hi\")\n # curx = win.get_position()[2]\n # cury = win.get_position()[3]\n get_mouse_pos = ahk.get_mouse_position(coord_mode=\"Screen\")\n tarx = get_mouse_pos[0]\n tary = get_mouse_pos[1]\n # print(get_mouse_pos)\n # tarwin = ahk.win_get_from_mouse_position()\n # tarwinx= tarwin.get_position()[0]+11\n # tarwiny= tarwin.get_position()[1]+11\n # print(tarwinx,tarwiny)\n \n \n #print(tarx,tary)\n # state = 0\n win.moveTowards(tarx,tary)\n # win.moveTowards(tarx+tarwinx,tary+tarwiny)\n if state ==3:\n win.moveTowards(stableTarx,stableTary)\n \n\n \n\n\n# ahk.add_hotkey('^+LButton', callback=my_callback)\n# ahk.start_hotkeys() # start the hotkey process thread\n# ahk.block_forever() \n# ahk.add_hotstring('btw', my_callback) # call python function in response to the hotstring\n","repo_name":"jerlee25/desktop_pets","sub_path":"testing/ahk_pet_screen_mover.py","file_name":"ahk_pet_screen_mover.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"25366973805","text":"from itertools import product\nimport numpy as np\nfrom scipy.special import sph_harm\nfrom sympy.physics.wigner import wigner_3j\n\n\n__all__ = ['BondOrientOrder']\n\n\ndef _wigner_3j_values(l):\n m = range(-l, l + 1)\n inds = filter(lambda x: sum(x) == 0, product(m, m, m))\n res = {i: float(wigner_3j(l, l, l, *i).evalf(20)) for i in inds}\n\n return res\n\n\nclass BondOrientOrder:\n def __init__(self, atoms, voro_info, weighted=False):\n self.atoms = atoms\n self.n = len(atoms)\n assert self.n == len(voro_info['neighbors'])\n self.positions = atoms.positions\n self.neighbors = voro_info['neighbors']\n self.neighbor_vectors = voro_info['neighbor_vectors']\n self.weighted = weighted\n if self.weighted:\n self.face_weights = voro_info['face_areas']\n for i, f in enumerate(self.face_weights):\n self.face_weights[i] = [j/sum(f) for j in f]\n self.wigner_3j_cache = {}\n self.q_lm_cache = {}\n self.Q_lm_cache = {}\n\n def compute_q_lm(self, l, m):\n if (l, m) in self.q_lm_cache:\n return self.q_lm_cache[(l, m)]\n\n q_lm = np.zeros(self.n, dtype=np.complex128)\n for iatom in range(self.n):\n nn = len(self.neighbors[iatom])\n\n if nn == 0:\n q_lm[iatom] = np.nan + np.nan * 1j\n else:\n weights = 1.0/nn\n if self.weighted:\n weights = np.asarray(self.face_weights[iatom])\n\n bvecs = self.neighbor_vectors[iatom]\n xy = bvecs[:, 0]**2 + bvecs[:, 1]**2\n theta = np.arctan2(bvecs[:, 1], bvecs[:, 0])\n phi = np.arctan2(np.sqrt(xy), bvecs[:, 2])\n t = sph_harm(m, l, theta, phi)\n\n q_lm[iatom] = np.sum(t*weights)\n\n self.q_lm_cache[(l, m)] = q_lm\n\n return q_lm\n\n\n def compute_Q_lm(self, l, m):\n if (l, m) in self.Q_lm_cache:\n return self.Q_lm_cache[(l, m)]\n\n q_lm = self.compute_q_lm(l, m)\n Q_lm = np.zeros_like(q_lm)\n for iatom in range(self.n):\n nn = len(self.neighbors[iatom])\n t = q_lm[iatom] + q_lm[self.neighbors[iatom]].sum()\n Q_lm[iatom] = t/(nn + 1)\n\n self.Q_lm_cache[(l, m)] = Q_lm\n\n return Q_lm\n\n def compute_q_l(self, l, coarse_grained=False):\n q_lm_func = self.compute_q_lm\n if coarse_grained:\n q_lm_func = self.compute_Q_lm\n\n q_l = 0\n for m in range(-l, l + 1):\n q_lm = q_lm_func(l, m)\n q_l += np.abs(q_lm) ** 2\n q_l = 
np.sqrt(4*np.pi/(2*l + 1)*q_l)\n\n return q_l\n\n def compute_w_l(self, l, coarse_grained=False):\n q_lm_func = self.compute_q_lm\n if coarse_grained:\n q_lm_func = self.compute_Q_lm\n\n if l not in self.wigner_3j_cache:\n w3j = _wigner_3j_values(l)\n self.wigner_3j_cache[l] = w3j\n else:\n w3j = self.wigner_3j_cache[l]\n\n w_l = 0\n for m1, m2, m3 in w3j.keys():\n q_lm1 = q_lm_func(l, m1)\n q_lm2 = q_lm_func(l, m2)\n q_lm3 = q_lm_func(l, m3)\n w = w3j[(m1, m2, m3)]\n w_l += w*q_lm1*q_lm2*q_lm3\n\n return w_l.real\n\n def compute_w_l_cap(self, l, coarse_grained=False):\n q_lm_func = self.compute_q_lm\n if coarse_grained:\n q_lm_func = self.compute_Q_lm\n\n q_l = 0\n for m in range(-l, l + 1):\n q_lm = q_lm_func(l, m)\n q_l += np.abs(q_lm)**2\n w_l = self.compute_w_l(l, coarse_grained=coarse_grained)\n w_l_cap = w_l*np.power(q_l, -3/2)\n\n return w_l_cap\n\n\nif __name__ == \"__main__\":\n from ase.build import bulk\n from voroana import voronoi_analysis\n\n a = bulk(\"Fe\")*(2, 2, 2)\n boo = BondOrientOrder(a, voronoi_analysis(a, outputs='snN'))\n print(boo.compute_q_l(4)[0])\n print(boo.compute_q_l(6)[0])\n","repo_name":"thatstar/voroana","sub_path":"voroana/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"16355702318","text":"import numpy as np\nimport torch\n\nfrom utilities import read_new_init_embs,read_transe_out_embs\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\ndef read_data():\n\n out_transE_entity_emb = './benchmarks/FB15K/out_transE_entity_embedding100.txt'\n out_transE_relation_emb = './benchmarks/FB15K/out_transE_relation_embedding100.txt'\n\n pre_train_entity_id, pre_train_rel_id = read_transe_out_embs(out_transE_entity_emb,out_transE_relation_emb)\n print(\"out_transE_entity_emb \",pre_train_entity_id.shape)\n print(\"out_transE_relation_emb \",pre_train_rel_id.shape)\n\n\n new_entity_embs_path = './benchmarks/FB15K/new_init_entity_embedding_mention_description_id0_des300.txt'\n new_rel_embs_path = './benchmarks/FB15K/new_init_relation_embedding_mention_description_id0_des300.txt'\n print(\"entity_embs \",new_entity_embs_path)\n print(\"relation_embs \",new_rel_embs_path)\n\n entity_embs, rel_embs = read_new_init_embs(new_entity_embs_path,new_rel_embs_path)\n\n print(\"entity_embs.shape \", entity_embs.shape)\n print(\"rel_embs.shape \",rel_embs.shape)\n\n print(\"=============\")\n\n\n _out_ent_embs = torch.from_numpy(pre_train_entity_id)\n _out_rel_embs = torch.from_numpy(pre_train_rel_id)\n\n\n\n init_ent_embs = torch.from_numpy(entity_embs)\n init_rel_embs = torch.from_numpy(rel_embs)\n\n entity_embedding = torch.cat([_out_ent_embs,init_ent_embs],dim=1)\n relation_embedding = torch.cat([_out_rel_embs,init_rel_embs],dim=1)\n\n # print(entity_embedding.shape)\n # print(relation_embedding.shape)\n logger.info(entity_embedding.shape)\n\ndef read_files(rank_path):\n f = open(rank_path)\n f.readline()\n\n x_obj = []\n for d in f:\n d = d.strip()\n if d:\n d = d.split('\\t')\n\n elements = []\n for n in d:\n elements.append(n.strip())\n d = elements\n x_obj.append(d)\n f.close()\n\n return np.array(x_obj,dtype=int)\n\ndef static_frequency(dic_path, data):\n d = data\n c = dict.fromkeys(d, 0)\n for x in d:\n c[x] += 1\n # sorted_x = sorted(c.items(), key=lambda d: d[1], reverse=True)\n\n sorted_x = sorted(c.items(), key=lambda d: int(d[0]), reverse=False)\n\n # write_entity_relation_frequency(path=dic_path, 
data=sorted_x)\n\n num = len(sorted_x)\n file = open(dic_path, 'w')\n file.writelines('%s\\n' % num)\n i = 0\n for e in sorted_x:\n file.write(str(e[0]) + '\\t' + str(e[1]) + '\\n')\n i += 1\n\n file.close()\n\n return sorted_x\n\n\nif __name__ == \"__main__\":\n\n data = read_files(\"./benchmarks/FB15K237/each_entity_num_neighbours\")\n num_nei = data[:,0].tolist()\n print(num_nei)\n print(np.mean(num_nei))\n static_frequency(\"./benchmarks/FB15K237/each_entity_num_neighbours_frequency.txt\",data[:,0].tolist())\n\n\n\n","repo_name":"MiaoHu-Pro/NDKGE","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13300013026","text":"import unittest\nimport warnings\nimport tempfile\nfrom datetime import datetime, timedelta\nfrom openmm import *\nfrom openmm.app import *\nfrom openmm.unit import *\nimport math, random\n\nclass TestIntegrators(unittest.TestCase):\n \"\"\"Test Python Integrator classes\"\"\"\n\n def testMTSIntegratorExplicit(self):\n \"\"\"Test the MTS integrator on an explicit solvent system\"\"\"\n # Create a periodic solvated system with PME\n pdb = PDBFile('systems/alanine-dipeptide-explicit.pdb')\n ff = ForceField('amber99sbildn.xml', 'tip3p.xml')\n system = ff.createSystem(pdb.topology, nonbondedMethod=PME)\n\n # Split forces into groups\n for force in system.getForces():\n if force.__class__.__name__ == 'NonbondedForce':\n force.setForceGroup(1)\n force.setReciprocalSpaceForceGroup(2)\n else:\n force.setForceGroup(0)\n\n # Create an integrator\n integrator = MTSIntegrator(4*femtoseconds, [(2,1), (1,2), (0,8)])\n\n # Run a few steps of dynamics\n context = Context(system, integrator)\n context.setPositions(pdb.positions)\n integrator.step(10)\n\n # Ensure energy is well-behaved.\n state = context.getState(getEnergy=True)\n if not (state.getPotentialEnergy() / kilojoules_per_mole < 0.0):\n raise Exception('Potential energy of alanine dipeptide system with MTS integrator is blowing up: %s' % str(state.getPotentialEnergy()))\n\n def testMTSIntegratorConstraints(self):\n \"\"\"Test the MTS integrator energy conservation on a system of constrained particles with no inner force (just constraints)\"\"\"\n\n # Create a constrained test system\n numParticles = 8\n numConstraints = 5\n system = System()\n force = NonbondedForce()\n for i in range(numParticles):\n system.addParticle(5.0 if i%2==0 else 10.0)\n force.addParticle((0.2 if i%2==0 else -0.2), 0.5, 5.0);\n system.addConstraint(0, 1, 1.0);\n system.addConstraint(1, 2, 1.0);\n system.addConstraint(2, 3, 1.0);\n system.addConstraint(4, 5, 1.0);\n system.addConstraint(6, 7, 1.0);\n system.addForce(force)\n\n # Create integrator where inner timestep just evaluates constraints\n integrator = MTSIntegrator(1*femtoseconds, [(1,1), (0,4)])\n integrator.setConstraintTolerance(1e-5);\n\n positions = [ (i/2., (i+1)/2., 0.) 
for i in range(numParticles) ]\n velocities = [ (random.random()-0.5, random.random()-0.5, random.random()-0.5) for i in range(numParticles) ]\n\n # Create Context\n platform = Platform.getPlatformByName('Reference')\n context = Context(system, integrator, platform)\n context.setPositions(positions)\n context.setVelocities(velocities)\n context.applyConstraints(1e-5)\n\n # Simulate it and see whether the constraints remain satisfied.\n CONSTRAINT_RELATIVE_TOLERANCE = 1.e-4 # relative constraint violation tolerance\n ENERGY_RELATIVE_TOLERANCE = 1.e-2 # relative energy violation tolerance\n for i in range(1000):\n state = context.getState(getPositions=True, getEnergy=True)\n positions = state.getPositions()\n for j in range(numConstraints):\n [particle1, particle2, constraint_distance] = system.getConstraintParameters(j)\n current_distance = 0.0 * nanometers**2\n for k in range(3):\n current_distance += (positions[particle1][k] - positions[particle2][k])**2\n current_distance = sqrt(current_distance)\n # Fail test if outside of relative tolerance\n relative_violation = (current_distance - constraint_distance) / constraint_distance\n if (relative_violation > CONSTRAINT_RELATIVE_TOLERANCE):\n raise Exception('Constrained distance is violated by relative tolerance of %f (constraint %s actual %s)' % (relative_violation, str(constraint_distance), str(current_distance)))\n # Check total energy\n total_energy = state.getPotentialEnergy() + state.getKineticEnergy()\n if (i == 1):\n initial_energy = total_energy\n elif (i > 1):\n relative_violation = abs((total_energy - initial_energy) / initial_energy)\n if (relative_violation > ENERGY_RELATIVE_TOLERANCE):\n raise Exception('Total energy is violated by relative tolerance of %f on step %d (initial %s final %s)' % (relative_violation, i, str(initial_energy), str(total_energy)))\n # Take a step\n integrator.step(1)\n\n def testBadGroups(self):\n \"\"\"Test the MTS integrator with bad force group substeps.\"\"\"\n # Create a periodic solvated system with PME\n pdb = PDBFile('systems/alanine-dipeptide-explicit.pdb')\n ff = ForceField('amber99sbildn.xml', 'tip3p.xml')\n system = ff.createSystem(pdb.topology, nonbondedMethod=PME)\n\n # Split forces into groups\n for force in system.getForces():\n if force.__class__.__name__ == 'NonbondedForce':\n force.setForceGroup(1)\n force.setReciprocalSpaceForceGroup(2)\n else:\n force.setForceGroup(0)\n\n with self.assertRaises(ValueError):\n # Create an integrator\n integrator = MTSIntegrator(4*femtoseconds, [(2,1), (1,3), (0,8)])\n\n # Run a few steps of dynamics\n context = Context(system, integrator)\n context.setPositions(pdb.positions)\n integrator.step(10)\n\n def testMTSLangevinIntegrator(self):\n \"\"\"Test the MTSLangevinIntegrator on an explicit solvent system\"\"\"\n # Create a periodic solvated system with PME\n pdb = PDBFile('systems/alanine-dipeptide-explicit.pdb')\n ff = ForceField('amber99sbildn.xml', 'tip3p.xml')\n system = ff.createSystem(pdb.topology, nonbondedMethod=PME)\n\n # Split forces into groups\n for force in system.getForces():\n if force.__class__.__name__ == 'NonbondedForce':\n force.setForceGroup(1)\n force.setReciprocalSpaceForceGroup(2)\n else:\n force.setForceGroup(0)\n\n # Create an integrator\n integrator = MTSLangevinIntegrator(300*kelvin, 5/picosecond, 4*femtoseconds, [(2,1), (1,2), (0,4)])\n\n # Run some equilibration.\n context = Context(system, integrator)\n context.setPositions(pdb.positions)\n context.setVelocitiesToTemperature(300*kelvin)\n integrator.step(500)\n\n # See 
if the temperature is correct.\n totalEnergy = 0*kilojoules_per_mole\n steps = 50\n for i in range(steps):\n integrator.step(10)\n totalEnergy += context.getState(getEnergy=True).getKineticEnergy()\n averageEnergy = totalEnergy/steps\n dofs = 3*system.getNumParticles() - system.getNumConstraints() - 3\n temperature = averageEnergy*2/(dofs*MOLAR_GAS_CONSTANT_R)\n self.assertTrue(290*kelvin < temperature < 310*kelvin)\n\n def testMTSLangevinIntegratorFriction(self):\n \"\"\"Test the MTSLangevinIntegrator on a force-free particle to ensure friction is properly accounted for (issue #3790)\"\"\"\n # Create a System with a single particle and no forces\n system = System()\n system.addParticle(12.0*amu)\n platform = Platform.getPlatformByName('Reference')\n initial_positions = [Vec3(0,0,0)]\n initial_velocities = [Vec3(1,0,0)]\n nsteps = 125 # number of steps to take\n collision_rate = 1/picosecond\n timestep = 4*femtoseconds\n\n def get_final_velocities(nsubsteps):\n \"\"\"Get the final velocity vector after a fixed number of steps for the specified number of substeps\"\"\"\n integrator = MTSLangevinIntegrator(0*kelvin, collision_rate, timestep, [(0,nsubsteps)])\n context = Context(system, integrator, platform)\n context.setPositions(initial_positions)\n context.setVelocities(initial_velocities)\n integrator.step(nsteps)\n final_velocities = context.getState(getVelocities=True).getVelocities()\n del context, integrator\n return final_velocities\n\n # Compare sub-stepped MTS with single-step MTS\n for nsubsteps in range(2,6):\n mts_velocities = get_final_velocities(nsubsteps)\n self.assertAlmostEqual(math.exp(-timestep*nsteps*collision_rate), mts_velocities[0].x)\n self.assertAlmostEqual(0, mts_velocities[0].y)\n self.assertAlmostEqual(0, mts_velocities[0].z)\n\n def testNoseHooverIntegrator(self):\n \"\"\"Test partial thermostating in the NoseHooverIntegrator (only API)\"\"\"\n pdb = PDBFile('systems/alanine-dipeptide-explicit.pdb')\n ff = ForceField('amber99sbildn.xml', 'tip3p.xml')\n system = ff.createSystem(pdb.topology, nonbondedMethod=PME)\n\n integrator = NoseHooverIntegrator(1.0*femtosecond)\n integrator.addSubsystemThermostat(list(range(5)), [], 200*kelvin, 1/picosecond, 200*kelvin, 1/picosecond, 3,3,3)\n con = Context(system, integrator)\n con.setPositions(pdb.positions)\n\n integrator.step(5)\n self.assertNotEqual(integrator.computeHeatBathEnergy(), 0.0*kilojoule_per_mole)\n\n def testDrudeNoseHooverIntegrator(self):\n \"\"\"Test the DrudeNoseHooverIntegrator\"\"\"\n warnings.filterwarnings('ignore', category=CharmmPSFWarning)\n psf = CharmmPsfFile('systems/ala3_solv_drude.psf')\n crd = CharmmCrdFile('systems/ala3_solv_drude.crd')\n params = CharmmParameterSet('systems/toppar_drude_master_protein_2013e.str')\n # Box dimensions (cubic box)\n psf.setBox(33.2*angstroms, 33.2*angstroms, 33.2*angstroms)\n\n system = psf.createSystem(params, nonbondedMethod=PME, ewaldErrorTolerance=0.0005)\n integrator = DrudeNoseHooverIntegrator(300*kelvin, 1.0/picosecond, 1*kelvin, 10/picosecond, 0.001*picoseconds)\n con = Context(system, integrator)\n con.setPositions(crd.positions)\n\n integrator.step(5)\n self.assertNotEqual(integrator.computeHeatBathEnergy(), 0.0*kilojoule_per_mole)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"openmm/openmm","sub_path":"wrappers/python/tests/TestIntegrators.py","file_name":"TestIntegrators.py","file_ext":"py","file_size_in_byte":10314,"program_lang":"python","lang":"en","doc_type":"code","stars":1274,"dataset":"github-code","pt":"78"} 
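The temperature assertion in the MTSLangevinIntegrator test above is an equipartition estimate: with constraints and center-of-mass motion removed, each remaining degree of freedom carries R*T/2 of kinetic energy on average, hence temperature = averageEnergy*2/(dofs*MOLAR_GAS_CONSTANT_R). A minimal standalone sketch of that estimate (plain Python, no OpenMM required; the particle counts and the energy value below are illustrative, not taken from the test):

# Equipartition: <KE> = (dofs / 2) * R * T  =>  T = 2 * <KE> / (dofs * R)
R = 8.31446261815324e-3  # molar gas constant in kJ/(mol K), the units of OpenMM's MOLAR_GAS_CONSTANT_R

def estimate_temperature(avg_kinetic_energy, num_particles, num_constraints):
    # 3 translational dofs per particle, minus one per constraint,
    # minus 3 for the removed center-of-mass motion (same dof count as the test)
    dofs = 3 * num_particles - num_constraints - 3
    return 2.0 * avg_kinetic_energy / (dofs * R)

# 100 unconstrained particles at 300 K average dofs*R*T/2 = 297 * 8.314e-3 * 150 ~ 370.4 kJ/mol:
print(estimate_temperature(370.4, 100, 0))  # -> ~300 K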
+{"seq_id":"70065463612","text":"import math\nimport torch\nimport torch.nn as nn\n\nclass RelativePositionEmbedding(nn.Module):\n \"\"\" Relative Position Embedding\n https://arxiv.org/abs/1910.10683\n https://github.com/bojone/bert4keras/blob/db236eac110a67a587df7660f6a1337d5b2ef07e/bert4keras/layers.py#L663\n https://github.com/huggingface/transformers/blob/master/src/transformers/models/t5/modeling_t5.py#L344\n \"\"\"\n def __init__(self, heads_num, bidirectional = True, num_buckets = 32, max_distance = 128):\n super(RelativePositionEmbedding, self).__init__()\n self.num_buckets = num_buckets\n self.bidirectional = bidirectional\n self.max_distance = max_distance\n self.relative_attention_bias = nn.Embedding(self.num_buckets, heads_num)\n\n def forward(self, encoder_hidden, decoder_hidden):\n \"\"\"\n Compute binned relative position bias\n Args:\n encoder_hidden: [batch_size x seq_length x emb_size]\n decoder_hidden: [batch_size x seq_length x emb_size]\n Returns:\n position_bias: [1 x heads_num x seq_length x seq_length]\n \"\"\"\n query_length = encoder_hidden.size()[1]\n key_length = decoder_hidden.size()[1]\n\n context_position = torch.arange(query_length, dtype=torch.long)[:, None]\n memory_position = torch.arange(key_length, dtype=torch.long)[None, :]\n relative_position = memory_position - context_position # shape (query_length, key_length)\n relative_position_bucket = self.relative_position_bucket(\n relative_position, # shape (query_length, key_length)\n bidirectional=self.bidirectional,\n num_buckets=self.num_buckets,\n max_distance=self.max_distance\n )\n relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device)\n values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)\n values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)\n return values\n\n def relative_position_bucket(self, relative_position, bidirectional, num_buckets, max_distance):\n \"\"\"\n Adapted from Mesh Tensorflow:\n https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593\n Translate relative position to a bucket number for relative attention. The relative position is defined as\n memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to\n position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for\n small absolute relative_position and larger buckets for larger absolute relative_positions. All relative\n positions >=max_distance map to the same bucket. 
All relative positions <=-max_distance map to the same bucket.\n This should allow for more graceful generalization to longer sequences than the model has been trained on\n Args:\n relative_position: an int32 Tensor\n bidirectional: a boolean - whether the attention is bidirectional\n num_buckets: an integer\n max_distance: an integer\n Returns:\n a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)\n \"\"\"\n relative_buckets = 0\n if bidirectional:\n num_buckets //= 2\n relative_buckets += (relative_position > 0).to(torch.long) * num_buckets\n relative_position = torch.abs(relative_position)\n else:\n relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))\n # now relative_position is in the range [0, inf)\n\n # half of the buckets are for exact increments in positions\n max_exact = num_buckets // 2\n is_small = relative_position < max_exact\n\n # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance\n relative_postion_if_large = max_exact + (\n torch.log(relative_position.float() / max_exact)\n / math.log(max_distance / max_exact)\n * (num_buckets - max_exact)\n ).to(torch.long)\n relative_postion_if_large = torch.min(\n relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1)\n )\n\n relative_buckets += torch.where(is_small, relative_position, relative_postion_if_large)\n return relative_buckets\n","repo_name":"dbiir/UER-py","sub_path":"uer/layers/relative_position_embedding.py","file_name":"relative_position_embedding.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","stars":2802,"dataset":"github-code","pt":"78"} +{"seq_id":"16063191785","text":"# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\n\r\n#from functools import total_ordering\r\n\r\nimport colorama\r\nfrom colorama import Fore\r\nfrom colorama import Back\r\n\r\nclass Vertex:\r\n def __init__(self, node):\r\n self.id = node\r\n self.adjacent = {}\r\n # Set distance to infinity for all nodes\r\n self.distance = sys.maxsize\r\n # Mark all nodes unvisited \r\n self.visited = False \r\n # Predecessor\r\n self.previous = None\r\n\r\n\r\n def add_neighbor(self, neighbor, weight=0):\r\n self.adjacent[neighbor] = weight\r\n\r\n\r\n def get_connections(self):\r\n return self.adjacent.keys() \r\n\r\n\r\n def get_id(self):\r\n return self.id\r\n\r\n\r\n def get_weight(self, neighbor):\r\n return self.adjacent[neighbor]\r\n\r\n\r\n def set_distance(self, dist):\r\n self.distance = dist\r\n\r\n\r\n def get_distance(self):\r\n return self.distance\r\n\r\n\r\n def set_previous(self, prev):\r\n self.previous = prev\r\n\r\n\r\n def set_visited(self):\r\n self.visited = True\r\n\r\n\r\n def __str__(self):\r\n return str(self.id) + ' adjacent: ' + str([x.id for x in self.adjacent])\r\n \r\n def __eq__(self, other):\r\n if isinstance(other, self.__class__):\r\n return self.distance == other.distance\r\n return NotImplemented\r\n\r\n def __lt__(self, other):\r\n if isinstance(other, self.__class__):\r\n return self.distance < other.distance\r\n return NotImplemented\r\n def __hash__(self):\r\n return id(self)\r\n\r\n\r\nclass Graph:\r\n def __init__(self):\r\n self.vert_dict = {}\r\n self.num_vertices = 0\r\n\r\n\r\n def __iter__(self):\r\n return iter(self.vert_dict.values())\r\n\r\n\r\n def add_vertex(self, node):\r\n self.num_vertices = self.num_vertices + 1\r\n new_vertex = Vertex(node)\r\n self.vert_dict[node] = new_vertex\r\n return 
new_vertex\r\n\r\n\r\n def get_vertex(self, n):\r\n if n in self.vert_dict:\r\n return self.vert_dict[n]\r\n else:\r\n return None\r\n\r\n\r\n def add_edge(self, frm, to, cost = 0):\r\n if frm not in self.vert_dict:\r\n self.add_vertex(frm)\r\n if to not in self.vert_dict:\r\n self.add_vertex(to)\r\n\r\n\r\n self.vert_dict[frm].add_neighbor(self.vert_dict[to], cost)\r\n self.vert_dict[to].add_neighbor(self.vert_dict[frm], cost)\r\n\r\n\r\n def get_vertices(self):\r\n return self.vert_dict.keys()\r\n\r\n\r\n def set_previous(self, current):\r\n self.previous = current\r\n\r\n\r\n def get_previous(self, current):\r\n return self.previous\r\n\r\n\r\ndef shortest(v, path):\r\n ''' make shortest path from v.previous'''\r\n if v.previous:\r\n path.append(v.previous.get_id())\r\n shortest(v.previous, path)\r\n return\r\n\r\n\r\n\r\n\r\nimport heapq\r\n\r\n\r\ndef dijkstra(Graph, start):\r\n print (\"Dijkstra's shortest path\")\r\n \r\n # Set the distance for the start node to zero \r\n start.set_distance(0)\r\n\r\n\r\n # Put tuple pair into the priority queue\r\n unvisited_queue = [(v.get_distance(),v) for v in Graph]\r\n heapq.heapify(unvisited_queue)\r\n\r\n\r\n while len(unvisited_queue):\r\n # Pops a vertex with the smallest distance \r\n uv = heapq.heappop(unvisited_queue)\r\n currentnode = uv[1]\r\n currentnode.set_visited()\r\n\r\n\r\n #for next in v.adjacent:\r\n for next in currentnode.adjacent:\r\n # if visited, skip\r\n if next.visited:\r\n continue\r\n new_dist = currentnode.get_distance() + currentnode.get_weight(next)\r\n \r\n if new_dist < next.get_distance():\r\n next.set_distance(new_dist)\r\n next.set_previous(currentnode)\r\n print (Fore.BLUE + 'currentnode = %s nextnode = %s new_dist = %s' \\\r\n %(currentnode.get_id(), next.get_id(), next.get_distance()))\r\n \r\n else:\r\n print (Fore.RED + 'ignore : currentnode = %s nextnode = %s new_dist = %s' \\\r\n %(currentnode.get_id(), next.get_id(), next.get_distance()))\r\n \r\n\r\n\r\n # Rebuild heap\r\n # 1. Pop every item\r\n while len(unvisited_queue):\r\n heapq.heappop(unvisited_queue)\r\n # 2. 
Put all vertices not visited into the queue\r\n unvisited_queue = [(v.get_distance(),v) for v in Graph if not v.visited]\r\n heapq.heapify(unvisited_queue)\r\n \r\nif __name__ == '__main__':\r\n\r\n # Adding vertex\r\n g = Graph()\r\n\r\n g.add_vertex('a')\r\n g.add_vertex('b')\r\n g.add_vertex('c')\r\n g.add_vertex('d')\r\n g.add_vertex('e')\r\n g.add_vertex('f')\r\n g.add_vertex('g')\r\n g.add_vertex('h')\r\n g.add_vertex('i')\r\n g.add_vertex('j')\r\n g.add_vertex('k')\r\n g.add_vertex('l')\r\n g.add_vertex('m')\r\n g.add_vertex('n')\r\n g.add_vertex('o')\r\n g.add_vertex('p')\r\n g.add_vertex('q')\r\n g.add_vertex('r')\r\n g.add_vertex('s')\r\n g.add_vertex('t')\r\n\r\n\r\n# Creating edges\r\n \r\n g.add_edge('a', 'b', 10) \r\n g.add_edge('a', 'f', 13)\r\n g.add_edge('b', 'g', 7)\r\n g.add_edge('b', 'c', 17)\r\n g.add_edge('c', 'd', 7)\r\n g.add_edge('c', 'h', 3)\r\n g.add_edge('d', 'e', 14)\r\n g.add_edge('d', 'i', 9)\r\n g.add_edge('e', 'j', 13)\r\n g.add_edge('f', 'g', 6) \r\n g.add_edge('f', 'k', 15)\r\n g.add_edge('g', 'h', 5)\r\n g.add_edge('g', 'l', 10)\r\n g.add_edge('h', 'i', 18)\r\n g.add_edge('h', 'm', 7)\r\n g.add_edge('i', 'j', 4)\r\n g.add_edge('i', 'n', 2)\r\n g.add_edge('j', 'o', 4)\r\n g.add_edge('k', 'l', 5) \r\n g.add_edge('k', 'p', 17)\r\n g.add_edge('l', 'm', 8)\r\n g.add_edge('l', 'q', 13)\r\n g.add_edge('m', 'n', 7)\r\n g.add_edge('m', 'r', 9)\r\n g.add_edge('n', 'o', 20)\r\n g.add_edge('n', 's', 14)\r\n g.add_edge('o', 't', 7)\r\n g.add_edge('p', 'q', 14) \r\n g.add_edge('q', 'r', 11)\r\n g.add_edge('r', 's', 9)\r\n g.add_edge('s', 't', 12)\r\n\r\n# Print graph data that was created\r\n \r\n print ('Graph data:')\r\n for v in g:\r\n for w in v.get_connections():\r\n vid = v.get_id()\r\n wid = w.get_id()\r\n print (' %s , %s, %3d ' % ( vid, wid, v.get_weight(w)))\r\n\r\n# Calling dijkstra function to calculate shortest path from Source A to T\r\n \r\n dijkstra(g, g.get_vertex('a')) \r\n\r\n# Setting the target\r\n target = g.get_vertex('t')\r\n \r\n # Save the path\r\n path = [target.get_id()]\r\n shortest(target, path)\r\n \r\n # printing the shortest path \r\n print (Fore.GREEN + 'The shortest path : %s' %(path[::-1]))\r\n \r\n'''\r\nWe have used system-specific parameters and functions, and also imported functools for\r\ntotal ordering.\r\nThe comparison operators (<, <=, >, >=, == and !=) can be overloaded by providing definitions for the\r\n__lt__, __hash__ and __eq__ methods; these are used to compare distances between objects.\r\n\r\nThe graph is represented through Vertex objects.\r\nIn this implementation there are two classes: Graph, which holds the collection of\r\nvertices, and Vertex, which uses a dictionary to implement the adjacency list.\r\nThe add_neighbor method records a connection from one vertex to another.\r\nHashable objects are used in order to support custom comparisons, and a heap queue (heapq) serves as\r\nthe priority queue used to push, pop and maintain the heap structure.\r\n \r\nAlgorithm steps:\r\n\r\n1. Mark all vertices unvisited.\r\n2. Set the distance to zero for our initial node and to infinity for all other nodes.\r\n3. Select the unvisited node with the smallest distance, which becomes the current node.\r\n4. Find the unvisited neighbors of the current node and calculate their distances through the current node.\r\n5. Compare each newly calculated distance to the assigned one and save the smaller value.\r\n6. Mark the current node as visited and remove it from the unvisited set.\r\n7. The algorithm stops once the 
destination node has been visited \r\nand calculates the shortest path.\r\n\r\n'''\r\n \r\n\r\n\r\n \r\n\r\n \r\n","repo_name":"shravani0585/wildcat","sub_path":"DIJKSTRA_ALGO.py","file_name":"DIJKSTRA_ALGO.py","file_ext":"py","file_size_in_byte":7966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26011583815","text":"from collections import Counter\nfrom time import time\n\nstime = time()\n\ndef oddElim(L1):\n n = 1\n if len(L1) == 1:\n return L1[0]\n while len(L1) > 1:\n if n == 1:\n L1[0:len(L1):2] = [0]*len(range(0,len(L1),2))\n L1 = list(filter(None, L1))\n n *= -1\n elif n == -1:\n L1[-1:-(len(L1)+1):-2] = [0]*len(range(-1,-(len(L1)+1),-2))\n L1 = list(filter(None, L1))\n n *= -1\n return(L1[0])\n \nL2 = []\n \nfor x in range(1,1001):\n L1 = [k for k in range(1,x+1)]\n L2.append(oddElim(L1))\n\nL3 = list(set(L2))\nL3.sort()\n\nL4 = {}\n\nfor x in L3:\n L4[x] = 0\n\nfor x in L2:\n L4[x] += 1\n \nprint(time()-stime)\n\n#for x in sorted(L4):\n# print(x,L4[x])\n\nprint(sum(L2))\n","repo_name":"arashrai/Project-Euler","sub_path":"PEuler539inProgress.py","file_name":"PEuler539inProgress.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41062930057","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nimport findScore\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Blackjack Detector\")\r\n\r\n\r\nmain = ttk.Frame(root, padding=(30, 15))\r\nmain.grid()\r\n\r\nstart_button = ttk.Button(main, text='Start', command=findScore.app)\r\nstart_button.grid(column=1, row=1)\r\n\r\nstart_button = ttk.Button(main, text='WEBCAM', command=findScore.app_webcam)\r\nstart_button.grid(column=0, row=1)\r\n\r\nroot.mainloop()\r\n","repo_name":"ramirez09r/OpenCV_Project","sub_path":"Presentation_ready_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39299532842","text":"#!/usr/bin/env python3\ngems = [\n \"ruby\",\n \"sapphire\",\n \"zircon\",\n \"garnet\",\n \"jade\",\n \"jasper\",\n \"spinel\",\n \"topaz\",\n \"agate\"\n]\n\nrecipe = \"{{\\n \\\"type\\\": \\\"minecraft:crafting_shapeless\\\",\\n \\\"ingredients\\\": [\\n {{\\n \\\"item\\\": \\\"minecraft:stone\\\"\\n }},\\n {{\\n \\\"item\\\": \\\"frivycat:{g}\\\"\\n }},\\n {{\\n \\\"item\\\": \\\"frivycat:{g}\\\"\\n }}\\n ],\\n \\\"result\\\": {{\\n \\\"item\\\": \\\"frivycat:{g}_ore\\\",\\n \\\"count\\\": 1\\n }},\\n \\\"group\\\": \\\"ores\\\"\\n}}\"\n\nfor gem in gems:\n name = gem + \"_ore.json\"\n content = recipe.format(g = gem);\n file = open(name, \"w\")\n file.write(content)\n file.close()\n\nprint(\"Done\")","repo_name":"ChaosDogG/FrivyCat-Inc-Mod","sub_path":"scripts/gen_gem_ore_recipe.py","file_name":"gen_gem_ore_recipe.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"20455789624","text":"\"\"\"\nFile: add2.py\nName:\n------------------------\nTODO:\n\"\"\"\n\nimport sys\n\n\nclass ListNode:\n def __init__(self, data=0, pointer=None):\n self.val = data\n self.next = pointer\n\n\ndef add_2_numbers(l1: ListNode, l2: ListNode) -> ListNode:\n cur_1 = l1\n cur_2 = l2\n total = cur_1.val + cur_2.val\n next_add_one = False\n if total >= 10:\n total -= 10\n next_add_one = True\n l3 = ListNode(total, None)\n cur_3 = l3\n while cur_1.next is not None or 
cur_2.next is not None:\n total = 0\n if next_add_one:\n total += 1\n next_add_one = False\n if cur_1.next is not None:\n cur_1 = cur_1.next\n total += cur_1.val\n if cur_2.next is not None:\n cur_2 = cur_2.next\n total += cur_2.val\n\n if total >= 10:\n total = total - 10\n next_add_one = True\n cur_3.next = ListNode(total, None)\n cur_3 = cur_3.next\n if next_add_one:\n cur_3.val = 1\n return l3\n\n####### DO NOT EDIT CODE BELOW THIS LINE ########\n\n\ndef traversal(head):\n \"\"\"\n :param head: ListNode, the first node to a linked list\n -------------------------------------------\n This function prints out the linked list starting with head\n \"\"\"\n cur = head\n while cur.next is not None:\n print(cur.val, end='->')\n cur = cur.next\n print(cur.val)\n\n\ndef main():\n args = sys.argv[1:]\n if not args:\n print('Error: Please type\"python3 add2.py test1\"')\n else:\n if args[0] == 'test1':\n l1 = ListNode(2, None)\n l1.next = ListNode(4, None)\n l1.next.next = ListNode(3, None)\n l2 = ListNode(5, None)\n l2.next = ListNode(6, None)\n l2.next.next = ListNode(4, None)\n ans = add_2_numbers(l1, l2)\n print('---------test1---------')\n print('l1: ', end='')\n traversal(l1)\n print('l2: ', end='')\n traversal(l2)\n print('ans: ', end='')\n traversal(ans)\n print('-----------------------')\n elif args[0] == 'test2':\n l1 = ListNode(9, None)\n l1.next = ListNode(9, None)\n l1.next.next = ListNode(9, None)\n l1.next.next.next = ListNode(9, None)\n l1.next.next.next.next = ListNode(9, None)\n l1.next.next.next.next.next = ListNode(9, None)\n l1.next.next.next.next.next.next = ListNode(9, None)\n l2 = ListNode(9, None)\n l2.next = ListNode(9, None)\n l2.next.next = ListNode(9, None)\n l2.next.next.next = ListNode(9, None)\n ans = add_2_numbers(l1, l2)\n print('---------test2---------')\n print('l1: ', end='')\n traversal(l1)\n print('l2: ', end='')\n traversal(l2)\n print('ans: ', end='')\n traversal(ans)\n print('-----------------------')\n elif args[0] == 'test3':\n l1 = ListNode(0, None)\n l2 = ListNode(0, None)\n ans = add_2_numbers(l1, l2)\n print('---------test3---------')\n print('l1: ', end='')\n traversal(l1)\n print('l2: ', end='')\n traversal(l2)\n print('ans: ', end='')\n traversal(ans)\n print('-----------------------')\n else:\n print('Error: Please type\"python3 add2.py test1\"')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"chengti-wang/stanCode-Projects","sub_path":"StanCode Projects/boggle_game_solver/add2.py","file_name":"add2.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43016502212","text":"\nimport networkx as nx\nimport xml.etree.ElementTree as xml\nimport requests\nimport json\n\n\nclass UndirectedGraph:\n def __init__(self, path):\n self.graph = osm_xml_parser(path)\n\n\nREQUESTS_PER_CALL = 1500\n\n\ndef osm_xml_parser(file, from_file=False):\n \"\"\"Function to parse an osm file and create a network out of it.\n\n Parameters:\n filename - The filename of the file to import.\n Returns:\n graph - The created graph.\n \"\"\"\n\n # Parse the xml structure and initialize variables.\n if from_file:\n e = xml.parse(file).getroot()\n else:\n e = xml.fromstring(file)\n node_dict_tmp = {}\n graph = nx.Graph()\n\n # Allow these types of streets to be represented in the network by an edge.\n way_types = [\"footway\", \"residential\", \"living_street\", \"track\", \"road\", \"bridleway\", \"steps\", \"path\", \"cycleway\"]\n\n # Create nodes and edges.\n for i 
in e:\n # Nodes.\n if i.tag == \"node\":\n node_dict_tmp[i.attrib[\"id\"]] = [i.attrib[\"lat\"], i.attrib[\"lon\"]]\n\n # Edges.\n if i.tag == \"way\":\n insert = False\n directed = False\n max_speed_v = None\n way_tmp = []\n for j in i:\n if j.tag == \"nd\":\n way_tmp.append(j.attrib[\"ref\"])\n if j.tag == \"tag\":\n if j.attrib[\"k\"] == \"highway\":# and j.attrib[\"v\"] in way_types:\n insert = True\n if insert:\n if max_speed_v is None:\n graph.add_path(way_tmp)\n # if not directed:\n # graph.add_path(list(reversed(way_tmp)))\n else:\n graph.add_path(way_tmp, max_speed=max_speed_v)\n if not directed:\n graph.add_path(list(reversed(way_tmp)), max_speed=max_speed_v)\n\n # Extend the nodes by their geographical coordinates.\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}\n elevations = {}\n prev_limit = 0\n print(\"Total nodes {}\".format(len(node_dict_tmp.keys())))\n for index in range(1, int(len(node_dict_tmp.keys()) / REQUESTS_PER_CALL) + 2):\n limit = index * REQUESTS_PER_CALL\n if limit > len(node_dict_tmp.keys()):\n limit = prev_limit + len(node_dict_tmp.keys())\n keys = list(node_dict_tmp.keys())\n partial_keys = keys[prev_limit: limit]\n prev_limit = limit\n data = json.dumps({\"locations\": [{\"longitude\": float(node_dict_tmp[i][1]), \"latitude\": float(node_dict_tmp[i][0])} for i in partial_keys]})\n res = requests.post(\"https://api.open-elevation.com/api/v1/lookup\", data=data, headers=headers)\n if not res.ok:\n print(\"Error getting elevations: {}\".format(res))\n exit()\n else:\n print(\"Successfully obtained elevations\")\n elevations_json = json.loads(res.content.decode(\"utf-8\"))[\"results\"]\n for i, key in enumerate(partial_keys):\n elevations[key] = elevations_json[i]\n network_nodes = graph.nodes()\n for i in network_nodes:\n graph.node[i][\"coords\"] = [elevations[i][\"latitude\"], elevations[i][\"longitude\"]]\n graph.node[i][\"elevation\"] = elevations[i][\"elevation\"]\n graph.node[i][\"key\"] = i\n\n # Return the generated graph.\n return graph\n\n\nif __name__ == \"__main__\":\n # Specify the path to the OSM-XML file and call the parser.\n path_to_file = \"nufar.osm\"\n graph = osm_xml_parser(path_to_file, from_file=True)\n nx.write_gpickle(graph, 'graph-nofar.txt')\n","repo_name":"akrabio/BestElevationPath","sub_path":"osm_parser.py","file_name":"osm_parser.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"223664793","text":"from PyQt5.QtCore import (QCoreApplication, QRect, QMetaObject, Qt)\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QIcon, QPixmap\nimport backend\nimport sys\nfrom exeptions import *\nfrom os import remove\nfrom glob import glob\n\n\nclass HistogramWindow(QWidget):\n\n def __init__(self, picture, parent=None):\n super(HistogramWindow, self).__init__(parent)\n self.setWindowTitle('Histogram')\n self.setWindowIcon(QIcon('statistics.png'))\n label = QLabel(self)\n picture = QPixmap(picture)\n label.setPixmap(picture)\n self.resize(picture.width(), picture.height())\n\n def close_event(self, event):\n event.ignore()\n\n\nclass MainWindow(object):\n\n def __init__(self, main_window):\n\n if main_window.objectName():\n main_window.setObjectName('main_window')\n main_window.resize(400, 625)\n\n main_window.setWindowIcon(QIcon('statistics.png'))\n\n self.central_widget = QWidget(main_window)\n self.central_widget.setObjectName('central_widget')\n main_window.setCentralWidget(self.central_widget)\n\n 
self.menu_bar = QMenuBar(main_window)\n self.menu_bar.setObjectName('menu_bar')\n main_window.setMenuBar(self.menu_bar)\n\n self.status_bar = QStatusBar(main_window)\n self.status_bar.setObjectName('status_bar')\n main_window.setStatusBar(self.status_bar)\n\n self.container = QWidget(self.central_widget)\n self.container.setObjectName('grid_layout')\n self.container.setGeometry(QRect(20, 30, 360, 390))\n\n self.average = QPushButton(self.container)\n self.average.setObjectName('average')\n self.average.setGeometry(QRect(0, 200, 170, 25))\n\n self.upper_quartile = QPushButton(self.container)\n self.upper_quartile.setObjectName('upper_quartile')\n self.upper_quartile.setGeometry(QRect(0, 235, 170, 25))\n\n self.lower_quartile = QPushButton(self.container)\n self.lower_quartile.setObjectName('lower_quartile')\n self.lower_quartile.setGeometry(QRect(0, 305, 170, 25))\n\n self.dispersion = QPushButton(self.container)\n self.dispersion.setObjectName('dispersion')\n self.dispersion.setGeometry(QRect(0, 340, 170, 25))\n\n self.kurtosis = QPushButton(self.container)\n self.kurtosis.setObjectName('kurtosis')\n self.kurtosis.setGeometry(QRect(190, 200, 170, 25))\n\n self.skewness = QPushButton(self.container)\n self.skewness.setObjectName('skewness')\n self.skewness.setGeometry(QRect(190, 235, 170, 25))\n\n self.median_quartile = QPushButton(self.container)\n self.median_quartile.setObjectName('median_quartile')\n self.median_quartile.setGeometry(QRect(0, 270, 140, 25))\n\n self.show_histogram_button = QPushButton(self.container)\n self.show_histogram_button.setObjectName('show_histogram_button')\n self.show_histogram_button.setGeometry(QRect(150, 265, 60, 35))\n\n self.max = QPushButton(self.container)\n self.max.setObjectName('max')\n self.max.setGeometry(QRect(220, 270, 140, 25))\n\n self.min = QPushButton(self.container)\n self.min.setObjectName('min')\n self.min.setGeometry(QRect(190, 305, 170, 25))\n\n self.extent = QPushButton(self.container)\n self.extent.setObjectName('extent')\n self.extent.setGeometry(QRect(190, 340, 170, 25))\n\n self.upload_button = QPushButton(self.central_widget)\n self.upload_button.setObjectName('upload_button')\n self.upload_button.setGeometry(QRect(20, 405, 360, 35))\n\n self.input = QLineEdit(self.central_widget)\n self.input.setObjectName('input')\n self.input.setGeometry(20, 450, 200, 23)\n self.input.setPlaceholderText('1.0;2.0;3.0;4.0;....;n.0;')\n\n self.add_list_button = QPushButton(self.central_widget)\n self.add_list_button.setObjectName('add_list_button')\n self.add_list_button.setGeometry(235, 450, 70, 25)\n\n self.clear_list_button = QPushButton(self.central_widget)\n self.clear_list_button.setObjectName('clear_list_button')\n self.clear_list_button.setGeometry(308, 450, 70, 25)\n\n self.last_calculations = QListWidget(self.central_widget)\n self.last_calculations.setObjectName('calculate_button')\n self.last_calculations.setGeometry(QRect(20, 485, 360, 80))\n\n self.clear_database_button = QPushButton(self.central_widget)\n self.clear_database_button.setObjectName('clear_list_button')\n self.clear_database_button.setGeometry(20, 570, 170, 25)\n\n self.remove_ls_button = QPushButton(self.central_widget)\n self.remove_ls_button.setObjectName('remove_ls_button')\n self.remove_ls_button.setGeometry(210, 570, 170, 25)\n\n self.result = QLineEdit(self.container)\n self.result.setGeometry(0, 0, 360, 160)\n self.result.setReadOnly(True)\n self.result.setStyleSheet('font-size: 18pt;')\n\n self.clear_result_button = QPushButton(self.container)\n 
self.clear_result_button.setObjectName('clear_result_button')\n self.clear_result_button.setGeometry(0, 165, 360, 25)\n\n self.actual_ls = []\n\n self.last_calculations_list = []\n\n self.reload_data()\n\n self.retranslate(main_window)\n\n QMetaObject.connectSlotsByName(main_window)\n\n self.last_calculations.itemClicked.connect(self.item_click)\n self.upload_button.clicked.connect(self.upload)\n self.add_list_button.clicked.connect(self.add_list)\n self.clear_list_button.clicked.connect(self.clear_list)\n self.average.clicked.connect(self.calculate_average)\n self.upper_quartile.clicked.connect(self.calculate_upper_quartile)\n self.median_quartile.clicked.connect(self.calculate_median_quartile)\n self.lower_quartile.clicked.connect(self.calculate_lower_quartile)\n self.dispersion.clicked.connect(self.calculate_dispersion)\n self.kurtosis.clicked.connect(self.calculate_kurtosis)\n self.skewness.clicked.connect(self.calculate_skewness)\n self.max.clicked.connect(self.calculate_max)\n self.min.clicked.connect(self.calculate_min)\n self.extent.clicked.connect(self.calculate_extent)\n self.clear_result_button.clicked.connect(self.clear_result)\n self.clear_database_button.clicked.connect(self.clear_database)\n self.remove_ls_button.clicked.connect(self.remove_ls_from_database)\n self.show_histogram_button.clicked.connect(self.show_histogram)\n\n def retranslate(self, main_window):\n main_window.setWindowTitle(QCoreApplication.translate('main_window', 'Statistics'))\n self.upload_button.setText(QCoreApplication.translate('main_window', 'Upload..'))\n self.add_list_button.setText(QCoreApplication.translate('main_window', 'Add'))\n self.clear_list_button.setText(QCoreApplication.translate('main_window', 'Clear'))\n self.average.setText(QCoreApplication.translate('main_window', 'Average'))\n self.upper_quartile.setText(QCoreApplication.translate('main_window', 'Upper quartile'))\n self.median_quartile.setText(QCoreApplication.translate('main_window', 'Median quartile'))\n self.lower_quartile.setText(QCoreApplication.translate('main_window', 'Lower quartile'))\n self.dispersion.setText(QCoreApplication.translate('main_window', 'Dispersion'))\n self.kurtosis.setText(QCoreApplication.translate('main_window', 'Kurtosis'))\n self.skewness.setText(QCoreApplication.translate('main_window', 'Skewness'))\n self.max.setText(QCoreApplication.translate('main_window', 'Max'))\n self.min.setText(QCoreApplication.translate('main_window', 'Min'))\n self.extent.setText(QCoreApplication.translate('main_window', 'Extent'))\n self.clear_database_button.setText(QCoreApplication.translate('main_window', 'Clear database'))\n self.remove_ls_button.setText(QCoreApplication.translate('main_window', 'Remove item'))\n self.clear_result_button.setText(QCoreApplication.translate('main_window', 'Clear'))\n self.show_histogram_button.setText(QCoreApplication.translate('main_window', 'Histogram'))\n\n def upload(self):\n url = str(QFileDialog.getOpenFileName())\n uploaded_file = url.split(\"'\")[1]\n filename_test = str(uploaded_file).split('.')[-1]\n ls = []\n if uploaded_file != '':\n file = open(uploaded_file, 'r')\n try:\n if filename_test == 'txt':\n for line in file:\n tmp = line.split(';')\n for element in tmp:\n test_element = StringToFloat(element)\n if not test_element.is_float():\n unusable_element = element\n raise ElementIsNotUsable\n else:\n ls.append(float(element))\n else:\n raise FileIsNotTxt\n if len(ls) != 0:\n new_item = backend.List(ls)\n is_in_list = False\n for element in self.last_calculations_list:\n if 
new_item.__str__() == element.__str__():\n is_in_list = True\n if not is_in_list:\n new_item.create_histogram()\n self.last_calculations_list.append(new_item)\n self.save_to_file()\n self.reload_data()\n else:\n self.already_exists_error()\n else:\n raise EmptyListError\n except FileIsNotTxt:\n message = QMessageBox()\n message.setWindowTitle('File is not .txt!')\n message.setText(\"The actual file is not a .txt, please try to upload a .txt file!\")\n message.setIcon(QMessageBox.Warning)\n message.exec()\n except ElementIsNotUsable:\n message = QMessageBox()\n message.setWindowTitle('Unusable item!')\n message.setText(\"One of the inserted items ({}) can't be \"\n \"used so it must be removed!\".format(unusable_element))\n message.setIcon(QMessageBox.Warning)\n message.exec()\n except EmptyListError:\n message = QMessageBox()\n message.setWindowTitle('No usable data found!')\n message.setText(\"There were no usable elements in the .txt file!\")\n message.setIcon(QMessageBox.Warning)\n message.exec()\n\n def add_list(self):\n input_list = self.input.text()[:-1]\n ls = []\n try:\n if input_list != '':\n input_list = input_list.split(';')\n for element in input_list:\n if element != '':\n test_element = StringToFloat(element)\n if not test_element.is_float():\n unusable_element = element\n raise ElementIsNotUsable\n else:\n ls.append(float(element))\n if len(ls) != 0:\n new_item = backend.List(ls)\n is_in_list = False\n for element in self.last_calculations_list:\n if new_item.__str__() == element.__str__():\n is_in_list = True\n if not is_in_list:\n new_item.create_histogram()\n self.last_calculations_list.append(new_item)\n self.save_to_file()\n self.reload_data()\n else:\n self.already_exists_error()\n else:\n raise EmptyListError\n else:\n raise EmptyListError\n except ElementIsNotUsable:\n message = QMessageBox()\n message.setWindowTitle('Unusable item!')\n message.setText(\"One of the inserted items ({}) can't be \"\n \"used so it must be removed!\".format(unusable_element))\n message.setIcon(QMessageBox.Warning)\n message.exec()\n except EmptyListError:\n message = QMessageBox()\n message.setWindowTitle('No usable data found!')\n message.setText(\"There were no usable elements in the input!\")\n message.setIcon(QMessageBox.Warning)\n message.exec()\n\n def clear_list(self):\n self.input.clear()\n self.input.setReadOnly(False)\n self.add_list_button.setCheckable(True)\n\n def clear_result(self):\n self.result.clear()\n\n def clear_database(self):\n file = open('database.txt', 'w')\n file.write('')\n file.close()\n images = glob('plots/*')\n for image in images:\n remove(image)\n self.reload_data()\n\n def show_histogram(self):\n if not self.actual_ls == []:\n picture = 'plots/' + self.actual_ls.__str__() + '.jpg'\n self.ui = HistogramWindow(picture)\n self.ui.show()\n else:\n return self.no_item_selected_error()\n\n def close_event(self, event):\n widget_list = QApplication.topLevelWidgets()\n num_windows = len(widget_list)\n if num_windows > 1:\n event.accept()\n else:\n event.ignore()\n\n def calculate_average(self):\n try:\n data = self.actual_ls\n if not isinstance(data, backend.List):\n raise UnselectedItemError\n except UnselectedItemError:\n return self.no_item_selected_error()\n else:\n self.result.setText(str(data.get_avg()))\n\n def calculate_upper_quartile(self):\n try:\n data = self.actual_ls\n if not isinstance(data, backend.List):\n raise UnselectedItemError\n except UnselectedItemError:\n self.no_item_selected_error()\n else:\n quartiles = data.get_quartiles()\n for 
index in range(len(quartiles)):\n if index == 2:\n self.result.setText(str(quartiles[index]))\n\n def calculate_median_quartile(self):\n try:\n data = self.actual_ls\n if not isinstance(data, backend.List):\n raise UnselectedItemError\n except UnselectedItemError:\n self.no_item_selected_error()\n else:\n quartiles = data.get_quartiles()\n for index in range(len(quartiles)):\n if index == 1:\n self.result.setText(str(quartiles[index]))\n\n def calculate_lower_quartile(self):\n try:\n data = self.actual_ls\n if not isinstance(data, backend.List):\n raise UnselectedItemError\n except UnselectedItemError:\n self.no_item_selected_error()\n else:\n quartiles = data.get_quartiles()\n for index in range(len(quartiles)):\n if index == 0:\n self.result.setText(str(quartiles[index]))\n\n def calculate_dispersion(self):\n try:\n data = self.actual_ls\n if not isinstance(data, backend.List):\n raise UnselectedItemError\n except UnselectedItemError:\n self.no_item_selected_error()\n else:\n self.result.setText(str(data.get_dispersion()))\n\n def calculate_kurtosis(self):\n try:\n data = self.actual_ls\n if not isinstance(data, backend.List):\n raise UnselectedItemError\n except UnselectedItemError:\n self.no_item_selected_error()\n else:\n self.result.setText(str(data.get_kurtosis()))\n\n def calculate_skewness(self):\n try:\n data = self.actual_ls\n if not isinstance(data, backend.List):\n raise UnselectedItemError\n except UnselectedItemError:\n self.no_item_selected_error()\n else:\n self.result.setText(str(data.get_skewness()))\n\n def calculate_max(self):\n try:\n data = self.actual_ls\n if not isinstance(data, backend.List):\n raise UnselectedItemError\n except UnselectedItemError:\n self.no_item_selected_error()\n else:\n self.result.setText(str(data.get_max()))\n\n def calculate_min(self):\n try:\n data = self.actual_ls\n if not isinstance(data, backend.List):\n raise UnselectedItemError\n except UnselectedItemError:\n self.no_item_selected_error()\n else:\n self.result.setText(str(data.get_min()))\n\n def calculate_extent(self):\n try:\n data = self.actual_ls\n if not isinstance(data, backend.List):\n raise UnselectedItemError\n except UnselectedItemError:\n self.no_item_selected_error()\n else:\n self.result.setText(str(data.get_extent()))\n\n def save_to_file(self):\n file = open('database.txt', 'w')\n last_calculations = self.last_calculations_list\n for calculation in last_calculations:\n file.write(calculation.__str__())\n file.write('\\n')\n file.close()\n\n def reload_data(self):\n file = open('database.txt', 'r')\n self.last_calculations_list.clear()\n self.last_calculations.clear()\n for line in file:\n ls = []\n data = line.split(';')[:-1]\n for element in data:\n if element != '':\n ls.append(float(element))\n if len(ls) != 0:\n list_element = backend.List(ls)\n self.last_calculations_list.append(list_element)\n for element in self.last_calculations_list:\n self.last_calculations.addItem(QListWidgetItem(element.__str__()))\n\n def item_click(self, item):\n tmp = item.text()\n for element in self.last_calculations_list:\n if tmp == element.__str__():\n self.actual_ls = element\n self.input.setText(element.__str__())\n\n def remove_ls_from_database(self):\n try:\n if not isinstance(self.actual_ls, backend.List):\n raise UnselectedItemError\n except UnselectedItemError:\n self.no_item_selected_error()\n else:\n for element in self.last_calculations_list:\n if element.__str__() == self.actual_ls.__str__():\n self.last_calculations_list.remove(self.actual_ls)\n remove('plots/' + 
element.__str__() + '.jpg')\n self.save_to_file()\n self.reload_data()\n self.actual_ls = []\n\n @staticmethod\n def no_item_selected_error():\n message = QMessageBox()\n message.setWindowTitle('Select a list!')\n message.setText(\"No list has been chosen! Please select one!\")\n message.setIcon(QMessageBox.Warning)\n message.exec()\n\n @staticmethod\n def already_exists_error():\n message = QMessageBox()\n message.setWindowTitle('List already exists!')\n message.setText(\"This list has already been added to your database.\\n\"\n \"There are a few things you can do:\\n\"\n \" - you can recall it by clicking on it\\n\"\n \" - you can clear this input and try to add a new one.\")\n message.setIcon(QMessageBox.Warning)\n message.exec()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = QMainWindow()\n ui = MainWindow(window)\n window.show()\n sys.exit(app.exec_())\n","repo_name":"zsoltardai/python_graphical_development","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39232420092","text":"def speed(a):\n if a<70:\n print(\"ok\")\n else:\n i=70\n count=0\n while i<a:\n i=i+5\n count=count+1\n point=count\n if point>12:\n return \"License suspended\" \n print(point)\n print(\"he can drive\")\nx=int(input(\"enter the number\"))\nprint(speed(x)) \n","repo_name":"Nehajha99/Python","sub_path":"function/que6.py","file_name":"que6.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18216273235","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.generics import GenericAPIView\nfrom ..permissions import IsAuthenticated\n\nfrom ..app_settings import (\n CreateAPIKeySerializer,\n UpdateAPIKeySerializer,\n DeleteAPIKeySerializer,\n)\nfrom ..models import (\n API_Key\n)\nfrom ..authentication import TokenAuthentication\n\nclass APIKeyView(GenericAPIView):\n\n \"\"\"\n Check the REST Token and return a list of all api_keys or the specified api_key's details\n \"\"\"\n\n authentication_classes = (TokenAuthentication, )\n permission_classes = (IsAuthenticated,)\n allowed_methods = ('GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'HEAD')\n\n\n def get(self, request, api_key_id = None, *args, **kwargs):\n \"\"\"\n Returns either a list of all api_keys with own access privileges or the member's specified api_key\n \n :param request:\n :type request:\n :param api_key_id:\n :type api_key_id:\n :param args:\n :type args:\n :param kwargs:\n :type kwargs:\n :return: 200 / 403\n :rtype:\n \"\"\"\n\n if not api_key_id:\n\n api_keys = []\n\n for api_key in API_Key.objects.filter(user=request.user):\n api_keys.append({\n 'id': api_key.id,\n 'title': api_key.title,\n 'read': api_key.read,\n 'write': api_key.write,\n 'restrict_to_secrets': api_key.restrict_to_secrets,\n 'allow_insecure_access': api_key.allow_insecure_access,\n 'active': api_key.active,\n })\n\n return Response({'api_keys': api_keys},\n status=status.HTTP_200_OK)\n else:\n # Returns the specified api_key if the user has any rights for it\n try:\n api_key = API_Key.objects.get(id=api_key_id, user=request.user)\n except API_Key.DoesNotExist:\n return Response({\"message\":\"NO_PERMISSION_OR_NOT_EXIST\",\n \"resource_id\": api_key_id}, status=status.HTTP_400_BAD_REQUEST)\n\n response = {\n 'id': api_key.id,\n 'title': api_key.title,\n 'public_key': api_key.public_key,\n 'private_key': 
api_key.private_key,\n                'private_key_nonce': api_key.private_key_nonce,\n                'secret_key': api_key.secret_key,\n                'secret_key_nonce': api_key.secret_key_nonce,\n                'read': api_key.read,\n                'write': api_key.write,\n                'restrict_to_secrets': api_key.restrict_to_secrets,\n                'allow_insecure_access': api_key.allow_insecure_access,\n                'active': api_key.active,\n            }\n\n\n            return Response(response,\n                status=status.HTTP_200_OK)\n\n    def put(self, request, *args, **kwargs):\n        \"\"\"\n        Creates an api_key\n\n        :param request:\n        :type request:\n        :param args:\n        :type args:\n        :param kwargs:\n        :type kwargs:\n        :return: 201 / 400\n        :rtype:\n        \"\"\"\n\n        serializer = CreateAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n\n        if not serializer.is_valid():\n\n            return Response(\n                serializer.errors, status=status.HTTP_400_BAD_REQUEST\n            )\n\n        api_key = API_Key.objects.create(\n            user = request.user,\n            title = str(serializer.validated_data.get('title')),\n            public_key = str(serializer.validated_data.get('public_key')),\n            private_key = str(serializer.validated_data.get('private_key')),\n            private_key_nonce = str(serializer.validated_data.get('private_key_nonce')),\n            secret_key = str(serializer.validated_data.get('secret_key')),\n            secret_key_nonce = str(serializer.validated_data.get('secret_key_nonce')),\n            user_private_key = str(serializer.validated_data.get('user_private_key')),\n            user_private_key_nonce = str(serializer.validated_data.get('user_private_key_nonce')),\n            user_secret_key = str(serializer.validated_data.get('user_secret_key')),\n            user_secret_key_nonce = str(serializer.validated_data.get('user_secret_key_nonce')),\n            verify_key = str(serializer.validated_data.get('verify_key')),\n            read = serializer.validated_data.get('read'),\n            write = serializer.validated_data.get('write'),\n            restrict_to_secrets = serializer.validated_data.get('restrict_to_secrets'),\n            allow_insecure_access = serializer.validated_data.get('allow_insecure_access'),\n        )\n\n        return Response({\n            \"api_key_id\": api_key.id,\n        }, status=status.HTTP_201_CREATED)\n\n    def post(self, request, *args, **kwargs):\n        \"\"\"\n        Updates an api_key\n\n        :param request:\n        :type request:\n        :param args:\n        :type args:\n        :param kwargs:\n        :type kwargs:\n        :return:\n        :rtype:\n        \"\"\"\n\n        serializer = UpdateAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n\n        if not serializer.is_valid():\n\n            return Response(\n                serializer.errors, status=status.HTTP_400_BAD_REQUEST\n            )\n\n        api_key = serializer.validated_data.get('api_key')\n        title = serializer.validated_data.get('title')\n        read = serializer.validated_data.get('read')\n        write = serializer.validated_data.get('write')\n        restrict_to_secrets = serializer.validated_data.get('restrict_to_secrets')\n        allow_insecure_access = serializer.validated_data.get('allow_insecure_access')\n\n        if title is not None:\n            api_key.title = title\n\n        if read is not None and api_key.read != read:\n            api_key.read = read\n            for token in api_key.tokens.all():\n                token.read = read\n                token.save()\n\n        if write is not None and api_key.write != write:\n            api_key.write = write\n            for token in api_key.tokens.all():\n                token.write = write\n                token.save()\n\n        if restrict_to_secrets is not None:\n            api_key.restrict_to_secrets = restrict_to_secrets\n\n        if allow_insecure_access is not None:\n            api_key.allow_insecure_access = allow_insecure_access\n\n        api_key.save()\n\n        return Response(status=status.HTTP_200_OK)\n\n    def delete(self, request, *args, **kwargs):\n        \"\"\"\n        Deletes an api_key\n\n        :param request:\n        :param args:\n        :param 
kwargs:\n        :return: 200 / 400\n        \"\"\"\n\n        serializer = DeleteAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n\n        if not serializer.is_valid():\n\n            return Response(\n                serializer.errors, status=status.HTTP_400_BAD_REQUEST\n            )\n\n        api_key = serializer.validated_data.get('api_key')\n\n        # delete it\n        api_key.delete()\n\n        return Response(status=status.HTTP_200_OK)\n","repo_name":"psono/psono-server","sub_path":"psono/restapi/views/api_key.py","file_name":"api_key.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"78"} +{"seq_id":"4646387935","text":"#!/usr/bin/env python3\n# Raspberry Pi Internet Radio Class\n# $Id: test_alsa.py,v 1.1 2021/05/17 05:29:32 bob Exp $\n#\n#\n# Author : Bob Rathbone\n# Site   : http://www.bobrathbone.com\n#\n# This program uses the python3-alsaaudio package\n# Use \"apt-get install python3-alsaaudio\" to install the library\n# See: https://pypi.org/project/python-mpd2/\n#\n# License: GNU V3, See https://www.gnu.org/copyleft/gpl.html\n#\n# Disclaimer: Software is provided as is and absolutely no warranties are implied or given.\n# The authors shall not be liable for any loss or damage however caused.\n#\n# See https://www.programcreek.com/python/example/91453/alsaaudio.PCM\n# https://larsimmisch.github.io/pyalsaaudio/libalsaaudio.html\n\nimport pdb\nimport alsaaudio\npcms = alsaaudio.pcms()\nfor pcm in pcms:\n    print(pcm)\nprint('')\ncards = alsaaudio.cards()\nfor card in cards:\n    print(card)\n","repo_name":"bobrathbone/piradio6","sub_path":"test_alsa.py","file_name":"test_alsa.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"78"} +{"seq_id":"3806085670","text":"#!/usr/bin/python3\n\"\"\"\nThis module provides a function to add two integers or floats.\n\nThe function, 'add_integer(a, b=98)', takes two numeric arguments,\n'a' and 'b', and returns their sum. If either 'a' or 'b' is a float,\nit first casts them to integers before performing the addition.\n\n\"\"\"\n\n\ndef add_integer(a, b=98):\n    \"\"\"\n    Adds two integers or floats and returns the result.\n\n    a and b are first cast to integers if they are floats.\n\n    Args:\n        a (int or float): The first number.\n        b (int or float): The second number. 
Default is 98.\n\n Raises:\n TypeError: If either 'a' or 'b' is not an integer or float.\n\n Returns:\n int: The sum of 'a' and 'b'.\n \"\"\"\n if not isinstance(a, int) and not isinstance(a, float):\n raise TypeError(\"a must be an integer\")\n if not isinstance(b, int) and not isinstance(b, float):\n raise TypeError(\"b must be an integer\")\n\n return int(a) + int(b)\n","repo_name":"Polaris-algedi/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/0-add_integer.py","file_name":"0-add_integer.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35028423403","text":"import pandas as pd\nfrom shutil import copyfile\nimport os\n\ndf = pd.read_csv('./train_info.csv')\nall_folders = os.listdir('./datasets/artset/')\n\nif not os.path.exists('./datasets/artset_genre'):\n os.mkdir('./datasets/artset_genre')\n\nco = 0\n\nnot_copied = []\nempty_label = []\n\nfor folder in all_folders:\n all_images = os.listdir(os.path.join('./datasets/artset', folder))\n for image in all_images:\n print(image)\n\n class_label = df[df['filename'] == image]['genre']\n \n if len(list(class_label)) == 0:\n print(\"{} has empty class label\".format(image))\n empty_label.append(image)\n continue\n \n try:\n class_label = str(list(class_label)[0])\n except:\n print(\"{} could not be copied.\".format(image))\n not_copied.append(image)\n continue\n\n if not os.path.exists(os.path.join('./datasets/artset_genre', class_label)):\n os.mkdir(os.path.join('./datasets/artset_genre', class_label))\n\n path_from = os.path.join('./datasets/artset', folder, image)\n path_to = os.path.join('./datasets/artset_genre', class_label, image)\n \n copyfile(path_from, path_to)\n print('Copy {} to {}'.format(image, path_to))\n co += 1\n\nprint('Copied {} images.'.format(co))\nprint('{} images not copied, because of empty label'.format(len(empty_label)))\nprint('Additional {} images not copied, because of unknown reason'.format(len(not_copied)))\nprint(\"list of images with empty labels:\")\nprint(empty_label)\nprint('other images that were not copied:')\nprint(not_copied)\n\n\n","repo_name":"thegialeo/DeepVisionProject","sub_path":"artset_style_to_genre.py","file_name":"artset_style_to_genre.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36365829526","text":"import numpy as np\nfrom parent import print\nfrom dezero import Variable\nfrom dezero.utils import plot_dot_graph\nfrom big_step2.step24 import goldstein\n\n\nif __name__ == '__main__':\n x = np.array(1.0)\n\n x = Variable(np.array(1), name='x = 1')\n y = Variable(np.array(1), name='y = 1')\n z = goldstein(x, y)\n z.backward()\n \n z.name = f'z = {z.data}'\n\n print(f'{goldstein.__name__}(1,1)', z)\n print('x.grad', x.grad)\n print('y.grad', y.grad, '\\n')\n \n plot_dot_graph(z, verbose=False, to_file='big_step3/graph/step26_goldstein.png')\n plot_dot_graph(z, verbose=True, to_file='big_step3/graph/step26_goldstein_verbose.png')\n","repo_name":"star14ms/Deep_Learning_3","sub_path":"big_step3/step26.py","file_name":"step26.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27855277474","text":"# https://codeforces.com/problemset/problem/706/B\n\nfrom bisect import bisect_right\nimport sys\n\ninput = sys.stdin.readline\n\nn = int(input())\nshops = 
list(map(int, input().split()))\nshops.sort()\nfor _ in range(int(input())):\n    # The bisect library makes binary search easy to use, so use it here\n    pos = bisect_right(shops, int(input()))\n    sys.stdout.write(str(pos) + \"\\n\")\n","repo_name":"thecode00/Algorithm-Problem-Solve","sub_path":"Codeforces/Python/Interesting drink/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4560525895","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport os, time\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Activation\nfrom keras.layers import Conv2D, MaxPooling2D, Input, concatenate\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import Adam, SGD\nfrom keras import backend as K\nfrom keras.utils.generic_utils import get_custom_objects\nfrom keras.models import Model\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications import VGG16\npath = '/home/ec2-user/Sanjay/projects/6_FaceDetection/Data/'\npath_cb = '/home/ec2-user/Sanjay/projects/6_FaceDetection/1_gender/'\ninput_shape = (28, 28, 3)\n\n\n# In[2]:\n\n\ndef swish(x):\n    return (K.sigmoid(x) * x)\n\nget_custom_objects().update({'swish': Activation(swish)})\nnp.random.seed(2017)\n\n\n# In[3]:\n\n\ndef define_model():\n    input_1 = Input(shape=input_shape, name='image')\n    convolve = Conv2D(64, kernel_size=(3, 3), padding='same')(input_1)\n    convolve = BatchNormalization()(convolve)\n    convolve = Activation('swish')(convolve)\n    convolve = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(convolve)\n    convolve = Conv2D(128, kernel_size=(3, 3), padding='same')(convolve)\n    convolve = BatchNormalization()(convolve)\n    convolve = Activation('swish')(convolve)\n    convolve = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(convolve)\n    convolve = Conv2D(256, kernel_size=(3, 3), padding='same')(convolve)\n    convolve = BatchNormalization()(convolve)\n    convolve = Activation('swish')(convolve)\n    convolve = Conv2D(256, kernel_size=(3, 3), padding='same')(convolve)\n    convolve = BatchNormalization()(convolve)\n    convolve = Activation('swish')(convolve)\n    convolve = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(convolve)\n    convolve = Conv2D(512, kernel_size=(3, 3), padding='same')(convolve)\n    convolve = BatchNormalization()(convolve)\n    convolve = Activation('swish')(convolve)\n    convolve = Conv2D(512, kernel_size=(3, 3), padding='same')(convolve)\n    convolve = BatchNormalization()(convolve)\n    convolve = Activation('swish')(convolve)\n    convolve = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(convolve)\n    convolve = Flatten()(convolve)\n    concat = Dense(512, activation='swish', kernel_initializer='he_normal')(convolve)\n    concat = Dropout(0.3)(concat)\n    concat = Dense(256, activation='swish', kernel_initializer='he_normal')(concat)\n    concat = Dropout(0.3)(concat)\n    predict = Dense(1, activation='sigmoid', kernel_initializer='he_normal')(concat)\n    model = Model(inputs=input_1, outputs=predict)\n    optimizer = Adam(lr=0.1)\n    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n    return model\n\n\n# In[4]:\n\n\nmodel = define_model()\nmodel.summary()\n\n\n# In[13]:\n\n\nparams = {}\nparams['horizontal_flip'] = True\nparams['vertical_flip'] = True\nparams['zoom_range'] = 
0.2\nparams['rotation_range'] = 10\n\n\n# In[14]:\n\n\ngenerator = ImageDataGenerator(**params)\ndef dataflow(image, label):\n flow_1 = generator.flow(image, label, batch_size=32,seed=2017)\n while True:\n tuple_1 = flow_1.next()\n yield tuple_1[0], tuple_1[1]\n\n\n# In[15]:\n\n\ndef callbacks(suffix):\n stop = EarlyStopping('val_loss', patience=25, mode=\"min\")\n path = path_cb + 'data/model/model_1/model_{}.hdf5'.format(suffix)\n save = ModelCheckpoint(path, save_best_only=True, save_weights_only=True)\n logger = CSVLogger(path_cb + 'data/model/model_1/logger_{}.log'.format(suffix))\n reduce = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=0, mode='min')\n return [stop, save, reduce, logger]\n\n\n# In[16]:\n\n\ntrain_image = np.load(path_cb + 'data/train/x_train_1.npy')\ntrain_label = np.load(path_cb + 'data/train/y_train_1.npy')\ntest_image = np.load(path_cb + 'data/score/x_test_1.npy')\ntest_label = np.load(path_cb + 'data/score/y_test_1.npy')\ntrain_generator = dataflow(train_image, train_label)\ntest_generator = (test_image, test_label)\n\n\n# In[ ]:\n\n\nparams = {}\nparams['generator'] = train_generator\nparams['validation_data'] = test_generator\nparams['steps_per_epoch'] = 20\nparams['epochs'] = 5\nparams['verbose'] = 1\nparams['callbacks'] = callbacks(2)\nmodel_1 = define_model()\nmodel_1.fit_generator(**params)\nK.clear_session()\n\n","repo_name":"sanjayagra/FaceDetection","sub_path":"FaceDetection-GenderPrediction/Model/model2_dataAugment.py","file_name":"model2_dataAugment.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4264297577","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division\nimport pandas as pd\nfrom diviner import file_utils as fu, calib, ana_utils as au\nimport numpy as np\nfrom joblib import Parallel, delayed\nimport sys\nimport os\nimport logging\nimport glob\nimport argparse\n\ndef get_calib(t, c, kwargs):\n l1a = fu.L1ADataFile.from_timestr(t)\n df = fu.open_and_accumulate(l1a.fname)\n rdr2 = calib.Calibrator(df, **kwargs)\n rdr2.calibrate()\n helper = au.CalibHelper(rdr2)\n return helper.get_c_rad_molten(c, t, 'norm')\n\n\ndef process_one_timestring(tstr, path, region, kwargs):\n savename = os.path.join(path, 'tstring_'+tstr+'.h5')\n logging.info('Processing {}, savename: {}'.format(tstr, savename))\n region_now = region[region.filetimestr == tstr]\n newrad = get_calib(tstr, 9, kwargs)\n oldrad = region_now[['det','radiance']]\n oldrad = oldrad.reset_index()\n newregion = newrad.merge(oldrad, on=['index','det']).set_index('index')\n newregion.to_hdf(savename,'df')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('session_name', help='session name. 
determines folder'\n                        ' name for storage as well.')\n    parser.add_argument('--do_jpl',help='do JPL calib simulation',\n                        action='store_true')\n    parser.add_argument('--no_rad_corr', help='switch off rad_corr for calib.',\n                        action='store_true')\n    parser.add_argument('--regions', help='determine what regions to do.',\n                        choices=[1,3,5], default=0, type=int)\n    args = parser.parse_args()\n\n\n    session_name = args.session_name\n    root = os.path.join('/raid1/maye/coldregions', session_name)\n    if not os.path.exists(root):\n        os.mkdir(root)\n    logging.basicConfig(filename='log_coldregions_'+session_name+'.log', level=logging.INFO)\n    \n    if args.regions == 0:\n        todo = [1,3,5]\n    else:\n        todo = [args.regions]\n    for region_no in todo:\n        print(\"Processing region {}\".format(region_no))\n        logging.info(\"Processing region {}\".format(region_no))\n        regionstr = 'region'+str(region_no)\n        regiondata = pd.read_hdf(os.path.join(root,\n                                              '..',\n                                              'regions_data.h5'),\n                                 regionstr)\n        path = os.path.join(root, regionstr)\n        if not os.path.exists(path):\n            os.mkdir(path)\n        timestrings = regiondata.filetimestr.unique()\n        \n        ###\n        # Control here how the calibration should be run!!\n        ###\n        if args.do_jpl:\n            kwargs = dict(do_jpl_calib=True)\n        elif args.no_rad_corr:\n            kwargs = dict(do_rad_corr=False)\n        else:\n            kwargs = dict(do_jpl_calib=False)\n        \n        Parallel(n_jobs=8, \n                 verbose=3)(delayed(process_one_timestring)(tstr,\n                                                            path,\n                                                            regiondata,\n                                                            kwargs)\n                            for tstr in timestrings)\n        \n        container = []\n        tstring_files = glob.glob(os.path.join(path, 'tstring_*.h5'))\n        for f in tstring_files:\n            container.append(pd.read_hdf(f, 'df'))\n            os.remove(f)\n        df = pd.concat(container)\n        df.to_hdf(os.path.join(path, regionstr+'_'+session_name+'.h5'), 'df')\n\n","repo_name":"cjtu/divcalib","sub_path":"bin/coldregions.py","file_name":"coldregions.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"33126074228","text":"import bpy\r\nimport math\r\nimport sys\r\nsys.path.append('.')\r\nimport env\r\nimport os\r\nimport shutil\r\nimport re\r\n\r\nROT_QUATER = math.pi / 2\r\n\r\ndef delete_all():\r\n    for item in bpy.context.scene.objects:\r\n        bpy.context.scene.objects.unlink(item)\r\n\r\n    for item in bpy.data.objects:\r\n        bpy.data.objects.remove(item)\r\n\r\n    for item in bpy.data.meshes:\r\n        bpy.data.meshes.remove(item)\r\n\r\n    for item in bpy.data.materials:\r\n        bpy.data.materials.remove(item)\r\n\r\ndef add_cone():\r\n    bpy.ops.mesh.primitive_cone_add(\r\n        vertices = 6,\r\n        radius1 = 6,\r\n        radius2 = 3,\r\n        location = (3, 0, 0),\r\n        rotation = (0.5, 0, 0)\r\n    )\r\n\r\ndef add_text(text):\r\n    bpy.ops.object.text_add()\r\n    ob = bpy.context.object\r\n    ob.data.body = text\r\n    ob.data.extrude = 0.1\r\n    ob.rotation_euler[0] = ROT_QUATER\r\n    ob.rotation_euler[2] = ROT_QUATER\r\n    ob.data.align_x = 'CENTER'\r\n    ob.data.align_y = 'CENTER'\r\n\r\n    bpy.data.fonts.load(env.FONT_PATH)\r\n    ob.data.font = bpy.data.fonts.get('Meiryo')\r\n    # ob.data.font = bpy.data.fonts.get('HGSoeiKakupoptai')\r\n    # ob.data.font = bpy.data.fonts.get('TogetogeRock-B')\r\n    # ob.data.font = bpy.data.fonts.get('PopRumCute')\r\n    print(bpy.data.fonts[1])\r\n\r\ndef export_object(name, path = env.OUTPUT_PATH):\r\n    bpy.ops.export_scene.fbx(\r\n        filepath = path + '\\\\' + name + '.fbx',\r\n        version = 'BIN7400',\r\n        ui_tab = 'GEOMETRY',\r\n        use_mesh_modifiers = True,\r\n        use_mesh_modifiers_render = True,\r\n        mesh_smooth_type = 'OFF'\r\n    )\r\n\r\ndef get_character(path = env.TEXT_PATH):\r\n    file = open(path, 'r', 
encoding='utf-8')\r\n    line = file.read()\r\n    string = line.split()\r\n    character = []\r\n    for s in string:\r\n        for c in s:\r\n            character.append(c)\r\n    return character\r\n\r\n# def read_timetag(path = env.TEXT_PATH):\r\n#     file = open(path, 'r', encoding='utf-8')\r\n#     line = file.read()\r\n#     string = line.split()\r\n#     print(re.split('[\\[\\]]', string[1]))\r\n#     # character = []\r\n#     # for s in string:\r\n#     #     for c in s:\r\n#     #         character.append(c)\r\n\r\nif __name__ == \"__main__\":\r\n    delete_all()\r\n    # Get the array of characters from the lyrics file\r\n    character = get_character()\r\n    # read_timetag()\r\n\r\n    if os.path.exists(env.OUTPUT_PATH):\r\n        shutil.rmtree(env.OUTPUT_PATH)\r\n    os.mkdir(env.OUTPUT_PATH)\r\n\r\n    for c in character:\r\n        add_text(c)\r\n        export_object(str(ord(c)))\r\n        delete_all()","repo_name":"jagpotato/blender_py","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27254573574","text":"import scapy.all as scapy\nimport sys\nimport argparse\n\ndef get_arguments():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-t\", \"--target\", dest=\"target\", help=\"Target IP / IP range\")\n    options = parser.parse_args()\n    return options\n\ndef scan(ip):\n    arp_request = scapy.ARP(pdst=ip)\n    broadcast = scapy.Ether(dst='ff:ff:ff:ff:ff:ff')\n    arp_request_broadcast = broadcast/arp_request\n    #arp_request_broadcast.show()\n    answered_list = scapy.srp(arp_request_broadcast,\n                              timeout=1, verbose=False)[0]\n    #print(answered.summary())\n    client_list = []\n    for element in answered_list:\n        target_ip = element[1].psrc\n        target_mac = element[1].hwsrc\n        client_list.append({\"ip\": target_ip, \"mac\": target_mac})\n    #print_result(client_list)\n    return client_list\n\n\ndef print_result(result_list):\n    print(\"IP\\t\\t\\t\\tMAC\\n----------------------------------------------\")\n    for client in result_list:\n        print(client[\"ip\"] + \"\\t\\t\" + client[\"mac\"])\n\noptions = get_arguments()\nscan_result = scan(options.target)\nprint_result(scan_result)\n\n#scan(\"192.168.2.173/24\")\n","repo_name":"shadowsax/NetworkOpen","sub_path":"network_scanner.py","file_name":"network_scanner.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14294134810","text":"import unittest\nfrom panda import Panda\nfrom socialnetwork import PandaSocialNetwork\n\n\nclass TestPandaSocialNetwork(unittest.TestCase):\n\n    def setUp(self):\n        self.network = PandaSocialNetwork()\n        self.panda1 = Panda(name='panda1', email='eee@abv.bg', gender='male')\n        self.panda2 = Panda(name='panda2', email='ggg@abv.bg', gender='female')\n        self.panda3 = Panda(name='panda3', email='ppp@abv.bg', gender='female')\n\n    def test_get_pandas_return_1_panda_if_1_panda_is_added(self):\n        panda = Panda(name='Jane', email='test@gmail.com',\n                      gender='female')\n\n        self.network.add_panda(panda)\n        self.assertEqual([panda], self.network.get_pandas())\n\n    def test_make_friends(self):\n        self.network.make_friends(self.panda1, self.panda2)\n\n        self.assertEqual(2, len(self.network.get_pandas()))\n        self.assertTrue(self.network.are_friends(self.panda1, self.panda2))\n        self.assertTrue(self.network.are_friends(self.panda2, self.panda1))\n\n    def test_connection_level_for_network_with_4_connected_pandas(self):\n        panda1 = Panda('one', 'one@abv.bg', 'male')\n        panda2 = Panda('two', 'two@abv.bg', 'male')\n        panda3 = Panda('three', 'three@abv.bg', 
'male')\n        panda4 = Panda('four', 'four@abv.bg', 'female')\n\n        self.network.make_friends(panda1, panda2)\n        self.network.make_friends(panda2, panda3)\n        self.network.make_friends(panda3, panda4)\n\n        self.assertEqual(self.network.connection_level(panda1, panda3),\n                         (2, [panda1, panda2, panda3]))\n\n    def test_if_two_pandas_are_friends(self):\n        self.network.make_friends(self.panda1, self.panda2)\n        self.assertTrue(self.network.are_friends(self.panda1, self.panda2))\n\n    def test_friends_of_panda(self):\n        self.network.make_friends(self.panda1, self.panda2)\n        self.network.make_friends(self.panda1, self.panda3)\n        self.assertEqual(self.network.friends_of_panda(self.panda1),\n                         set([self.panda2, self.panda3]))\n\n    def test_if_two_pandas_are_connected_somehow(self):\n        self.network.make_friends(self.panda1, self.panda2)\n        self.assertEqual(self.network.connection_level(self.panda1,self.panda2),\n                         (1, [self.panda1, self.panda2]))\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"plamenlazarov/hackbulgaria","sub_path":"week07/PandaSocialNetwork/test_panda_social_network.py","file_name":"test_panda_social_network.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36493480862","text":"import os\nimport json\nimport requests\n\n\ndef delete_index(\n    endpoint,\n    awsauth,\n    index_name,\n    headers=json.loads(os.getenv('Headers', '{\"Content-Type\": \"application/json\"}').strip())\n):\n    '''\n\n    delete specified index\n\n    '''\n\n    try:\n        r = requests.delete(\n            '{}/{}'.format(endpoint, index_name),\n            auth=awsauth,\n            headers=headers\n        )\n\n        if r.ok:\n            print('Notice: {} index deleted'.format(index_name))\n            return True\n\n        print('Notice (delete_index): for {} returned {}'.format(\n            index_name,\n            r.status_code\n        ))\n\n    except Exception as e:\n        print('Error (delete_index): {}'.format(e))\n        return False\n\n    return False\n\n\ndef delete_document(\n    endpoint,\n    awsauth,\n    index_name,\n    index_range,\n    headers=json.loads(os.getenv('Headers', '{\"Content-Type\": \"application/json\"}').strip())\n):\n    '''\n\n    delete index documents satisfying specified range\n\n    @index_range, object with the following structure\n\n        {\n            \"timestamp\": {\n                \"lte\": \"now-5d\"\n            }\n        }\n\n    '''\n\n    if index_name and index_range:\n        path = '{}/_delete_by_query'.format(index_name)\n        payload = { 'query': { 'range': index_range } }\n\n    else:\n        print('Error (delete_document): path and payload not configured')\n        return False\n\n    try:\n        r = requests.post(\n            '{}/{}'.format(endpoint, path),\n            auth=awsauth,\n            json=payload,\n            headers=headers\n        )\n\n        if r.ok:\n            print('Notice: documents in {} deleted satisfying {}'.format(\n                index_name,\n                index_range\n            ))\n            return True\n\n        print('Notice (delete_document): for {} returned {}'.format(\n            index_name,\n            r.status_code\n        ))\n\n    except Exception as e:\n        print('Error (delete_document): {}'.format(e))\n        return False\n\n    return False\n","repo_name":"jeff1evesque/opensearch-customization","sub_path":"delete_configuration.py","file_name":"delete_configuration.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"26623026127","text":"#https://www.hackerrank.com/challenges/swap-case/problem\ndef swap_case(s):\n    character = \"\"\n    for loop in s:\n        if loop.isupper():\n            character += loop.lower()\n        else:\n            character += loop.upper()\n    \n    return character\n\nif __name__ == '__main__':\n    s = input()\n    result = 
swap_case(s)\n    print(result)","repo_name":"alokit-vidyakar/Python_Hackerrank","sub_path":"Programs/swap_case.py","file_name":"swap_case.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15900059425","text":"import argparse\nimport sys\nsys.path.insert(0, '/n/data2/hms/dbmi/beamlab/anil/TIER_Regularized_CLIP/Modeling/')\nimport torch\nimport CLIP_Embedding\nimport MedDataHelpers\nimport utils\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn import metrics\nimport pickle\nrerun = False\nprint(\"CUDA Available: \" + str(torch.cuda.is_available()))\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\nmodel_to_eval = '/n/data2/hms/dbmi/beamlab/anil/Med_ImageText_Embedding/models/clip_regularized/exp1/'\nnum_models = 2\nsub = 'all'\npicklename = model_to_eval + 'padchest_preds_' + str(num_models) + '_' + sub + '.pickle'\naucsname = model_to_eval + 'padchest_aucs_' + str(num_models) + '_' + sub + '.pickle'\n\nif not os.path.isfile(picklename) or rerun:\n    clip_models = CLIP_Embedding.getCLIPModel(modelpath=model_to_eval, num_models=num_models, eval=True)\n    if num_models == 1:\n        clip_models = [clip_models]\n    dat = MedDataHelpers.getDatasets(source = 'padchest', subset = [sub], filters = [])\n    print(dat.__len__())\n    dl = MedDataHelpers.getLoaders(dat, zeroworkers=True, shuffle=False)\n    alldat = dl[sub]\n    all_preds, all_targs, names = utils.getPadPredictions(alldat, clip_models) #gets all predictions for each target in logit form\n    for k, n in enumerate(names):\n        print(n, all_targs[:, k].sum())\n    predinfo = {'all_preds': all_preds, 'all_targs':all_targs, 'names':names}\n    with open(picklename, 'wb') as handle:\n        pickle.dump(predinfo, handle, protocol=pickle.HIGHEST_PROTOCOL)\nelse:\n    with open(picklename, 'rb') as handle:\n        predinfo = pickle.load(handle)\n    all_preds = predinfo['all_preds']\n    all_targs = predinfo['all_targs']\n    names = predinfo['names']\n\nprint(all_preds.shape, all_targs.shape, len(names))\naucs, fprs, tprs, thresholds = {}, {}, {}, {}\nfor i, h in enumerate(names):\n    targs = all_targs[:, i]\n    tpreds = all_preds[:, i]\n    fprs[h], tprs[h], thresholds[h] = metrics.roc_curve(targs, tpreds)\n    aucs[h] = np.round(metrics.auc(fprs[h], tprs[h]), 5)\n\nall = np.array([aucs[h] for h in names])\nprint(all)\naucs['Avg'] = np.mean(all)\nprint(\"Avg\", np.round(aucs['Avg'], 3))\nfor i, h in enumerate(names):\n    print(h, np.round(aucs[h], 3))\n\nwith open(aucsname, 'wb') as handle:\n    pickle.dump([aucs, names], handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\n","repo_name":"apalepu13/TIER_Regularized_CLIP","sub_path":"Evaluation/eval_padchest.py","file_name":"eval_padchest.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"43758490352","text":"#Given a sorted array of integers arr and an integer target, find the indices of the first and last positions of target in arr\n#If target can't be found in arr, return [-1, -1]\n\n\n# First solution : Brute force\ndef first_and_last_position(arr, target):\n    startIdx = -1\n    endIdx = -1\n    i = 0\n    while i < len(arr) :\n        if startIdx == -1 and arr[i] == target:\n            startIdx = i\n\n        if startIdx != -1 and arr[i] == target :\n            endIdx = i\n        i += 1\n\n    if startIdx == -1 :\n        return [-1, -1]\n    return [startIdx, endIdx]\n\n# Second solution : Binary solution\ndef 
start_index_helper(arr, target):\n if arr[0] == target :\n return 0\n left, right = 0, len(arr)-1\n while left <= right :\n mid = (left + right) // 2\n if arr[mid] == target and arr[mid-1] < target :\n return mid\n elif arr[mid] < target :\n left = mid + 1\n else :\n right = mid - 1\n return -1\n\ndef end_index_helper(arr, target):\n if arr[-1] == target :\n return len(arr) -1\n\n left, right = 0, len(arr)-1\n while left <= right :\n mid = (left + right) // 2\n if arr[mid] == target and arr[mid+1] > target :\n return mid\n elif arr[mid] > target:\n right = mid - 1\n else:\n left = mid + 1\n return -1\n\ndef first_and_last_position_2(arr, target) :\n if len (arr) == 0 or arr[0] > target or arr[-1] < target :\n return [-1, -1]\n return [start_index_helper(arr, target) , end_index_helper(arr, target)]\n\n\nif __name__ == \"__main__\":\n test_arr = [1, 2, 3, 3, 3, 3, 5]\n print(first_and_last_position([1, 2, 3, 3, 3, 3, 5], 3))\n print(first_and_last_position_2(test_arr, 3))","repo_name":"leolo0626/effective_python","sub_path":"algorithm/first_and_last_position.py","file_name":"first_and_last_position.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4742369831","text":"name = input(\"Username: \")\npas = input(\"Password: \")\nde = input(\"Designation: \")\norg = input(\"Organisation: \")\nif name == \"shiba_tatsuya\":\n if pas == \"yotsuba\":\n if de == \"SL\":\n if org == \"FLT\":\n print(\"Hello Shiba Tatsuya\")\n\nelif de == \"CM\":\n if org == \"STARS\":\n print(\"Hello Angie Sirius\")\n\nelif de == \"SL\":\n if org == \"SIRIUS\":\n print(\"Hello Shiba Miyuku\")\n\nelif de == \"FG\":\n if org == \"SCFB\":\n print(\"Hello Sudeeksha Yellanki\")\n\nelse:\n print(\"Welcome \"+name)\n","repo_name":"dynamight72/CS50-PythonXJS","sub_path":"Python/name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5975508933","text":"import os\nfrom os import listdir\nfrom os.path import isfile, join\nimport tensorflow as tf\nimport time\nimport ipdb\n\nfrom domains.witness import WitnessState\nfrom search.bfs_levin import BFSLevin\nfrom models.model_wrapper import KerasManager, KerasModel\nfrom concurrent.futures.process import ProcessPoolExecutor\nfrom bootstrap import Bootstrap\nfrom bootstrap_no_debug_data import Bootstrap_No_Debug\nfrom parameter_parser import parameter_parser\ntf.debugging.set_log_device_placement (True)\nos.environ['PYTHONHASHSEED'] = str(1)\n\n\ndef search(states, planner, nn_model, ncpus, time_limit_seconds, search_budget=-1):\n \"\"\"\n This function runs (best-first) Levin tree search with a learned policy on a set of problems\n \"\"\"\n total_expanded = 0\n total_generated = 0\n total_cost = 0\n slack_time = 600\n solutions = {}\n\n for name, state in states.items ():\n state.reset ()\n solutions[name] = (-1, -1, -1, -1)\n start_time = time.time ()\n\n while len (states) > 0:\n with ProcessPoolExecutor (max_workers=ncpus) as executor:\n args = ((state, name, nn_model, search_budget, start_time, time_limit_seconds, slack_time) for name, state\n in states.items ())\n results = executor.map (planner.search, args)\n for result in results:\n solution_depth = result[0]\n expanded = result[1]\n generated = result[2]\n running_time = result[3]\n puzzle_name = result[4]\n\n if solution_depth > 0:\n solutions[puzzle_name] = (solution_depth, expanded, generated, 
running_time)\n del states[puzzle_name]\n\n if solution_depth > 0:\n total_expanded += expanded\n total_generated += generated\n total_cost += solution_depth\n\n partial_time = time.time ()\n if partial_time - start_time + slack_time > time_limit_seconds or len (states) == 0 or search_budget >= 1000000:\n for name, data in solutions.items ():\n print (\"{:s}, {:d}, {:d}, {:d}, {:.2f}\".format (name, data[0], data[1], data[2], data[3]))\n return\n\n search_budget *= 2\n\n\ndef main():\n \"\"\"\n It is possible to use this system to either train a new neural network model through the bootstrap system and\n Levin tree search (LTS) algorithm, or to use a trained neural network with LTS.\n \"\"\"\n parameters = parameter_parser()\n states = {}\n if parameters.problem_domain == 'Witness':\n puzzle_files = [f for f in listdir (parameters.problems_folder) if\n isfile (join (parameters.problems_folder, f))]\n for file in puzzle_files:\n if '.' in file:\n continue\n s = WitnessState ()\n s.read_state (join (parameters.problems_folder, file))\n states[file] = s\n print ('Loaded ', len (states), ' instances')\n\n KerasManager.register ('KerasModel', KerasModel)\n ncpus = int (os.environ.get ('SLURM_CPUS_PER_TASK', default=1))\n k_expansions = 1 #32\n\n with KerasManager () as manager:\n nn_model = manager.KerasModel ()\n bootstrap = None\n if parameters.learning_mode:\n if not parameters.load_debug_data:\n print(\"not going to compute cosine/dot-prod measurements\")\n bootstrap = Bootstrap_No_Debug (states, parameters.model_name,\n ncpus=ncpus,\n initial_budget=int (parameters.search_budget),\n gradient_steps=int (parameters.gradient_steps),\n k_expansions=k_expansions)\n\n else:\n print(\"will compute cosine/dot-prod measurements\")\n bootstrap = Bootstrap (states, parameters.model_name,\n ncpus=ncpus,\n initial_budget=int (parameters.search_budget),\n gradient_steps=int (parameters.gradient_steps),\n params_diff=parameters.params_diff)\n\n if parameters.search_algorithm == 'Levin' or parameters.search_algorithm == 'LevinStar':\n if parameters.search_algorithm == 'Levin':\n bfs_planner = BFSLevin (parameters.use_heuristic, parameters.use_learned_heuristic, False, k_expansions,\n float (parameters.mix_epsilon))\n else:\n bfs_planner = BFSLevin (parameters.use_heuristic, parameters.use_learned_heuristic, True, k_expansions,\n float (parameters.mix_epsilon))\n\n if parameters.use_learned_heuristic:\n nn_model.initialize (parameters.loss_function, parameters.search_algorithm, two_headed_model=True)\n else:\n nn_model.initialize (parameters.loss_function, parameters.search_algorithm, two_headed_model=False)\n\n if parameters.learning_mode:\n solve_problems_start_time = time.time()\n print(\"gonna call solve_problems\")\n bootstrap.solve_problems (bfs_planner, nn_model, parameters)\n solve_problems_end_time = time.time()\n print(\"time to execute entire prog =\", solve_problems_end_time - solve_problems_start_time)\n elif parameters.blind_search:\n search (states, bfs_planner, nn_model, ncpus, int (parameters.time_limit),\n int (parameters.search_budget))\n # else:\n # nn_model.load_weights(join('trained_models_large', 'BreadthFS_' + parameters.model_name, 'model_weights'))\n # search (states, bfs_planner, nn_model, ncpus, int (parameters.time_limit),\n # int (parameters.search_budget))\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"daveloui/curriculum_learner_LTS","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5969,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"40560489102","text":"#\r\n# checkAccess.py\r\n# Created by: Dmitry Chulkov\r\n#\r\n\r\nimport datetime\r\n\r\n# make our access list\r\nglobal trustedPeople\r\ntrustedPeople = { 'dmitry' : '08:25 - 22:00', \r\n                  'anastasia' : 'full'\r\n                  }\r\n\r\n# function checks a person's access permission to the refrigerator\r\n# this function returns a dictionary with the status of access in the form\r\n# status = { 'trustedPerson': True/False,\r\n#            'access' : True/False }\r\ndef check(personName):\r\n    global trustedPeople\r\n\r\n    # create a dictionary to return\r\n    status = {}\r\n    \r\n    # return false immediately if person not in our list\r\n    if personName not in trustedPeople:\r\n        status['trustedPerson'] = False\r\n        status['access'] = False\r\n        return status\r\n    else:\r\n        status['trustedPerson'] = True\r\n\r\n    # get access status for person\r\n    access = trustedPeople[personName]\r\n\r\n    # if person has full access return true immediately\r\n    if access == 'full':\r\n        status['access'] = True\r\n        return status\r\n\r\n    # get current time\r\n    now = datetime.datetime.now()\r\n    # create objects for comparing with current time\r\n    start = now.replace(hour=int(access[0:2]), minute=int(access[3:5]))\r\n    end = now.replace(hour=int(access[8:10]), minute=int(access[11:]))\r\n\r\n    if now > start and now < end:\r\n        status['access'] = True\r\n        return status\r\n    else:\r\n        status['access'] = False\r\n        return status\r\n    \r\n    \r\n    \r\n","repo_name":"Jaimin7632/Home-Automation-and-Security","sub_path":"raspberry_pi/face_recognization_iot/access.py","file_name":"access.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"72835591293","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 20 17:24:13 2019\r\n\r\n@author: jivitesh's PC\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\ndata1=[1,0.5,-7,3,9.45,5]\r\narr1= np.array(data1)\r\narr1\r\nprint(arr1.ndim)\r\nprint(np.ndim(arr1))\r\narr2=np.array(np.arange(1,10).reshape(3,3))\r\nprint(arr2.dtype)\r\narr2=arr2.astype('float64')\r\nprint(arr2.dtype)\r\narr3=np.array(np.arange(1,19).reshape(3,2,3))\r\narr2[:,0]=90\r\nprint (arr2)\r\n\r\ndf1=pd.DataFrame({\"key\":['a','b','b','b','e','f','g'],\r\n\"val\": [1,2,3,4,5,6,7]}) \r\n\r\ndf2=df=pd.DataFrame({\"key\":['a','b','b','d','f'],\r\n\"vl\": [1,2,3,4,6 ]}) \r\n\r\ndf3=pd.merge(df1,df2,how='left')\r\nprint(df3)","repo_name":"JSharma2K/PythonProgramsDataCleaningAndModelCreation","sub_path":"numPy.py","file_name":"numPy.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27215458284","text":"import cv2\nimport numpy as np\nimport win32gui, win32ui, win32con, win32api\nimport pygetwindow as gw\n\nS_X = 14\nS_Y = 469\nL_X = 500\nL_Y = 378\n\ndef grab_screen(shape=(800,450),offset=(0,0)):\n\n    hwin = win32gui.GetDesktopWindow()\n\n\n    width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)\n    height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)\n    left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)+offset[0]\n    top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)+offset[1]\n    if shape:\n        width = shape[0]\n        height = shape[1]\n\n    \n\n\n    hwindc 
= win32gui.GetWindowDC(hwin)\n    srcdc = win32ui.CreateDCFromHandle(hwindc)\n    memdc = srcdc.CreateCompatibleDC()\n    bmp = win32ui.CreateBitmap()\n    bmp.CreateCompatibleBitmap(srcdc, width, height)\n    memdc.SelectObject(bmp)\n    memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)\n    \n    signedIntsArray = bmp.GetBitmapBits(True)\n    img = np.frombuffer(signedIntsArray, dtype='uint8')\n    img.shape = (height,width,4)\n\n    srcdc.DeleteDC()\n    memdc.DeleteDC()\n    win32gui.ReleaseDC(hwin, hwindc)\n    win32gui.DeleteObject(bmp.GetHandle())\n\n    return cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)\n\ndef setWindow(name=\"DeSmuME 0.9.11 x64\",x=-1920,y=0):\n    win = gw.getWindowsWithTitle(name)\n    if len(win) == 0:\n        print(\"NO WINDOW ! EXITING\")\n        return\n    tm = win[0]\n    tm.activate()\n\n    tm.moveTo(x,y)","repo_name":"HugoTLR/Fun","sub_path":"PkmnCasino/screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3137782134","text":"from __future__ import annotations\nimport time\nimport logging\nimport asyncio\n\nlogger = logging.getLogger(\"corkus.ratelimit\")\n\nclass RateLimiter:\n    def __init__(self) -> None:\n        self._total = 180\n        self._remaining = 180\n        self._reset = 0\n\n    @property\n    def total(self) -> int:\n        return self._total\n\n    @property\n    def remaining(self) -> int:\n        if self.reset < 0:\n            return self.total\n        else:\n            return self._remaining\n\n    @property\n    def reset(self) -> int:\n        if self._reset == 0:\n            return 0\n        else:\n            return self._reset - int(time.time())\n\n    async def limit(self) -> None:\n        if self.remaining <= 1:\n            logger.info(f\"You are being ratelimited, waiting for {self.reset}s\")\n            await asyncio.sleep(self.reset)\n\n    def update(self, headers: dict) -> None:\n        self._total = int(headers.get(\"ratelimit-limit\", 180))\n        self._remaining = int(headers.get(\"ratelimit-remaining\", 180))\n        self._reset = int(time.time()) + int(headers.get(\"ratelimit-reset\", 0))\n","repo_name":"MrBartusek/corkus.py","sub_path":"corkus/utils/ratelimit.py","file_name":"ratelimit.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"39318951950","text":"#! 
/usr/bin/env python3\n\nguilds = {}\n\nwith open(\"${funguild.baseName}\",'w') as handle:\n    with open(\"$funguild\") as f:\n        for line in f:\n            if not line.startswith('SequenceID'):\n                line = line.split('\\\\t')\n                try:\n                    guilds[line[5]] = guilds[line[5]] + 1\n                except:\n                    guilds[line[5]] = 1\n\n    for k in guilds.keys():\n        if k == \"-\":\n            print(\"\\\\t\".join(['Unassigned', str(guilds[k])]), file=handle)\n        else:\n            print(\"\\\\t\".join([k, str(guilds[k])]), file=handle)\n","repo_name":"maxemil/Biodiversity-pipeline","sub_path":"templates/parse_funguild.py","file_name":"parse_funguild.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71306937211","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport os\n\nfrom configuration import Configuration\nfrom itunes_library import ITunesLibrary\nfrom m3u_storage import M3UStorage\nfrom synchronisation import LibrarySynchronisation\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"conf_file\", help=\"Configuration file .json\")\n    options = parser.parse_args()\n\n    configuration = Configuration(options.conf_file)\n\n    library = ITunesLibrary(configuration.itunes_media_path, configuration.itunes_library,\n                            configuration.itunes_media_db_path)\n    storage = M3UStorage(configuration.storage_copy_media_path, configuration.storage_copy_playlist_path,\n                         configuration.relative_paths, configuration.storage_m3u_media_path, configuration.ignore_folders,\n                         configuration.storage_path_separator)\n    connector = LibrarySynchronisation(library, storage)\n\n    for song in library.songs.values():\n        if song.location and not song.location.startswith(library.media_path):\n            raise Exception(\"library internal path does not seem to be right: %s\" % library.songs[\n                list(library.songs.keys())[0]].location)\n\n    all_playlists = set(library.play_lists)\n\n    g = lambda x: {library.get_play_lists_by_full_name(x)}\n    s = lambda x: set(library.find_play_lists_by_full_name(x))\n\n    include = set([])\n    for i in configuration.include:\n        if i.startswith(\"get_\"):\n            include.update(g(i[4:]))\n        elif i.startswith(\"search_\"):\n            include.update(s(i[7:]))\n        elif i == \"all\":\n            include.update(all_playlists)\n        else:\n            raise Exception(\"Misconfiguration: objects in INCLUDE must start with 'get_' or 'search_' or be 'all'\")\n\n    exclude = set([])\n    for i in configuration.exclude:\n        if i.startswith(\"get_\"):\n            exclude.update(g(i[4:]))\n        elif i.startswith(\"search_\"):\n            exclude.update(s(i[7:]))\n        else:\n            raise Exception(\"Misconfiguration: objects in EXCLUDE must start with 'get_' or 'search_'\")\n\n    connector.add(include - exclude)\n    connector.sync(True, configuration.cleanup_media_copy_storage)\n","repo_name":"har0ke/iTunesToM3ULibrary","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38555220528","text":"import lo_glrt\nfrom prn import *\nimport torch\nfrom score_functions import Score_Gaussian\n\n\ndef get_detectors(detector_arch, checkpoint_detector=None, **kwargs):\n    if detector_arch.startswith('gaussian'):\n        detector_arch = detector_arch.split('_')\n        if detector_arch[1] == 'firstorder':\n            score_function = Score_Gaussian(\n                image_size=(3, kwargs['image_size'], kwargs['image_size']),\n                patch_size=(kwargs['patch_size'], kwargs['patch_size']),\n                init_loc=kwargs['location_generative_model'],\n                train=False,\n                input_patches=True,\n                
output_patches=True,\n                preprocess=None\n            )\n            if detector_arch[2] == 'simple':\n                detector = lo_glrt.LO_FirstOrder((3, kwargs['image_size'], kwargs['image_size']), (kwargs['patch_size'], kwargs['patch_size']), kwargs['num_classes'],\n                                                 init_perts=kwargs['location_perts_means'] + '_patchsize_{}.npz'.format(kwargs['patch_size']),\n                                                 type='simple', score_type = kwargs['score_type'], score_function=score_function)\n            if detector_arch[2] == 'composite':\n                detector = lo_glrt.LO_FirstOrder((3, kwargs['image_size'], kwargs['image_size']), (kwargs['patch_size'], kwargs['patch_size']), kwargs['num_classes'],\n                                                 init_perts=kwargs['location_perts_means'] + '_patchsize_{}.npz'.format(kwargs['patch_size']),\n                                                 type='composite', score_type = kwargs['score_type'], score_function=score_function)\n\n            if checkpoint_detector:\n                checkpoint_detector = torch.load(checkpoint_detector)\n                detector.load_state_dict(checkpoint_detector['state_dict'])\n\n\n    elif detector_arch.startswith('prn'):\n        detector = PRN_detector(kwargs['image_size'])\n        checkpoint_detector = torch.load(checkpoint_detector)\n        detector.load_state_dict(checkpoint_detector)\n\n\n    return detector\n","repo_name":"agoel10/locally_optimal_detector","sub_path":"get_detectors.py","file_name":"get_detectors.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8723373394","text":"import numpy as np\r\nimport sys\r\nimport scipy.stats as st\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.linear_model import Lasso, Ridge\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n#Franke's function\r\n#Code taken from the project description\r\ndef FrankeFunction(x, y):\r\n    term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))\r\n    term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))\r\n    term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))\r\n    term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)\r\n    return term1 + term2 + term3 + term4\r\n\r\n#Franke's function with added noise\r\ndef FrankeFunction_noise(x, y, noise):\r\n    \r\n    #reshape x and y if they have multiple dimensions\r\n    if len(x.shape) > 1:\r\n        x = np.ravel(x)\r\n        y = np.ravel(y)\r\n\r\n    n = len(x)\r\n    term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))\r\n    term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))\r\n    term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))\r\n    term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)\r\n    return term1 + term2 + term3 + term4 + noise*np.random.randn(n) #noise term\r\n\r\n\r\n# Calculate the confidence interval of a data set with a normal distribution\r\n# mean, variance: self-explanatory\r\n# alpha: significance level (confidence level = 1 - alpha)\r\n# returns the lower and upper confidence boundaries\r\ndef confidence_interval(mean, variance, alpha):\r\n\r\n    sigma = np.sqrt(variance) #standard deviation\r\n    n = variance.shape[0] #degrees of freedom for the ppf function\r\n    Z = st.t.ppf(1-alpha/2, n-1)\r\n    \r\n    lower = mean - Z*sigma\r\n    upper = mean + Z*sigma\r\n    return lower, upper\r\n\r\n\r\n#Calculate the estimator of an unknown variance\r\n#p: degree of the polynomial\r\n#y: actual data points\r\n#y_tilde: predicted data points\r\ndef variance_estimator(p, y, y_tilde):\r\n    n = len(y)\r\n    var = np.sum((y-y_tilde)**2)/(n-p-1)\r\n    return var\r\n\r\n\r\n#Calculate the coefficient of determination\r\n#y: actual data points\r\n#y_tilde: modelled data points\r\ndef R2(y, y_tilde):\r\n\r\n    n = len(y)\r\n    y_mean = np.mean(y)\r\n\r\n    
SS_res = 0 #sum of squares of residuals\r\n SS_tot = 0 #total sum of squares\r\n \r\n for i in range(n):\r\n SS_res += (y[i] - y_tilde[i])**2\r\n SS_tot += (y[i] - y_mean)**2\r\n R2_val = 1 - (SS_res/SS_tot)\r\n return R2_val\r\n\r\n#Calculate the mean squared error\r\n#y: actual data points\r\n#y_tilde: modelled data points\r\ndef MSE(y, y_tilde):\r\n sum = 0\r\n n = len(y)\r\n for i in range(n):\r\n sum += (y[i] - y_tilde[i])**2\r\n mean_squared_error = sum/n\r\n return mean_squared_error\r\n\r\n#Calculate the mean error\r\n#y: actual data points\r\n#y_tilde: predicted data points\r\ndef mean_error(y, y_tilde):\r\n \r\n n = len(y)\r\n sum = 0\r\n for i in range(n):\r\n sum += np.abs((y[i]-y_tilde[i]))\r\n output = sum/n\r\n return output\r\n\r\n\r\n# Create a design matrix\r\n# degree: the degree of the polynomial\r\n# x and y: x-values and y-values\r\ndef design_matrix(degree, x, y):\r\n\r\n if len(x.shape) > 1:\r\n x = np.ravel(x)\r\n y = np.ravel(y)\r\n\r\n n = len(x)\r\n terms = int(((degree+1)*(degree+2))/2) #number of terms\r\n\r\n X = np.zeros([n, terms])\r\n X[:,0] = 1\r\n column = 1\r\n for i in range(1, degree+1):\r\n for j in range(i+1):\r\n X[:,column] = (x**j)*(y**(i-j))\r\n column += 1\r\n return X\r\n\r\n#Ordinary Least Squares on a data set\r\n#X_train: training design matrix\r\n#X_test: testing design matrix\r\n#y_train: data point sets corresponding to X_train\r\n#y_test: data point sets corresponding to X_test\r\n\r\n#y_tilde_train: approximated values corresponding to training data\r\n#y_tilde_test: approximated values corresponding to testing data\r\n#beta: coefficients of the polynomial that is the best fit\r\ndef OLS(X_train, X_test, y_train, y_test):\r\n\r\n beta = np.linalg.pinv(X_train.T @ X_train) @ X_train.T @ y_train\r\n y_tilde_train = X_train @ beta\r\n y_tilde_test = X_test @ beta\r\n\r\n return y_tilde_train, y_tilde_test, beta\r\n\r\n#Ridge regression on a data set, finds the optimal lambda\r\n#X_train: training design matrix\r\n#X_test: testing design matrix\r\n#y_train: data point sets corresponding to X_train\r\n#y_test: data point sets corresponding to X_test\r\n#lambda_candidates: potential lambda values to test\r\n#testsize: proportion of data set aside for testing\r\n\r\n#y_tilde_train: approximated values corresponding to training data\r\n#y_tilde_test: approximated values corresponding to testing data\r\n#beta: coefficients of the polynomial that is the best fit\r\n#best_lambda: the optimal lambda, i.e. 
the one that yields the lowest MSE\r\n#lambda_MSEs: array containing the MSEs associated with lambdas\r\n\r\ndef Ridge(X_train, X_test, y_train, y_test, lambda_candidates, testsize = 0.25):\r\n\r\n    beta = np.zeros((len(lambda_candidates),X_train.shape[1]))\r\n    lambda_MSEs = np.zeros(len(lambda_candidates))\r\n\r\n    X_training, X_validate, y_training, y_validate = train_test_split(X_train, y_train, test_size = testsize)\r\n\r\n    for i, lambda_val in enumerate(lambda_candidates):\r\n        beta[i,:] = np.linalg.pinv(X_training.T @ X_training + lambda_val * np.identity((X_training.T @ X_training).shape[0])) @ X_training.T @ y_training\r\n        y_tilde_validate = X_validate @ beta[i]\r\n\r\n        lambda_MSEs[i] = MSE(y_validate, y_tilde_validate)\r\n\r\n    best_lambda = lambda_candidates[np.argmin(lambda_MSEs)]\r\n    beta = beta[np.argmin(lambda_MSEs)]\r\n\r\n    y_tilde_train = X_train @ beta\r\n    y_tilde_test = X_test @ beta\r\n\r\n    return y_tilde_train, y_tilde_test, beta, best_lambda, lambda_MSEs\r\n\r\n#Scale design matrices with the SKLearn StandardScaler, fitted on the first input\r\ndef scale(x_train, x_test):\r\n\r\n    scaler = StandardScaler()\r\n    scaler.fit(x_train)\r\n    x_train_scaled = scaler.transform(x_train) \r\n    x_train_scaled[:,0] = 1\r\n    \r\n    x_test_scaled = scaler.transform(x_test)\r\n    x_test_scaled[:,0] = 1\r\n\r\n    return x_train_scaled, x_test_scaled\r\n\r\n#Bootstrap resampling\r\n\r\n#X_train: training design matrix\r\n#X_test: testing design matrix\r\n#y_train: data point sets corresponding to X_train\r\n#y_test: data point sets corresponding to X_test\r\n#n: bootstrap iterations\r\n#method: linear regression method (can be OLS, Ridge or Lasso)\r\n\r\n#boot_MSE: mean value of the calculated MSE values of all bootstraps\r\n#boot_bias: mean value of the calculated bias values of all bootstraps\r\n#boot_variance: mean value of the calculated variance values of all bootstraps\r\n\r\ndef bootstrap(X_train, X_test, y_train, y_test, n, method):\r\n\r\n    y_tilde_test = np.empty((y_test.shape[0], n))\r\n\r\n    for i in range(n):\r\n        random_indexes = np.random.randint(0, len(X_train), len(X_train))\r\n        X = X_train[random_indexes]\r\n        Y = y_train[random_indexes]\r\n        if method == 'OLS':\r\n            y_tilde_test[:,i] = OLS(X, X_test, Y, y_test)[1]\r\n\r\n        elif method == 'Ridge':\r\n            lambda_values = np.logspace(-3, 5, 200)\r\n            y_tilde_test[:,i] = Ridge(X, X_test, Y, y_test, lambda_values)[1]\r\n\r\n        elif method == 'Lasso':\r\n            lambda_values = np.logspace(-10, 5, 100)\r\n            MSE_test_array = np.zeros(len(lambda_values))\r\n            Y_tilde_test_array = np.zeros([len(lambda_values), X_test.shape[0]])\r\n\r\n            for j, lambda_val in enumerate(lambda_values):\r\n                clf = Lasso(alpha=lambda_val, tol = 0.001).fit(X, Y)\r\n                Y_tilde_test_array[j] = clf.predict(X_test)\r\n                MSE_test_array[j] = MSE(y_test,Y_tilde_test_array[j])\r\n\r\n            y_tilde_test[:,i] = Y_tilde_test_array[np.argmin(MSE_test_array)]\r\n\r\n        else:\r\n            print(\"Not a valid regression method\")\r\n            sys.exit(0)\r\n\r\n    y_test = y_test[:,np.newaxis]\r\n\r\n\r\n    boot_MSE = np.mean(np.mean((y_test-y_tilde_test)**2, axis=1, keepdims=True))\r\n    boot_bias = np.mean((y_test-np.mean(y_tilde_test, axis=1, keepdims=True))**2)\r\n    boot_variance = np.mean(np.var(y_tilde_test, axis=1, keepdims=True))\r\n\r\n    return boot_MSE, boot_bias, boot_variance\r\n\r\n#Performs cross-validation\r\n#degree: the degree of the polynomial\r\n#X: design matrix\r\n#y: corresponding data\r\n#K: number of folds to cross-validate\r\n#method: linear regression method (can be OLS, Ridge or Lasso)\r\n\r\n#MSE_mean: mean 
of the calculated MSE values for each fold\r\ndef cross_validation(degree, X, y, K, method):\r\n\r\n    beta_len = int((2 + degree)*(1 + degree)/2)\r\n    X_split = np.array(np.array_split(X, K))\r\n    Y_split = np.array(np.array_split(y, K))\r\n\r\n    MSEs = np.zeros(K)\r\n\r\n    for i in range(K): #Run through every fold\r\n        X_test = X_split[i]\r\n        Y_test = Y_split[i]\r\n        X_train = np.concatenate((X_split[:i], X_split[(i+1):]))\r\n        Y_train = np.concatenate((Y_split[:i], Y_split[(i+1):])).ravel()\r\n        X_train = X_train.reshape(-1, beta_len)\r\n        X_train, X_test = scale(X_train, X_test)\r\n        if method == 'OLS':\r\n            y_pred = OLS(X_train, X_test, Y_train, Y_test)[1]\r\n\r\n        elif method == 'Ridge':\r\n            lambda_vals = np.logspace(-10, 3, 100)\r\n            y_pred = Ridge(X_train,X_test,Y_train,Y_test,lambda_vals)[1]\r\n\r\n        elif method == 'Lasso':\r\n            lambda_vals = np.logspace(-10, 3, 100)\r\n\r\n            MSE_test_array = np.zeros(len(lambda_vals))\r\n            Y_tilde_test_array = np.zeros([len(lambda_vals),X_test.shape[0]])\r\n\r\n            for j, lambda_val in enumerate(lambda_vals):\r\n                clf = Lasso(alpha=lambda_val).fit(X_train, Y_train)\r\n                Y_tilde_test_array[j] = clf.predict(X_test)\r\n                MSE_test_array[j] = MSE(Y_test,Y_tilde_test_array[j])\r\n\r\n            y_pred = Y_tilde_test_array[np.argmin(MSE_test_array)]\r\n\r\n        else:\r\n            print(\"Not a valid regression method\")\r\n            sys.exit(0)\r\n\r\n        MSEs[i] = MSE(Y_test, y_pred)\r\n\r\n    return np.mean(MSEs)\r\n\r\n#Used for debugging, though the code for debugging has since been removed.\r\nif __name__ == '__main__':\r\n    print(\"\\nYou accidentally ran the function file instead of an actual file.\")","repo_name":"karillouio/FYS-STK4155","sub_path":"Project1/Functions/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":9989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10865783544","text":"#!/usr/bin/env python\n\nimport eventlet\neventlet.monkey_patch()\n\nfrom random import randint\nfrom flask import Flask, render_template\nfrom flask_socketio import SocketIO\nfrom flask_socketio import send, emit\n\napp = Flask(__name__)\nsocket = SocketIO(app, logger=True, engineio_logger=True)\n\ndef send_tweet():\n    data = {\n        'lat': randint(-180, 180),\n        'long': randint(-180, 180),\n    }\n    socket.emit('tweet', data)\n\ndef send_infinite_tweets():\n    while True:\n        send_tweet()\n        eventlet.sleep(1)\n\neventlet.spawn(send_infinite_tweets)\n\nif __name__ == '__main__':\n    socket.run(app)\n\n","repo_name":"aej/escapechristmas","sub_path":"client/backenddebug/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3140773414","text":"# BOJ 5373 (Cubing)\nimport sys\n\nsys.stdin = open(\"input.txt\", \"r\")\nsi = sys.stdin.readline\n\n\ndef rotate_current_face(front: int, rotate_d: str) -> list:\n    \"\"\"rotate the cube's current face\n\n    Args:\n        front (int): index of the cube face to rotate\n        rotate_d (str): rotate direction, \"+\" is clockwise, \"-\" is counter-clockwise\n\n    Returns:\n        list: the new, rotated face of the cube\n    \"\"\"\n    ret = []\n    if rotate_d == \"+\":  # clockwise\n        for j in range(3):\n            temp = []\n            for i in range(2, -1, -1):\n                temp.append(cube[front][i][j])\n            ret.append(temp)\n    elif rotate_d == \"-\":  # counter-clockwise\n        for j in range(2, -1, -1):\n            temp = []\n            for i in range(3):\n                temp.append(cube[front][i][j])\n            ret.append(temp)\n    return ret\n\n\ndef copy(front: str) -> list:\n    \"\"\"copy cube's 
cube when rotate that influenced by rotation\n\n Args:\n front (str): cube's face index\n\n Returns:\n list: return copied cubes\n \"\"\"\n if front == \"U\":\n # 1, 2, 3, 4번 면의 윗쪽이 영향을 받음\n d1, d2, d3, d4 = [], [], [], []\n for i in range(3):\n d1.append(cube[1][0][i])\n d2.append(cube[2][0][i])\n d3.append(cube[3][0][i])\n d4.append(cube[4][0][i])\n return d1, d2, d3, d4\n\n elif front == \"B\":\n d0, d2, d3, d5 = [], [], [], []\n for i in range(3):\n d0.append(cube[0][0][i])\n d2.append(cube[2][i][2])\n d3.append(cube[3][i][0])\n d5.append(cube[5][0][i])\n return d0, d2, d3, d5\n\n elif front == \"R\":\n d0, d1, d4, d5 = [], [], [], []\n for i in range(3):\n d0.append(cube[0][i][2])\n d1.append(cube[1][i][0])\n d4.append(cube[4][i][2])\n d5.append(cube[5][i][0])\n return d0, d1, d4, d5\n\n elif front == \"L\":\n d0, d1, d4, d5 = [], [], [], []\n for i in range(3):\n d0.append(cube[0][i][0])\n d1.append(cube[1][i][2])\n d4.append(cube[4][i][0])\n d5.append(cube[5][i][2])\n return d0, d1, d4, d5\n\n elif front == \"F\":\n d0, d2, d3, d5 = [], [], [], []\n for i in range(3):\n d0.append(cube[0][2][i])\n d2.append(cube[2][i][0])\n d3.append(cube[3][i][2])\n d5.append(cube[5][2][i])\n return d0, d2, d3, d5\n\n elif front == \"D\":\n d1, d2, d3, d4 = [], [], [], []\n for i in range(3):\n d1.append(cube[1][2][i])\n d2.append(cube[2][2][i])\n d3.append(cube[3][2][i])\n d4.append(cube[4][2][i])\n return d1, d2, d3, d4\n\n\ndef rotate(op: str):\n \"\"\"display cube's status after rotated\n\n Args:\n op (str): op is 2 word string. first word is one of the \"F\", \"B\", \"U\", \"D\", \"L\", \"R\"\n and second word is direction of rotation, \"+\", \"-\"\n \"\"\"\n if op[0] == \"U\":\n new_face = rotate_current_face(0, op[1])\n\n for i in range(3):\n for j in range(3):\n cube[0][i][j] = new_face[i][j]\n\n d1, d2, d3, d4 = copy(op[0])\n for i in range(3):\n if op[1] == \"+\": # clockwise\n cube[1][0][i] = d3[i]\n cube[2][0][i] = d1[i]\n cube[3][0][i] = d4[i]\n cube[4][0][i] = d2[i]\n else: # counter-clockwise\n cube[1][0][i] = d2[i]\n cube[2][0][i] = d4[i]\n cube[3][0][i] = d1[i]\n cube[4][0][i] = d3[i]\n\n elif op[0] == \"B\":\n new_face = rotate_current_face(1, op[1])\n\n for i in range(3):\n for j in range(3):\n cube[1][i][j] = new_face[i][j]\n\n d0, d2, d3, d5 = copy(op[0])\n for i in range(3):\n if op[1] == \"+\": # clockwise\n cube[0][0][i] = d2[i]\n cube[2][i][2] = d5[i]\n cube[3][i][0] = d0[2 - i]\n cube[5][0][i] = d3[2 - i]\n else: # counter-clockwise\n cube[0][0][i] = d3[2 - i]\n cube[2][i][2] = d0[i]\n cube[3][i][0] = d5[2 - i]\n cube[5][0][i] = d2[i]\n\n elif op[0] == \"R\":\n new_face = rotate_current_face(2, op[1])\n\n for i in range(3):\n for j in range(3):\n cube[2][i][j] = new_face[i][j]\n\n d0, d1, d4, d5 = copy(op[0])\n for i in range(3):\n if op[1] == \"+\": # clockwise\n cube[0][i][2] = d4[i]\n cube[1][i][0] = d0[2 - i]\n cube[4][i][2] = d5[2 - i]\n cube[5][i][0] = d1[i]\n else: # counter-clockwise\n cube[0][i][2] = d1[2 - i]\n cube[1][i][0] = d5[i]\n cube[4][i][2] = d0[i]\n cube[5][i][0] = d4[2 - i]\n\n elif op[0] == \"L\":\n new_face = rotate_current_face(3, op[1])\n \n for i in range(3):\n for j in range(3):\n cube[3][i][j] = new_face[i][j]\n\n d0, d1, d4, d5 = copy(op[0])\n for i in range(3):\n if op[1] == \"+\": # clockwise\n cube[0][i][0] = d1[2 - i]\n cube[1][i][2] = d5[i]\n cube[4][i][0] = d0[i]\n cube[5][i][2] = d4[2 - i]\n else: # counter-clockwise\n cube[0][i][0] = d4[i]\n cube[1][i][2] = d0[2 - i]\n cube[4][i][0] = d5[2 - i]\n cube[5][i][2] = d1[i]\n\n elif op[0] == 
\"F\":\n new_face = rotate_current_face(4, op[1])\n\n for i in range(3):\n for j in range(3):\n cube[4][i][j] = new_face[i][j]\n\n d0, d2, d3, d5 = copy(op[0])\n for i in range(3):\n if op[1] == \"+\": # clockwise\n cube[0][2][i] = d3[2 - i]\n cube[2][i][0] = d0[i]\n cube[3][i][2] = d5[2 - i]\n cube[5][2][i] = d2[i]\n else: # counter-clockwise\n cube[0][2][i] = d2[i]\n cube[2][i][0] = d5[i]\n cube[3][i][2] = d0[2 - i]\n cube[5][2][i] = d3[2 - i]\n\n elif op[0] == \"D\":\n new_face = rotate_current_face(5, op[1])\n for i in range(3):\n for j in range(3):\n cube[5][i][j] = new_face[i][j]\n\n d1, d2, d3, d4 = copy(op[0])\n for i in range(3):\n if op[1] == \"+\": # clockwise\n cube[1][2][i] = d2[i]\n cube[2][2][i] = d4[i]\n cube[3][2][i] = d1[i]\n cube[4][2][i] = d3[i]\n else: # counter-clockwise\n cube[1][2][i] = d3[i]\n cube[2][2][i] = d1[i]\n cube[3][2][i] = d4[i]\n cube[4][2][i] = d2[i]\n\n\nt = int(si())\nfor _ in range(t):\n cube = [\n [[\"w\", \"w\", \"w\"], [\"w\", \"w\", \"w\"], [\"w\", \"w\", \"w\"]],\n [[\"o\", \"o\", \"o\"], [\"o\", \"o\", \"o\"], [\"o\", \"o\", \"o\"]],\n [[\"b\", \"b\", \"b\"], [\"b\", \"b\", \"b\"], [\"b\", \"b\", \"b\"]],\n [[\"g\", \"g\", \"g\"], [\"g\", \"g\", \"g\"], [\"g\", \"g\", \"g\"]],\n [[\"r\", \"r\", \"r\"], [\"r\", \"r\", \"r\"], [\"r\", \"r\", \"r\"]],\n [[\"y\", \"y\", \"y\"], [\"y\", \"y\", \"y\"], [\"y\", \"y\", \"y\"]],\n ]\n n = int(si())\n operators = list(si().split())\n for i in range(n):\n op = operators[i]\n rotate(op)\n for i in range(3):\n for j in range(3):\n print(cube[0][i][j], end=\"\")\n print()\n","repo_name":"mrbartrns/algorithm-and-structure","sub_path":"BOJ/review/boj_5373_1.py","file_name":"boj_5373_1.py","file_ext":"py","file_size_in_byte":7454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4099984380","text":"#!/usr/bin/python3 -t\n\n# sort and two pointers\n# time O(max(nlogn, mlogm))\n# space O(1)\n\nclass Solution:\n \"\"\"\n @param nums1: an integer array\n @param nums2: an integer array\n @return: an integer array\n \"\"\"\n def intersection(self, nums1, nums2):\n # write your code here\n m = len(nums1)\n n = len(nums2)\n if n == 0 or m == 0:\n return []\n\n nums1.sort()\n nums2.sort()\n\n ret = set()\n i, j = 0, 0\n\n while i < m and j < n:\n if nums1[i] == nums2[j]:\n ret.add(nums1[i])\n i += 1\n j += 1\n elif nums1[i] < nums2[j]:\n i += 1\n else:\n j += 1\n\n return list(ret)\n\n\nif __name__ == '__main__':\n s = Solution()\n a = [1,2,2,1]\n b = [2,2]\n print(s.intersection(a, b))\n\n# sort and binary search solution\n\nclass Solution:\n \"\"\"\n @param nums1: an integer array\n @param nums2: an integer array\n @return: an integer array\n \"\"\"\n def intersection(self, nums1, nums2):\n # write your code here\n m = len(nums1)\n n = len(nums2)\n if m == 0 or n == 0:\n return []\n nums1.sort()\n nums2.sort()\n \n ret = set()\n \n if m < n:\n nums1, nums2 = nums2, nums1\n m, n = n, m\n \n for i in range(n):\n if self.found(nums1, nums2[i]):\n ret.add(nums2[i])\n \n return list(ret)\n \n def found(self, nums, target):\n n = len(nums)\n l = 0\n r = n-1\n while l/',views.getOrderById,name='user_order'),\n path('/pay/',views.payOrder,name='pay'),\n path('/deliver/',views.deliverOrder,name='deliver'),\n\n]\n","repo_name":"varun-official/Prime_Pick","sub_path":"backend/base/urls/order_urls.py","file_name":"order_urls.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} 
+{"seq_id":"24944762095","text":"import numpy as np\n\ndef cross_val(fold_num, r):\n data_dir = \"C:\\\\Users\\\\chels\\\\Desktop\\\\Nano2pO_update\\\\processed\\\\\"\n dire = \"C:\\\\Users\\\\chels\\\\Desktop\\\\Nano2pO_update\\\\data\\\\\"\n cul_loss = 0\n iter = 0\n data_pos = np.load(dire + r +'_pos.npy', allow_pickle=True)\n data_neg = np.load(dire + r+ '_neg.npy', allow_pickle=True)\n for iter in range(fold_num):\n y_test = np.load(data_dir + r + '\\\\imbalance_cv\\\\fold'+str(iter+1)+'_label.npy', allow_pickle=True)\n nano_test = np.load(data_dir + r + '\\\\imbalance_cv\\\\fold'+str(iter+1)+'_nano.npy', allow_pickle=True)\n seq_test = np.load(data_dir + r + '\\\\imbalance_cv\\\\fold'+str(iter+1)+'_seq.npy', allow_pickle=True)\n idx_test = np.load(data_dir + r + '\\\\imbalance_cv\\\\fold'+str(iter+1)+'_seq.npy', allow_pickle=True)\n train_pos = np.delete(data_pos, idx_test.item()['pos'+str(iter+1)], axis=0)\n train_neg = np.delete(data_neg, idx_test.item()['neg'+str(iter+1)], axis=0)\n train = np.concatenate([train_pos, train_neg], axis=0)\n nano_train = train[:, :164].reshape(-1, 4, 41).transpose([0, 2, 1]).astype(np.float32)\n seq_train = train[:, 164:].reshape(-1, 1001, 4).astype(np.float32)\n y_train = np.concatenate([np.ones(train_pos.shape[0]), np.zeros(train_neg.shape[0])])\n\n","repo_name":"WaAaaAterfall/DeepNm","sub_path":"scripts/imbalance_prediction/cross-val.py","file_name":"cross-val.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1693652946","text":"\"\"\"\nHi, here's your problem today. This problem was recently asked by Google:\n\nA chess board is an 8x8 grid. Given a knight at any position (x, y) and a number of moves k, we want to figure out after k random moves by a knight, the probability that the knight will still be on the chessboard. Once the knight leaves the board it cannot move again and will be considered off the board.\n\nHere's some starter code:\n\ndef is_knight_on_board(x, y, k, cache={}):\n # Fill this in.\n\nprint is_knight_on_board(0, 0, 1)\n# 0.25\n\"\"\"\n# There are 8 different positions from where the Knight can reach to (x,y) in one step, \n# and they are: (x+1,y+2), (x+2,y+1), (x+2,y-1), (x+1,y-2), (x-1,y-2), (x-2,y-1), (x-2,y+1), (x-1,y+2). 
\n\n\ndef is_knight_on_board(x, y, k, cache={}):\n # Fill this in.\n N = 8\n next_pos = [[-1, -2], [-1, 2], [1, -2], [1, 2], [2, 1], [2, -1], [-2, 1], [-2, -1]]\n dp = [[0 for i in range(N)] for i in range(N)]\n dp[x][y] = 1\n\n for _ in range(k):\n dp_temp = [[0 for i in range(N)] for i in range(N)]\n for i in range(N):\n for j in range(N):\n for pos in next_pos:\n nr, nc = i - pos[0], j - pos[1]\n if (nr >= 0 and nr < N and nc >= 0 and nc < N):\n dp_temp[i][j] += dp[nr][nc] * 0.125\n dp = dp_temp\n\n res = 0.0\n for i in range(N):\n for j in range(N):\n res += dp[i][j]\n return res\n\nprint(is_knight_on_board(0, 0, 1))\n# 0.25","repo_name":"QinmengLUAN/DailyPythonCoding","sub_path":"DailyProblem58_is_knight_on_board.py","file_name":"DailyProblem58_is_knight_on_board.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33104511189","text":"'''\nFriday the 13th\nGiven the month and year as numbers, return whether that month contains a \nFriday 13th.\n\nExamples\nhas_friday_13(3, 2020) ➞ True\n\nhas_friday_13(10, 2017) ➞ True\n\nhas_friday_13(1, 1985) ➞ False\nNotes\nJanuary will be given as 1, February as 2, etc ...\nCheck Resources for some helpful tutorials on Python's datetime module.\n'''\nfrom calendar import Calendar\n\nweekdays = {\n '0': 'Monday',\n '1': 'Tuesday',\n '2': 'Wednsday',\n '3': 'Tuersday',\n '4': 'Friday',\n '5': 'Saturday',\n '6': 'Sunday',\n}\n\ndef has_friday_13(month, year):\n cal = Calendar()\n monthdays = cal.monthdatescalendar(year, month)\n for week in monthdays:\n print(f'\\nWEEK=> {week}\\n')\n for day in week:\n if day.strftime('%d') == '13' and day.weekday() == 4:\n return True\n print(day.strftime('%d'), weekdays[str(day.weekday())])\n return False\n\n\nprint(has_friday_13(3, 2020))\nprint(has_friday_13(10, 2017))\nprint(has_friday_13(1, 1985))","repo_name":"JonasFiechter/Challenges","sub_path":"python/friday_13.py","file_name":"friday_13.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19333567359","text":"import tensorflow as tf\nfrom rg_net import net_facenet\nimport numpy as np\nimport pickle\nimport cv2\nimport os\nimport time\nfrom collections import Counter\nimport glob\nfrom data_pro.data_utils import load_image, data_iter\n\n\nclass FacenetPre():\n def __init__(self):\n\n self.model_dir = '/Users/finup/Desktop/rg/face_rg_files/premodels/pm_facenet'\n self.embs_dir = '/Users/finup/Desktop/rg/face_rg_files/embs_pkl/ep_facenet'\n\n # gpu设置\n gpu_config = tf.ConfigProto()\n gpu_config.allow_soft_placement = True\n gpu_config.gpu_options.allow_growth = True\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n self.sess = tf.Session(config=gpu_config)\n net_facenet.load_model(self.sess, self.model_dir)\n # 返回给定名称的tensor\n self.images_placeholder = tf.get_default_graph().get_tensor_by_name(\"input:0\")\n self.embeddings = tf.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n print('建立facenet embedding模型')\n\n\n # load已知人脸\n self.files_fresh, self.known_names, self.known_embs, self.known_vms = None, None, None, None\n self.load_knows_pkl()\n\n # image_pre = cv2.imread('data_pro/pre_img.jpg') # 首次run sess比较耗时,因此在初始化的时候预使用一张sample照片,使得线上实时流不受首次影响而延迟\n # crop_image = np.asarray([cv2.resize(image_pre, (160, 160))])\n # face_embs = 
self.sess.run(self.embeddings,\n # feed_dict={self.images_placeholder: crop_image, self.phase_train_placeholder: False})\n # print('init..')\n\n\n @ staticmethod\n def is_newest(model_path, init_time):\n current_time = os.path.getctime(model_path)\n return init_time != None and current_time == init_time\n\n def load_knows_pkl(self):\n # load 最新已知人脸pkl\n self.files_fresh = sorted(glob.iglob(self.embs_dir+'/*'), key=os.path.getctime, reverse=True)[0]\n print(self.files_fresh)\n with open(self.files_fresh, 'rb') as fr:\n piccode_path_dct = pickle.load(fr)\n self.known_names = np.asarray(list(piccode_path_dct.keys()))\n self.known_embs = np.asarray(list(piccode_path_dct.values()))\n # 计算已知人脸向量的摩长,[|B|= reshape( (N,), (N,1) ) ],以便后边的计算实时流向量,计算最相似用户时用\n self.known_vms = np.reshape(np.linalg.norm(self.known_embs, axis=1), (len(self.known_embs), 1))\n\n peoples = [i.split('-')[0] for i in self.known_names]\n count_p = Counter(peoples)\n print(count_p)\n print('已知人脸-总共有m个人:', len(list(set(peoples))))\n print('共计n个vectors:', len(self.known_names) - 1)\n print('平均每人照片张数:', int((len(self.known_names) - 1) / len(list(set(peoples)))))\n print('目前还有x人没有照片:', 61 - len(list(set(peoples))))\n\n def d_cos(self, v, vs=None): # 输入需要是一张脸的v:(512,) or (512,1), knows_v:(N, 512)\n if vs is None:\n vs = self.known_embs\n vs_norm = self.known_vms\n else:\n if len(vs.shape) == 1:\n vs = np.reshape(vs, (1, len(vs)))\n vs_norm = np.reshape(np.linalg.norm(vs, axis=1), (len(vs), 1))\n else:\n vs = vs\n vs_norm = np.reshape(np.linalg.norm(vs, axis=1), (len(vs), 1))\n\n v = np.reshape(v, (1, len(v))) # 变为1行\n num = np.dot(vs, v.T) # (N, 1)\n denom = np.linalg.norm(v) * vs_norm # [|A|=float] * [|B|= reshape( (N,), (N,1) ) ] = (N, 1)\n cos = num / denom # 余弦值 A * B / |A| * |B| 本身也是0-1之间的...\n sim = 0.5 + 0.5 * cos # 归一化到0-1之间, (N, 1)\n\n # print('cos describe', max(cos), min(cos), np.mean(cos), np.var(cos))\n # print('sim describe', max(sim), min(sim), np.mean(sim), np.var(sim))\n sim = np.reshape(sim, (len(sim),)) # reshape((N,1), (N,)) 变成一维,方便后边算最大值最小值\n\n \"\"\"\n 人脸库中的照片pre_img.jpg,余弦距离参考值如下,有人脸图片cos均值在0.40842828,sim均值在 0.7042141,因此至少sim要大于0.70\n cos describe [0.99029934] [-0.07334533] 0.40842828 0.016055087\n sim describe [0.9951497] [0.46332735] 0.7042141 0.0040137717\n pre_1pic ['20190904205458_正面_024404-张佳丽'] [1] [0.9951497]\n\n 无人脸的图片pre_bug.jpg,余弦距离参考值如下,无人脸有内容图片cos均值在0.11156807,sim均值在 0.55578405\n cos describe [0.47486433] [-0.09186573] 0.11156807 0.004270094\n sim describe [0.7374322] [0.45406714] 0.55578405 0.0010675235\n pre_1pic ['未知的同学'] [0.0] [0]\n\n 近乎全白的图片pre_white.jpg,余弦距离参考值如下,白图cos均值在0.015752314,sim均值在 0.50787616\n cos describe [0.44681713] [-0.17200288] 0.015752314 0.00459828\n sim describe [0.7234086] [0.41399854] 0.50787616 0.0011495701\n pre_1pic ['未知的同学'] [0.0] [0]\n \"\"\"\n\n return sim\n\n def emb_toget_name(self, detect_face_embs_i, known_names_i, known_embs_i): # 一张脸进来\n cos_sim = self.d_cos(detect_face_embs_i)\n is_known = 0\n sim_p = max(cos_sim)\n if sim_p >= 0.75: # 越大越严格\n loc_similar_most = np.where(cos_sim == sim_p)\n is_known = 1\n return known_names_i[loc_similar_most][0], is_known, sim_p\n else:\n loc_similar_most = np.where(cos_sim == sim_p)\n # print('未识别到但最相似的人是:', sim_p, known_names_i[loc_similar_most][0])\n return '未知的同学', is_known, sim_p\n\n def emb_toget_name_old(self, detect_face_embs_i, known_names_i, known_embs_i): # 一张脸进来\n L2_dis = np.linalg.norm(detect_face_embs_i - known_embs_i, axis=1)\n is_known = 0\n sim_p = min(L2_dis)\n if sim_p < 0.6: # 越小越严格\n loc_similar_most = 
np.where(L2_dis == sim_p)\n is_known = 1\n return known_names_i[loc_similar_most][0], is_known, sim_p\n else:\n loc_similar_most = np.where(L2_dis == sim_p)\n print('未识别到但最相似的人是:', sim_p, known_names_i[loc_similar_most][0])\n return '未知的同学', is_known, sim_p\n\n def gen_knowns_db(self, pic_path, pkl_name):\n\n # 读marking人脸图片list\n imgs_pic, fns = load_image(pic_path, (160, 160))\n\n embds_arr = self.run_embds(imgs_pic, 64)\n embds_dict = dict(zip(fns, list(embds_arr)))\n\n if len(embds_dict) != 0:\n # 存已知人脸embs dict\n with open(pkl_name, 'wb') as f:\n pickle.dump(embds_dict, f)\n print('saving knows pkl...', len(embds_dict), pkl_name)\n\n @staticmethod\n def prewhiten(x):\n mean = np.mean(x)\n std = np.std(x)\n std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))\n y = np.multiply(np.subtract(x, mean), 1 / std_adj) # 图像归一化处理\n return y\n\n def imgs_get_names(self, crop_image):\n # print('rg_start', len(crop_image))\n # crop_image_nor = []\n # for aligned_pic in range(len(crop_image)):\n # prewhitened = self.prewhiten(crop_image[aligned_pic])\n # crop_image_nor.append(prewhitened)\n # crop_image_nor = np.stack(crop_image_nor)\n # face_embs = self.sess.run(self.embeddings,\n # feed_dict={self.images_placeholder: crop_image_nor,\n # self.phase_train_placeholder: False})\n # print('rg_emb_ok')\n\n face_embs = self.run_embds(crop_image, 1)\n # face_embs = face_embs / np.linalg.norm(face_embs, axis=1, keepdims=True) # 然后再求方向向量\n\n\n face_names = []\n is_knowns = []\n sim_pro_lst = []\n\n fresh_pkl = sorted(glob.iglob(self.embs_dir+'/*'), key=os.path.getctime, reverse=True)[0]\n if fresh_pkl != self.files_fresh:\n print(fresh_pkl)\n print(self.files_fresh)\n self.load_knows_pkl()\n for face_k in range(len(face_embs)):\n face_name, is_known, sim_pro = self.emb_toget_name(face_embs[face_k], self.known_names, self.known_embs)\n face_names.append(face_name)\n is_knowns.append(is_known)\n sim_pro_lst.append(sim_pro)\n # print('rg_choose_ok')\n\n return face_names, is_knowns, face_embs, sim_pro_lst\n\n def verify_db(self, konwn_path, unkonwn_path):\n\n fr = open(konwn_path, 'rb')\n k_names_embs = pickle.load(fr)\n k_embs = np.asarray(list(k_names_embs.values()))\n k_names = [i.split('@')[-1].split('-')[0] for i in list(k_names_embs.keys())]\n\n fr = open(unkonwn_path, 'rb')\n uk_names_embs = pickle.load(fr) # key 043374-人力资源部-张晓宛\n uk_all_sim = []\n uk_all_label = []\n for uk_name, uk_emb in uk_names_embs.items():\n # 计算每一个未知人脸的和所有m已知人脸的相似度值。产出相似度矩阵 [uk_n, k_m]\n v = np.reshape(uk_emb, (len(uk_emb), 1))\n uk_sims = self.d_cos(v, vs=k_embs)\n uk_all_sim.append(uk_sims)\n\n uk_name = uk_name.split('@')[-1].split('-')[0]\n uk_labels = []\n for k_name in k_names:\n if uk_name == k_name:\n uk_labels.append(1)\n else:\n uk_labels.append(0)\n uk_all_label.append(uk_labels)\n\n uk_all_sim = np.asarray(uk_all_sim)\n uk_all_label = np.asarray(uk_all_label)\n\n uk_all_sim = np.ravel(uk_all_sim)\n uk_all_label = np.ravel(uk_all_label)\n print(uk_all_sim.shape, uk_all_label.shape)\n\n from sklearn.metrics import confusion_matrix, roc_auc_score, classification_report\n\n auc = np.round(roc_auc_score(uk_all_label, uk_all_sim, average='micro'),4)\n print('AUC 为:', auc)\n print('\\n')\n\n for hold_v in [0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 0.99]:\n uk_all_sim_01 = [i >= hold_v for i in uk_all_sim]\n\n # report = classification_report(uk_all_label, uk_all_sim_01)\n # print(report)\n matrix = confusion_matrix(uk_all_label, uk_all_sim_01)\n print('hold_v >=', hold_v)\n print(matrix[0][0])\n print(matrix[0][1])\n 
print(matrix[1][0])\n print(matrix[1][1])\n print('\\n\\n')\n\n\n def run_embds(self, crop_images, batch_size=1):\n all_embeddings = None\n # for idx, data in enumerate(data_iter(crop_images, self.au_cfg.batch_size)):\n for idx, data in enumerate(data_iter(crop_images, batch_size)):\n print('batch n_th:', idx)\n data_tmp = np.asarray(data.copy(), dtype='float64') # fix issues #4 : (32, 112, 112, 3)\n data_tmp -= 127.5\n data_tmp *= 0.0078125\n\n face_embs = self.sess.run(self.embeddings, feed_dict={self.images_placeholder: data_tmp,\n self.phase_train_placeholder: False})\n if all_embeddings is None:\n all_embeddings = face_embs\n else:\n all_embeddings = np.row_stack((all_embeddings, face_embs))\n return all_embeddings\n\n\nif __name__ == \"__main__\":\n facenet_c = FacenetPre()\n\n time_stamp_pkl = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n facenet_c.gen_knowns_db('/Users/finup/Desktop/rg/face_rg_files/common_files/dc_marking_1known_trans',\n '/Users/finup/Desktop/rg/face_rg_files/embs_pkl/ep_facenet/' + time_stamp_pkl +'_facenet-known.pkl')\n facenet_c.gen_knowns_db('/Users/finup/Desktop/rg/face_rg_files/common_files/dc_marking_trans',\n '/Users/finup/Desktop/rg/face_rg_files/embs_pkl/ep_facenet/' + time_stamp_pkl +'_facenet-unknown.pkl')\n\n\n konwn_path='/Users/finup/Desktop/rg/face_rg_files/embs_pkl/ep_facenet/'+time_stamp_pkl+'_facenet-known.pkl'\n unkonwn_path = '/Users/finup/Desktop/rg/face_rg_files/embs_pkl/ep_facenet/'+time_stamp_pkl+'_facenet-unknown.pkl'\n facenet_c.verify_db(konwn_path, unkonwn_path)\n\n\n","repo_name":"sunruina2/face_rg_server","sub_path":"rg_model/model_facenet.py","file_name":"model_facenet.py","file_ext":"py","file_size_in_byte":12429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42265142103","text":"from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import Http404\nfrom django.utils import timezone\nfrom django.urls import reverse\nfrom blog.models import Blog\nfrom django.contrib.auth.models import User\n\nfrom blog.forms import BlogForm\n\ndef index(request):\n title = \"Welcome To Edge\"\n context = {\n 'title': title\n }\n return render(request, \"blog/index.html\", context)\n\ndef blog_list(request):\n all_blog = Blog.objects.all().order_by(\"-created_at\")\n paginator = Paginator(all_blog, 10) # Show 25 contacts per page\n page = request.GET.get('page')\n blogs = paginator.get_page(page)\n title = \"All my blog post\"\n context = {\n 'title': title,\n 'blogs': blogs\n }\n return render(request, \"blog/blog_list.html\", context)\n\n\"\"\"\n\ndef listing(request):\n contact_list = Contacts.objects.all()\n paginator = Paginator(contact_list, 25) # Show 25 contacts per page\n\n page = request.GET.get('page')\n contacts = paginator.get_page(page)\n return render(request, 'list.html', {'contacts': contacts})\n\"\"\"\n\n\ndef blog_detail(request, id=None):\n blog = get_object_or_404(Blog, id=id)\n context = {\n 'blog': blog\n }\n return render(request, \"blog/blog_detail.html\", context)\n\ndef add_blog(request):\n if not request.user.is_staff or not request.user.is_superuser:\n \traise Http404\n title = \"Create\"\n if request.method == \"POST\":\n form = BlogForm(request.POST, request.FILES)\n if form.is_valid():\n blog = form.save(commit=False)\n blog.author = request.user\n blog.created_at = timezone.now()\n blog.save()\n return redirect(\"details\", blog.id)\n else:\n form 
= BlogForm()\r\n    return render(request, \"blog/form.html\", {\"form\": form, \"title\": title})\r\n\r\ndef edit_blog(request, id=None):\r\n    if not request.user.is_staff or not request.user.is_superuser:\r\n        raise Http404\r\n    blog = get_object_or_404(Blog, id=id)\r\n    title = \"Update\"\r\n    #form = BlogForm(request.POST or None, instance=blog)\r\n    if request.method == \"POST\":\r\n        form = BlogForm(request.POST, request.FILES, instance=blog)\r\n        #form = BlogForm(request.POST)\r\n        if form.is_valid():\r\n            blog = form.save(commit=False)\r\n            blog.author = request.user\r\n            blog.created_at = timezone.now()\r\n            blog.save()\r\n            return redirect(\"blog:blog\")\r\n    else:\r\n        form = BlogForm(instance=blog)\r\n    return render(request, \"blog/form.html\", {\"form\": form, \"title\": title})\r\n\r\ndef delete_blog(request, id=None):\r\n    if not request.user.is_staff or not request.user.is_superuser:\r\n        raise Http404\r\n    if request.user.is_superuser:\r\n        blog = get_object_or_404(Blog, id=id)\r\n        title = \"Delete Page\"\r\n    else:\r\n        blog = get_object_or_404(Blog, id=id, author=request.user)\r\n    if request.method == \"POST\":\r\n        blog.delete()\r\n        return redirect(reverse(\"blog:blog\"))\r\n    else:\r\n        return render(request, \"blog/delete.html\", {\"blog\": blog,\"title\": title})\r\n","repo_name":"xarala221/djangoblog","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28372717864","text":"import pandas as pd\nimport numpy as np\nfrom bokeh.plotting import figure\nfrom bokeh.io import output_notebook, show, output_file\nfrom bokeh.models import ColumnDataSource, HoverTool, Panel, CategoricalColorMapper\nfrom bokeh.models.widgets import CheckboxGroup, Tabs, Panel\nfrom bokeh.layouts import column, row, WidgetBox\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.application import Application\nfrom bokeh.layouts import row, column, gridplot\nfrom bokeh.palettes import Category20_16  # missing import: Category20_16 is used in make_dataset below\n\nclass BokehHistogram():\n    '''\n    A class to simplify the making of interactive histograms with the Bokeh library.\n    Requires: Bokeh, Pandas, and Numpy.\n    '''\n\n    def __init__(self, colors=[\"SteelBlue\", \"Tan\"], height=600, width=600):\n        self.colors = colors\n        self.height = height\n        self.width = width\n\n    def hist_hover(self, dataframe, column, bins=30, log_scale=False, show_plot=True):\n        \"\"\"\n        A method for creating a single Bokeh histogram with hovertool interactivity.\n\n        Parameters:\n        ----------\n        Input:\n        dataframe {df}: Pandas dataframe\n        column {string}: column of dataframe to plot in histogram\n        bins {int}: number of bins in histogram\n        log_scale {bool}: True to plot on a log scale\n        colors {list -> string}: list of colors for histogram; first color default color, second color is hover color\n        show_plot {bool}: True to display the plot, False to store the plot in a variable (for use in later methods)\n\n        Output:\n        plot: Bokeh histogram with interactive hover tool\n\n        \"\"\"\n        # build histogram data with Numpy\n        hist, edges = np.histogram(dataframe[column], bins = bins)\n        hist_df = pd.DataFrame({column: hist,\n                                \"left\": edges[:-1],\n                                \"right\": edges[1:]})\n        hist_df[\"interval\"] = [\"%d to %d\" % (left, right) for left, \n                               right in zip(hist_df[\"left\"], hist_df[\"right\"])]\n        # bokeh histogram with hover tool\n        if log_scale == True:\n            hist_df[\"log\"] = np.log(hist_df[column])\n            src = ColumnDataSource(hist_df)\n            plot = figure(plot_height = self.height, plot_width = self.width,\n                          title = \"Histogram of {}\".format(column.capitalize()),\n                          x_axis_label = 
column.capitalize(),\n y_axis_label = \"Log Count\") \n plot.quad(bottom = 0, top = \"log\",left = \"left\", \n right = \"right\", source = src, fill_color = self.colors[0], \n line_color = \"black\", fill_alpha = 0.7,\n hover_fill_alpha = 1.0, hover_fill_color = self.colors[1])\n else:\n src = ColumnDataSource(hist_df)\n plot = figure(plot_height = self.height, plot_width = self.width,\n title = \"Histogram of {}\".format(column.capitalize()),\n x_axis_label = column.capitalize(),\n y_axis_label = \"Count\") \n plot.quad(bottom = 0, top = column,left = \"left\", \n right = \"right\", source = src, fill_color = self.colors[0], \n line_color = \"black\", fill_alpha = 0.7,\n hover_fill_alpha = 1.0, hover_fill_color = self.colors[1])\n\n # hover tool\n hover = HoverTool(tooltips = [('Interval', '@interval'),\n ('Count', str(\"@\" + column))])\n plot.add_tools(hover)\n\n # output\n if show_plot == True:\n show(plot)\n else:\n return plot\n\n def histotabs(self, dataframe, features, log_scale=False, show_plot=False):\n '''\n Builds tabbed interface for a series of histograms; calls hist_hover. Specifying 'show_plot=True' will simply display the histograms in sequence rather than in a tabbed interface.\n\n Parameters:\n ----------\n Input:\n dataframe {df}: a Pandas dataframe\n features {list -> string}: list of features to plot\n log_scale {bool}: True to plot on a log scale\n colors {list -> string}: list of colors for histogram; first color default color, second color is hover color\n show_plot {bool}: True to display the plot, False to store the plot in a variable (for use in later methods)\n\n Output:\n Tabbed interface for viewing interactive histograms of specified features\n\n '''\n hists = []\n for f in features:\n h = self.hist_hover(dataframe, f, log_scale=log_scale, show_plot=show_plot)\n p = Panel(child=h, title=f.capitalize())\n hists.append(p)\n t = Tabs(tabs=hists)\n show(t)\n\n def filtered_histotabs(self, dataframe, feature, filter_feature, log_scale=False, show_plot=False):\n '''\n Builds tabbed histogram interface for one feature filtered by another. 
Feature is numeric, filter feature is categorical.\n\n        Parameters:\n        ----------\n        Input:\n        dataframe {df}: a Pandas dataframe\n        features {list -> string}: list of features to plot\n        log_scale {bool}: True to plot on a log scale\n        colors {list -> string}: list of colors for histogram; first color default color, second color is hover color\n        show_plot {bool}: True to display the plot, False to store the plot in a variable (for use in later methods)\n\n        Output:\n        Tabbed interface for viewing interactive histograms of specified feature filtered by categorical filter feature\n\n        '''\n        hists = []\n        for col in dataframe[filter_feature].unique():\n            sub_df = dataframe[dataframe[filter_feature] == col]\n            histo = self.hist_hover(sub_df, feature, log_scale=log_scale, show_plot=show_plot)\n            p = Panel(child = histo, title=col)\n            hists.append(p)\n        t = Tabs(tabs=hists)\n        show(t)\n\n\ndef make_dataset(df, statuses=[\"viewed\", \"explored\",\"certified\"],\n                 range_start=0, range_end=200000, bin_width=1000):\n    assert range_start < range_end, \"Start must be less than end!\"\n\n    by_status = pd.DataFrame(columns=['proportion', 'left', 'right',\n                                      's_proportion', 's_interval',\n                                      'name', 'color'])\n    range_extent = range_end - range_start\n    # Iterate through the statuses\n    for i, status in enumerate(statuses):\n        # subset by status\n        subset = df[df[status]==1]\n        # histogram\n        st_hist, edges = np.histogram(subset[\"nevents\"],\n                                      bins=int(range_extent / bin_width),\n                                      range = [range_start, range_end]) \n        # divide counts by total to get proportion\n        st_df = pd.DataFrame({'proportion': st_hist / np.sum(st_hist),\n                              'left': edges[:-1], 'right': edges[1:]})\n        # format the proportion\n        st_df['s_proportion'] = ['%0.5f' % proportion for proportion in st_df['proportion']]\n        # format the interval\n        st_df['s_interval'] = ['%d to %d events' % (left, right) for left, right in zip(st_df['left'], st_df['right'])]\n        # assign status labels\n        st_df['name'] = status\n        # color for each status\n        st_df['color'] = Category20_16[i]\n        # add to overall dataframe\n        by_status = by_status.append(st_df)\n    # Overall dataframe\n    by_status = by_status.sort_values(['name', 'left'])\n    # Convert dataframe to column data source for bokeh\n    return ColumnDataSource(by_status)\n\ndef make_plot(src, plot_title=\"Histogram\"):\n    # blank plot with correct labels\n    p = figure(plot_width=700, plot_height=700, title=plot_title,\n               x_axis_label=\"Events\", y_axis_label=\"Count\")\n    \n    # Quad glyphs to create a histogram\n    p.quad(source=src, bottom=0, top='proportion', left='left',\n           right='right', color='color', fill_alpha=0.7, hover_fill_color='color', legend='name', hover_fill_alpha=1.0, line_color='black')\n    # Hover tool with vline mode\n    hover = HoverTool(tooltips=[('Status', '@name'),\n                                ('Events', '@s_interval'),\n                                ('Proportion', '@s_proportion')],\n                      mode='vline')\n    p.add_tools(hover)\n    return p\n\ndef update(attr, old, new):\n    # get the list of statuses for the graph\n    statuses_to_plot = [status_selection.labels[i] for i in status_selection.active]\n    # make a new dataset based on the selected statuses\n    # use make dataset function defined above\n    # the dataframe (edX) must be passed first; statuses is the second argument\n    new_src = make_dataset(edX, statuses_to_plot,\n                           range_start=0,\n                           range_end=200000,\n                           bin_width=1000)\n    src.data.update(new_src.data)\n\n\n# Bokeh app\ndef modify_doc(doc):\n\n\tstatus_selection = CheckboxGroup(labels=['viewed','explored', 'certified'], active=[0,1])\n\tstatus_selection.on_change('active', update)\n\tinitial_status = [status_selection.labels[i] for i in \tstatus_selection.active]\n\tsrc = make_dataset(edX, 
initial_status)\n\tp = make_plot(src)\n\tcontrols = WidgetBox(status_selection)\n\tlayout = row(controls, p)\n\ttab = Panel(child=layout, title='Events Histogram')\n\ttabs = Tabs(tabs=[tab])\n\tdoc.add_root(tabs)\n\n","repo_name":"jeremymiller00/harvard_ed_x","sub_path":"src/eda_functions.py","file_name":"eda_functions.py","file_ext":"py","file_size_in_byte":9184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73846666490","text":"# 빈 칸 채우기\ndef rearrange_board(board):\n t_board = list(zip(*board))\n length = len(t_board[0])\n for i in range(len(t_board)):\n n_row = ''.join(t_board[i]).replace('0', '')\n t_board[i] = '0'*(length-len(n_row)) + n_row\n \n tt_board = list(zip(*t_board))\n next_board = list(map(lambda x: ''.join(x), tt_board))\n return next_board\n \n# 없애기 + 카운팅\ndef remove_blocks(blocks, board):\n board = list(map(lambda x: list(x), board))\n \n for (r, c) in blocks:\n board[r][c] = '0'\n\n return board\n\n\n# 지울 수 있는 블록 탐색\ndef check_removable(board):\n removables = set()\n for r in range(len(board)-1):\n for c in range(len(board[0])-1):\n if board[r][c] != '0' :\n me = board[r][c]\n for i in range(3):\n nr, nc = r+dr[i], c+dc[i]\n if board[nr][nc] != me:\n break\n else:\n removables.add((r,c))\n for i in range(3):\n nr, nc = r+dr[i], c+dc[i]\n removables.add((nr,nc)) \n return removables\n\ndr = [0, 1, 1]\ndc = [1, 0, 1]\n\ndef solution(m, n, board):\n answer = 0 \n\n removables = check_removable(board)\n while removables:\n answer += len(removables)\n removed = remove_blocks(removables, board)\n board = rearrange_board(removed)\n removables = check_removable(board)\n \n return answer\n\n\nm = 4\nn = 5\nboard = [\n 'CCBDE', \n 'AAADE', \n 'AAABF', \n 'CCBBF']\n\nsolution(m, n, board)","repo_name":"eunjung-jenny/algorithm_programmers","sub_path":"2018_Kakao_blind/1차_프렌즈4블록.py","file_name":"1차_프렌즈4블록.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2392315949","text":"import time\nimport numpy as np\nimport nidaqmx\nfrom nidaqmx import constants\nfrom nidaqmx import stream_readers\nfrom matplotlib import pyplot as plt\nfrom threading import Thread, Event\nfrom functools import partial\nimport inspect\nfrom tqdm import tqdm\nsystem = nidaqmx.system.System.local()\nsystem.driver_version\n\nfor device in system.devices:\n print(device.name)\n print(device.ai_simultaneous_sampling_supported)\n print(device.ai_samp_modes)\n\nclass PLLreadout_nidaqmx(Thread):\n def __init__(self, scanTime, sampRate=300, bufferSize=1000, triggerSrc='') -> None:\n super().__init__()\n self.Ch00_name = 'A00'\n self.scanTime = scanTime\n print('The sweep time is '+str(self.scanTime)+' !\\n')\n self.sampRate= sampRate\n self.bufferSize = bufferSize\n self.dataSize = int(self.scanTime*self.sampRate)\n self.triggerSrc = triggerSrc\n \n self.container = np.zeros((11, self.dataSize))\n\n\n # container for the init function output\n self.init_output = None\n # container for the finish function output\n self.finish_output = None\n # stop event to stop measurement prematurely\n self.stop_event = Event()\n\n def stop(self):\n \"\"\"stops the measurement before executing the next step and if\n supported by the sequence functions also the currently running step\"\"\"\n self.stop_event.set()\n\n def run(self):\n \"\"\"performs the measurement. 
do not call directly, use start()\"\"\"\n while 1:\n self.read()\n\n def sleep(self, duration, get_now=time.perf_counter):\n now = get_now()\n end = now + duration\n while now < end:\n now = get_now()\n\n def read(self):\n with nidaqmx.Task() as task, nidaqmx.Task() as task2, nidaqmx.Task() as task3:\n task.ai_channels.add_ai_voltage_chan(physical_channel=\"/Dev1/ai0:7\", min_val=-10, max_val=10)\n task.ai_channels.add_ai_voltage_chan(physical_channel=\"/Dev1/ai16:18\", min_val=-10, max_val=10)\n if self.triggerSrc == '':\n print('Using start trigger from SRS delay generator')\n task.triggers.start_trigger.cfg_dig_edge_start_trig(trigger_source=\"/Dev1/PFI0\", trigger_edge=constants.Edge.RISING)#step by step measurement doesn't need the trigger\n #startTrigger\n #task2.di_channels.add_di_chan(lines=\"Dev1/PFI0\")#PFI0 is the trigger from the Standform delayer generator\n #task.ci_channels.add_ci_count_edges_chan(counter=\"Dev1/ctr0\")\n #task.timing.cfg_samp_clk_timing(rate=self.sampRate, source=self.triggerSrc,sample_mode=constants.AcquisitionType.FINITE, samps_per_chan=self.dataSize)\n task.timing.cfg_samp_clk_timing(rate=self.sampRate,sample_mode=constants.AcquisitionType.FINITE, samps_per_chan=self.dataSize)\n #trigger source is empty for fast scan for using internal clock. Use laser trigger as source for step-by-step measurement.\n \n # input_buf_size\n task.in_stream.input_buf_size = self.sampRate * 5 # plus some extra space \n reader = stream_readers.AnalogMultiChannelReader(task.in_stream) \n \n task.start()\n print('Start Scan!')\n reader.read_many_sample(self.container, self.dataSize, timeout=constants.WAIT_INFINITELY) \n print('Stop Scan!')\n task.wait_until_done(self.scanTime)\n time.sleep(5)\n \nif __name__ == '__main__':\n a = PLLreadout_nidaqmx(scanTime=10)\n \n a.start()\n delay = np.array(a.container).flatten()\n print(delay)\n\n\n plt.plot(delay)\n plt.show()\n\n\n","repo_name":"wangzhenhao1994/Lab_software","sub_path":"nidaqmx_usb6229_fastscan_asyn.py","file_name":"nidaqmx_usb6229_fastscan_asyn.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38869678974","text":"from src.config.config import Config\nfrom src.models.rule import Rule\nfrom src.models.term import Term\nfrom src.modules.fuzzy.fuzzy_number import FuzzyNumber\nfrom src.modules.geolocation.calc_dist import CalculateDistance\nfrom src.reasoner import Reasoner\n\n\ndef main():\n knowledge_base2 = [\n Rule(FuzzyNumber('near', (0, 0, 400), 'X'), []),\n Rule(FuzzyNumber('popular', (200, 500, 500), 'X'), []),\n Rule(FuzzyNumber('cheap', (0, 0, 100), 'X'), []),\n Rule(Term('hotel', ['sheraton', 354, 744, 4.2, 5]), []),\n Rule(Term('landmark', ['st_nedelia', 5, 330, 4.7, 5]), []),\n Rule(Term('landmark', ['nevski', 10, 1104, 4.7, 5]), []),\n Rule(Term('expensive', ['X', '_', '_', '_', '_']),\n [Term('landmark', ['Y', '_', 'P', '_', '_']),\n CalculateDistance('X', 'Y', 'D'),\n FuzzyNumber('near', (0, 0, 400), 'D'),\n FuzzyNumber('popular', (20, 500, 500), 'P')]),\n # Rule(Term('expensive', ['X', 'C', '_', '_', '_']),\n # FuzzyNumber('expensive', (120, 500, 500), 'C'))\n ]\n\n CalculateDistance.create_api(Config.get_secrets()['gmaps']['key'])\n\n knowledge_base3 = [\n # Distance\n Rule(FuzzyNumber('near', (0, 0, 400), 'X'), []),\n Rule(FuzzyNumber('distant', (300, 1400, 2000), 'X'), []),\n Rule(FuzzyNumber('away', (1500, 2000, 3000), 'X'), []),\n\n # Cost\n Rule(FuzzyNumber('cheapRoom', (0, 0, 100), 'X'), []),\n 
Rule(FuzzyNumber('regularRoom', (80, 100, 150), 'X'), []),\n Rule(FuzzyNumber('expensiveRoom', (120, 500, 500), 'X'), []),\n\n Rule(FuzzyNumber('cheapDinner', (0, 0, 20), 'X'), []),\n Rule(FuzzyNumber('regularDinner', (15, 25, 40), 'X'), []),\n Rule(FuzzyNumber('expensiveDinner', (30, 60, 60), 'X'), []),\n\n Rule(FuzzyNumber('cheapCoffee', (0, 0, 2), 'X'), []),\n Rule(FuzzyNumber('regularCoffee', (1, 5, 8), 'X'), []),\n Rule(FuzzyNumber('expensiveCoffee', (6, 10, 10), 'X'), []),\n\n Rule(FuzzyNumber('cheapVisit', (0, 0, 5), 'X'), []),\n Rule(FuzzyNumber('regularVisit', (4, 7, 10), 'X'), []),\n Rule(FuzzyNumber('expensiveVisit', (9, 15, 15), 'X'), []),\n\n # Popularity\n Rule(FuzzyNumber('unknown', (0, 70, 100), 'X'), []),\n Rule(FuzzyNumber('common', (80, 200, 500), 'X'), []),\n Rule(FuzzyNumber('popular', (400, 1500, 1500), 'X'), []),\n\n # Quality\n Rule(FuzzyNumber('poor', (0, 0, 4), 'X'), []),\n Rule(FuzzyNumber('good', (3, 4, 5), 'X'), []),\n Rule(FuzzyNumber('great', (4, 5, 5), 'X'), []),\n\n Rule(Term('hotel', ['sheraton', 'pl. \"Sveta Nedelya\" 5, 1000 Sofia Center, Sofia', 354, 744, 4.2]), []),\n Rule(Term('hotel', ['kapri', 'ul. \"Han Omurtag\" 76, 1124 g.k. Yavorov, Sofia', 109, 24, 4.2]), []),\n Rule(Term('hotel', ['sense', 'bul. \"Tsar Osvoboditel\" 16, 1000 Sofia Center, Sofia', 187, 651, 4.5]), []),\n Rule(Term('hotel', ['grand_hotel', 'ul. \"General Yosif V. Gourko\" 1, 1000 Sofia Center, Sofia', 170, 717, 4.6]), []),\n Rule(Term('hotel', ['les_fleurs', '21, Vitosha Blvd., 1000 Sofia', 274, 183, 4.3]), []),\n Rule(Term('hotel', ['hilton', 'Bulevard \"Bulgaria\" 1, 1421 g.k. Lozenets, Sofia', 230, 405, 4.5]), []),\n Rule(Term('hotel', ['hemus', 'бул. Черни Връх 31, 1421 g.k. Lozenets, Sofia', 77, 420, 3.4]), []),\n\n Rule(Term('dinner', ['happy', 'ulitsa „Georgi S. Rakovski“ 145A, 1000 Sofia Center, Sofia', 20, 1126, 4.2]), []),\n Rule(Term('dinner', ['social', 'bul. \"Vitosha\" 16, 1000 Sofia Center, Sofia', 20, 1024, 4.2]), []),\n Rule(Term('dinner', ['shtastlivetsa', 'bul. \"Vitosha\" 27, 1000 Sofia Center, Sofia', 30, 1354, 4.5]), []),\n Rule(Term('dinner', ['mrpizza', 'bulevard \"Cherni vrah\" 38, 1000 g.k. Lozenets, Sofia', 15, 534, 4.1]), []),\n Rule(Term('dinner', ['sasa', 'pl. \"Narodno sabranie\" 4, 1000 Sofia Center, Sofia', 35, 268, 4.3]), []),\n Rule(Term('dinner', ['skaptobara', 'ul. \"Iskar\" 11А, 1000 Sofia Center, Sofia', 10, 494, 4.5]), []),\n\n Rule(Term('landmark', ['st_nedelia', 'площад Света Неделя 20, 1000 Sofia Center, Sofia', 0, 330, 4.7]), []),\n Rule(Term('landmark', ['nevski', 'pl. \"Sveti Aleksandar Nevski\", 1000 Sofia Center, Sofia', 0, 1104, 4.7]), []),\n Rule(Term('landmark', ['orlov_most', 'бул. „Цар Освободител“ 33, 1504 Sofia Center, Sofia', 0, 1671, 4.2]), []),\n Rule(Term('landmark', ['national_palace_culture', 'National Culture Palace, Bulevard \"Bulgaria\", 1463 Ndk, Sofia', 10, 1096, 4.2]), []),\n Rule(Term('landmark', ['vitosha', 'ul. \"Detski mir\", Sofia', 0, 2584, 4.7]), []),\n\n Rule(Term('coffee', ['costa', 'ploshtad \"Knyaz Aleksandar I\" 4, 1000 Sofia Center, Sofia', 5, 336, 4.2]), []),\n Rule(Term('coffee', ['starbucks', 'бул. Васил Левски, 1А, 1042 Sofia Center, Sofia', 5, 534, 4.1]), []),\n Rule(Term('coffee', ['memento', 'ulitsa „Georgi S. Rakovski“ 106, 1000 Sofia Center, Sofia', 6, 508, 4.3]), []),\n Rule(Term('coffee', ['modera', 'София, ул. 
Йордан Йосифов 8а, 1700 Studentski Kompleks, Sofia', 4, 389, 4.1]), []),\n Rule(Term('coffee', ['tu', 'bulevard \"Cherni vrah\" 96, 1407 Hladilnika, Sofia', 3, 11, 2.2]), []),\n\n Rule(Term('expensive', ['X', 'XA', '_', '_', '_']),\n [Term('landmark', ['Y', 'YA', '_', 'P', '_']),\n CalculateDistance('XA', 'YA', 'D'),\n FuzzyNumber('near', (0, 0, 400), 'D'),\n FuzzyNumber('popular', (400, 1500, 1500), 'P')]),\n\n Rule(Term('regular', ['X', 'XA', '_', '_', '_']),\n [Term('landmark', ['Y', 'YA', '_', 'P', '_']),\n CalculateDistance('XA', 'YA', 'D'),\n FuzzyNumber('near', (0, 0, 400), 'D'),\n FuzzyNumber('common', (80, 200, 500), 'P')]),\n\n Rule(Term('cheap', ['X', 'XA', '_', '_', '_']),\n [Term('landmark', ['Y', 'YA', '_', 'P', '_']),\n CalculateDistance('XA', 'YA', 'D'),\n FuzzyNumber('near', (0, 0, 400), 'D'),\n FuzzyNumber('unknown', (0, 70, 100), 'P')]),\n\n Rule(Term('great', ['X', 'XA', '_', '_', '_']),\n [Term('dinner', ['X', '_', '_', '_', '_']),\n Term('hotel', ['Y', 'YA', '_', 'P', '_']),\n CalculateDistance('XA', 'YA', 'D'),\n FuzzyNumber('near', (0, 0, 400), 'D'),\n FuzzyNumber('popular', (400, 1500, 1500), 'P')]),\n\n Rule(Term('good', ['X', 'XA', '_', '_', '_']),\n [Term('dinner', ['X', '_', '_', '_', '_']),\n Term('hotel', ['Y', 'YA', '_', 'P', '_']),\n CalculateDistance('XA', 'YA', 'D'),\n FuzzyNumber('near', (0, 0, 400), 'D'),\n FuzzyNumber('common', (80, 200, 500), 'P')]),\n\n Rule(Term('poor', ['X', 'XA', '_', '_', '_']),\n [Term('dinner', ['X', '_', '_', '_', '_']),\n Term('hotel', ['Y', 'YA', '_', 'P', '_']),\n CalculateDistance('XA', 'YA', 'D'),\n FuzzyNumber('near', (0, 0, 400), 'D'),\n FuzzyNumber('unknown', (0, 70, 100), 'P')]),\n\n Rule(Term('great', ['X', 'XA', '_', '_', '_']),\n [Term('hotel', ['X', '_', '_', '_', '_']),\n Term('landmark', ['Y', 'YA', '_', 'P', '_']),\n CalculateDistance('XA', 'YA', 'D'),\n FuzzyNumber('away', (1500, 2000, 3000), 'D'),\n FuzzyNumber('popular', (400, 1500, 1500), 'P')]),\n\n Rule(Term('good', ['X', 'XA', '_', '_', '_']),\n [Term('hotel', ['X', '_', '_', '_', '_']),\n Term('landmark', ['Y', 'YA', '_', 'P', '_']),\n CalculateDistance('XA', 'YA', 'D'),\n FuzzyNumber('distant', (300, 1400, 2000), 'D'),\n FuzzyNumber('popular', (400, 1500, 1500), 'P')]),\n\n Rule(Term('poor', ['X', 'XA', '_', '_', '_']),\n [Term('hotel', ['X', '_', '_', '_', '_']),\n Term('landmark', ['Y', 'YA', '_', 'P', '_']),\n CalculateDistance('XA', 'YA', 'D'),\n FuzzyNumber('near', (0, 0, 400), 'D'),\n FuzzyNumber('popular', (400, 1500, 1500), 'P')]),\n\n Rule(Term('great', ['X', 'XA', '_', '_', '_']),\n [Term('coffee', ['X', '_', '_', '_', '_']),\n Term('landmark', ['Y', 'YA', '_', 'P', '_']),\n CalculateDistance('XA', 'YA', 'D'),\n FuzzyNumber('near', (0, 0, 400), 'D'),\n FuzzyNumber('popular', (400, 1500, 1500), 'P')]),\n\n Rule(Term('good', ['X', 'XA', '_', '_', '_']),\n [Term('coffee', ['X', '_', '_', '_', '_']),\n Term('landmark', ['Y', 'YA', '_', 'P', '_']),\n CalculateDistance('XA', 'YA', 'D'),\n FuzzyNumber('near', (0, 0, 400), 'D'),\n FuzzyNumber('common', (80, 200, 500), 'P')]),\n\n Rule(Term('poor', ['X', 'XA', '_', '_', '_']),\n [Term('coffee', ['X', '_', '_', '_', '_']),\n Term('landmark', ['Y', 'YA', '_', 'P', '_']),\n CalculateDistance('XA', 'YA', 'D'),\n FuzzyNumber('near', (0, 0, 400), 'D'),\n FuzzyNumber('unknown', (0, 70, 100), 'P')]),\n ]\n\n reasoner = Reasoner(knowledge_base3)\n\n query_results = reasoner.complex_query([Term('expensive', ['sheraton', 'pl. 
\"Sveta Nedelya\" 5, 1000 Sofia Center, Sofia', 354, 744, 4.2])])\n print('Query result:')\n\n for result in query_results:\n print(result)\n\n print()\n\n search_result = reasoner.complex_query( [Term('landmark', ['X', '_', 'C', 'P', '_']),\n FuzzyNumber('popular', (20, 500, 500), 'P'),\n FuzzyNumber('cheap', (0, 0, 5), 'C')])\n\n print('Search result:')\n for result in search_result:\n print(result)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"GeorgiMateev/fuzzy-search","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9900,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"14427588330","text":"'''\n # @ Author: Gilberto Charles\n # @ Create Time: 2019-08-14 12:17:02\n # @ Modified by: Gilberto Charles\n # @ Modified time: 2019-08-14 12:17:33\n # @ Description: Main function to read and populate the Adjacency Matrix\n '''\n\n\n# -*- coding: utf-8 -*-\nimport numpy as np\nfrom Graph import AdjacencyMatrix\n\ndef main():\n ## Preenchimento da matriz\n qt_nodes = int(input(f\"Quantos vértices deseja adicionar? \"))\n qt_arestas = int(input(f\"Quantas arestas deseja adicionar? \"))\n matriz_adjacencia = np.zeros([qt_nodes, qt_nodes], dtype = int)\n for i in range(0, qt_arestas):\n aresta1 = int(input(f\"Entre com o primeiro número da aresta {i + 1}: \"))\n aresta2 = int(input(f\"Entre com o segundo número da aresta {i + 1}: \"))\n matriz_adjacencia[aresta1 -1 ,aresta2 - 1] = 1\n matriz_adjacencia[aresta2 - 1,aresta1 - 1] = 1\n print()\n ## Print da matriz para o terminal\n print('-='*20)\n print('Imprimindo a matriz completa')\n print(matriz_adjacencia)\n\n Object = AdjacencyMatrix(qt_nodes)\n Object.plot_graph_1(matriz_adjacencia, 'Matriz de Adjacencia')\nif __name__ == '__main__':\n main()\n","repo_name":"charlscript/adjacencyMatrixGraph","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"69799459772","text":"# https://jokerldg.github.io/algorithm/2021/06/12/palindrome.html\n\nfrom sys import stdin\n\n\ndef ispalindrome(text, left, right):\n rev_text = text[::-1]\n if text == rev_text:\n return 0\n else:\n while left < right:\n # 만약 양쪽이 같지 않다면 left와 right를 이동한 상태에서 점검\n if text[left] != text[right]:\n left_pal = ispseudo(text, left + 1, right)\n right_pal = ispseudo(text, left, right - 1)\n\n if left_pal or right_pal:\n return 1\n else:\n return 2\n # 양쪽이 같을 경우 다음 알파벳으로 이동\n else:\n left += 1\n right -= 1\n\n\ndef ispseudo(text, left, right):\n while left < right:\n if text[left] == text[right]:\n left += 1\n right -= 1\n else:\n return False\n return True\n\n\nn = int(stdin.readline().strip())\n\nfor _ in range(n):\n text = input().strip()\n left, right = 0, len(text) - 1\n print(ispalindrome(text, left, right))\n","repo_name":"AvelChoi/Baekjoon_study","sub_path":"17609.py","file_name":"17609.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20742815252","text":"from django.shortcuts import render\n\nfrom .models import Person, MailList\n\n# Create your views here.\n\n\nMENU = [\n {'title': 'Home page', 'page': 'home'},\n {'title': 'About', 'page': 'about'},\n {'title': 'Contact', 'page': 'contact'}\n]\n\n\ndef home(request):\n TITLE, PAGE = MENU[0]['title'], MENU[0]['page']\n return render(request, f'{PAGE}.html', {'TITLE': TITLE, 
'MENU': MENU})\n\ndef about(request):\n    TITLE, PAGE = MENU[1]['title'], MENU[1]['page']\n    ############\n    # Database #\n    ############\n    PEOPLE = [{\n        'name': person.name,\n        'title': person.title,\n        'desc': person.descript,\n        'email': person.email,\n        'img': person.img\n    } for person in Person.objects.all()]\n    return render(request, f'{PAGE}.html', {'TITLE': TITLE, 'MENU': MENU, 'PEOPLE': PEOPLE})\n\ndef contact(request):\n    if request.method == 'POST':\n        fullname = request.POST['fullname']\n        email = request.POST['email']\n        ml = MailList(fullname=fullname, email=email)\n        ml.save()\n        print(f'{fullname} was added to the mailing list!')\n    TITLE, PAGE = MENU[2]['title'], MENU[2]['page']\n    return render(request, f'{PAGE}.html', {'TITLE': TITLE, 'MENU': MENU})\n","repo_name":"diegoinacio/basic-app-django","sub_path":"project/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74518126970","text":"def merge_sort(arr):\n    left_index = 0\n    right_index = 0\n\n    if len(arr) < 2:\n        return arr\n\n    left = arr[0:(len(arr)//2)]\n    right = arr[len(arr)//2:len(arr)]\n\n    left = merge_sort(left)\n    right = merge_sort(right)\n\n    return merge(left, right, left_index, right_index)\n\ndef merge(left, right, left_index, right_index):\n    results = []\n\n    # bug fix: loop while BOTH lists still have unmerged items\n    while len(left) > left_index and len(right) > right_index:\n        # bug fix: append the smaller element first to keep ascending order\n        if left[left_index] > right[right_index]:\n            results.append(right[right_index])\n            right_index += 1\n        else:\n            results.append(left[left_index])\n            left_index += 1\n\n    while len(left) > left_index:\n        results.append(left[left_index])\n        left_index += 1\n\n    while len(right) > right_index:\n        results.append(right[right_index])\n        right_index += 1\n\n    return results","repo_name":"cliffpham/algos_data_structures","sub_path":"algos/sorts/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43160901476","text":"import disnake \nfrom disnake.ext import commands\nfrom buttons.create_ticket import CreateTicketButton\n\nclass TicketCog(commands.Cog):\n    def __init__(self,bot:commands.Bot):\n        self.bot = bot\n        print('Тикеты готовы')\n    \n\n    @commands.command(name='ticket_public')\n    async def ticket_public(self,ctx):\n        embed = disnake.Embed(\n            title='Связь с персоналом',\n            description='Чтобы связаться с персоналом нажмите на кнопку ниже\\n**Рофлотикеты наказуемы**',\n            color=disnake.Color.from_rgb(101, 201, 180)\n        )\n        view = CreateTicketButton(bot=self.bot)\n        await ctx.send(embed=embed,view=view)\n\n\n    @commands.Cog.listener()\n    async def on_ready(self):\n        channel = await self.bot.fetch_channel(1126618053581484032)\n        message = await channel.fetch_message(1126966106041634968)\n        embed = disnake.Embed(\n            title='Связь с персоналом',\n            description='Чтобы связаться с персоналом нажмите на кнопку ниже\\n**Рофлотикеты наказуемы**',\n            color=disnake.Color.from_rgb(101, 201, 180)\n        )\n        await message.edit(view=CreateTicketButton(self.bot),embed=embed)\n        print('Всё готово,сообщение отредактировано')\n\n\ndef setup(bot):\n    bot.add_cog(TicketCog(bot))\n","repo_name":"Dovolentoboy/Ticket_System","sub_path":"tickets/ticket.py","file_name":"ticket.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12850115904","text":"\"\"\"\nUtility functions for state functions\n\n.. 
versionadded:: 2018.3.0\n\"\"\"\n\n\nimport copy\n\nimport salt.state\nfrom salt.exceptions import CommandExecutionError\n\n_empty = object()\n\n\ndef gen_tag(low):\n \"\"\"\n Generate the running dict tag string from the low data structure\n \"\"\"\n return \"{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}\".format(low)\n\n\ndef search_onfail_requisites(sid, highstate):\n \"\"\"\n For a particular low chunk, search relevant onfail related states\n \"\"\"\n onfails = []\n if \"_|-\" in sid:\n st = salt.state.split_low_tag(sid)\n else:\n st = {\"__id__\": sid}\n for fstate, fchunks in highstate.items():\n if fstate == st[\"__id__\"]:\n continue\n else:\n for mod_, fchunk in fchunks.items():\n if not isinstance(mod_, str) or mod_.startswith(\"__\"):\n continue\n else:\n if not isinstance(fchunk, list):\n continue\n else:\n # bydefault onfail will fail, but you can\n # set onfail_stop: False to prevent the highstate\n # to stop if you handle it\n onfail_handled = False\n for fdata in fchunk:\n if not isinstance(fdata, dict):\n continue\n onfail_handled = fdata.get(\"onfail_stop\", True) is False\n if onfail_handled:\n break\n if not onfail_handled:\n continue\n for fdata in fchunk:\n if not isinstance(fdata, dict):\n continue\n for knob, fvalue in fdata.items():\n if knob != \"onfail\":\n continue\n for freqs in fvalue:\n for fmod, fid in freqs.items():\n if not (\n fid == st[\"__id__\"]\n and fmod == st.get(\"state\", fmod)\n ):\n continue\n onfails.append((fstate, mod_, fchunk))\n return onfails\n\n\ndef check_onfail_requisites(state_id, state_result, running, highstate):\n \"\"\"\n When a state fail and is part of a highstate, check\n if there is onfail requisites.\n When we find onfail requisites, we will consider the state failed\n only if at least one of those onfail requisites also failed\n\n Returns:\n\n True: if onfail handlers succeeded\n False: if one on those handler failed\n None: if the state does not have onfail requisites\n\n \"\"\"\n nret = None\n if state_id and state_result and highstate and isinstance(highstate, dict):\n onfails = search_onfail_requisites(state_id, highstate)\n if onfails:\n for handler in onfails:\n fstate, mod_, fchunk = handler\n for rstateid, rstate in running.items():\n if \"_|-\" in rstateid:\n st = salt.state.split_low_tag(rstateid)\n # in case of simple state, try to guess\n else:\n id_ = rstate.get(\"__id__\", rstateid)\n if not id_:\n raise ValueError(\"no state id\")\n st = {\"__id__\": id_, \"state\": mod_}\n if mod_ == st[\"state\"] and fstate == st[\"__id__\"]:\n ofresult = rstate.get(\"result\", _empty)\n if ofresult in [False, True]:\n nret = ofresult\n if ofresult is False:\n # as soon as we find an errored onfail, we stop\n break\n # consider that if we parsed onfailes without changing\n # the ret, that we have failed\n if nret is None:\n nret = False\n return nret\n\n\ndef check_result(running, recurse=False, highstate=None):\n \"\"\"\n Check the total return value of the run and determine if the running\n dict has any issues\n \"\"\"\n if not isinstance(running, dict):\n return False\n\n if not running:\n return False\n\n ret = True\n for state_id, state_result in running.items():\n expected_type = dict\n # The __extend__ state is a list\n if \"__extend__\" == state_id:\n expected_type = list\n if not recurse and not isinstance(state_result, expected_type):\n ret = False\n if ret and isinstance(state_result, dict):\n result = state_result.get(\"result\", _empty)\n if result is False:\n ret = False\n # only override return value if we 
are not already failed\n elif result is _empty and isinstance(state_result, dict) and ret:\n ret = check_result(state_result, recurse=True, highstate=highstate)\n # if we detect a fail, check for onfail requisites\n if not ret:\n # ret can be None in case of no onfail reqs, recast it to bool\n ret = bool(\n check_onfail_requisites(state_id, state_result, running, highstate)\n )\n # return as soon as we got a failure\n if not ret:\n break\n return ret\n\n\ndef merge_subreturn(original_return, sub_return, subkey=None):\n \"\"\"\n Update an existing state return (`original_return`) in place\n with another state return (`sub_return`), i.e. for a subresource.\n\n Returns:\n dict: The updated state return.\n\n The existing state return does not need to have all the required fields,\n as this is meant to be called from the internals of a state function,\n but any existing data will be kept and respected.\n\n It is important after using this function to check the return value\n to see if it is False, in which case the main state should return.\n Prefer to check `_ret['result']` instead of `ret['result']`,\n as the latter field may not yet be populated.\n\n Code Example:\n\n .. code-block:: python\n\n def state_func(name, config, alarm=None):\n ret = {'name': name, 'comment': '', 'changes': {}}\n if alarm:\n _ret = __states__['subresource.managed'](alarm)\n __utils__['state.merge_subreturn'](ret, _ret)\n if _ret['result'] is False:\n return ret\n \"\"\"\n if not subkey:\n subkey = sub_return[\"name\"]\n\n if sub_return[\"result\"] is False:\n # True or None stay the same\n original_return[\"result\"] = sub_return[\"result\"]\n\n sub_comment = sub_return[\"comment\"]\n if not isinstance(sub_comment, list):\n sub_comment = [sub_comment]\n original_return.setdefault(\"comment\", [])\n if isinstance(original_return[\"comment\"], list):\n original_return[\"comment\"].extend(sub_comment)\n else:\n if original_return[\"comment\"]:\n # Skip for empty original comments\n original_return[\"comment\"] += \"\\n\"\n original_return[\"comment\"] += \"\\n\".join(sub_comment)\n\n if sub_return[\"changes\"]: # changes always exists\n original_return.setdefault(\"changes\", {})\n original_return[\"changes\"][subkey] = sub_return[\"changes\"]\n\n return original_return\n\n\ndef get_sls_opts(opts, **kwargs):\n \"\"\"\n Return a copy of the opts for use, optionally load a local config on top\n \"\"\"\n opts = copy.deepcopy(opts)\n\n if \"localconfig\" in kwargs:\n return salt.config.minion_config(kwargs[\"localconfig\"], defaults=opts)\n\n if \"saltenv\" in kwargs:\n saltenv = kwargs[\"saltenv\"]\n if saltenv is not None:\n if not isinstance(saltenv, str):\n saltenv = str(saltenv)\n if opts[\"lock_saltenv\"] and saltenv != opts[\"saltenv\"]:\n raise CommandExecutionError(\n \"lock_saltenv is enabled, saltenv cannot be changed\"\n )\n opts[\"saltenv\"] = kwargs[\"saltenv\"]\n\n if \"pillarenv\" in kwargs or opts.get(\"pillarenv_from_saltenv\", False):\n pillarenv = kwargs.get(\"pillarenv\") or kwargs.get(\"saltenv\")\n if pillarenv is not None and not isinstance(pillarenv, str):\n opts[\"pillarenv\"] = str(pillarenv)\n else:\n opts[\"pillarenv\"] = pillarenv\n\n return opts\n","repo_name":"saltstack/salt","sub_path":"salt/utils/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":8630,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"70421114492","text":"from DataLoader.Helper.Helper_Global2Local import Global2Local\nimport threading\nfrom 
Common.CommonClasses import *\nfrom DataLoader.DataLoader0_ReadAnns import DataLoader0_ReadAnns\nfrom DataLoader.DataVis import *\nfrom DataLoader.Helper.Helper_TargetUnpacker import *\nfrom Common.Calculation import Calculation\nimport time\n\nclass DataLoader1_ReadAll():\n def __init__(self, start=0, N=1000):\n self.beginAt = start\n self.totalN = N#self.anns.N\n self.initHelper()\n\n def initHelper(self):\n self.anns = DataLoader0_ReadAnns()\n self.conv_g2l = Global2Local()\n self.dataLabel = np.zeros(\n (self.totalN, GridParams().numGridX, GridParams().numGridY, GridParams().numBBox, GridParams().dimFeat),\n dtype=float)\n self.imgList = np.zeros((self.totalN, 448, 448, 3), dtype=float)\n\n def getDataLabelFromTo(self, start, partN):\n end, N = getEnd(start, partN, self.totalN)\n for i in range(start, end):\n img, objIds, isMoreThanOneObjPerGrid, counter, label = self.anns.getTargetAt(i + self.beginAt)\n self.imgList[i] = img\n self.dataLabel[i,:] = label\n # if np.mod(i, 100) == 0:\n # print(i)\n print(\"Done Reading imgs from %d to %d\" %(start, end))\n\n def getDataLable(self):\n print(\"Allocating threads to read imgs\")\n partN = 500\n #nThread = int(self.anns.N/partN) + 1\n nThread = getNumThread(self.totalN, partN)\n #print(nThread)\n threads=[]\n for i in range(0, nThread):\n start = i*partN\n threads.append(threading.Thread(target=self.getDataLabelFromTo, args=(start, partN)))\n threads[i].start()\n #print(i)\n\n for thread in threads:\n thread.join()\n\nif __name__ == '__main__':\n\n\n r = DataLoader0_ReadAnns()\n visG = Visualizer_Global()\n unpacker = TargetUnpacker()\n c = Calculation()\n\n reader = DataLoader1_ReadAll(1000, 1000)\n s = time.time()\n reader.getDataLable()\n print(time.time() - s)\n\n index = 150\n img = reader.imgList[index].copy()\n label = reader.dataLabel[index]\n objIds, offset, bb = unpacker.unpackLabel(label)\n print(label.shape)\n fakebbs = np.ones_like(bb) * 5 + bb\n iou = c.getIOU(fakebbs, bb)\n img = visG.drawBBox(img, fakebbs, YOLOObjects().getNamesFromObjIds(objIds))\n img = visG.drawBBox(img, bb, YOLOObjects().getNamesFromObjIds(objIds))\n visG.showImg(img)\n","repo_name":"ElliotHYLee/MyYOLO","sub_path":"DataLoader/DataLoader1_ReadAll.py","file_name":"DataLoader1_ReadAll.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41633979334","text":"import numpy as np\nimport Bees\nimport random\nimport copy\nfrom Structure import *\n\n\nclass ABC_algorithm():\n # artificial bee colony algorithm \n \n def __init__(self, Demands_amount, Demands, Stations_amount, Stations, Blocks_amount, Blocks, Employed_bees_num, Onlooker_bees_num, Max_improvement_try, Pc, Pm, K_Tournomet_Percent, Percedure_Type):\n self.demands_amount = Demands_amount\n self.demands = Demands\n self.stations_amount = Stations_amount\n self.stations = Stations\n self.blocks_amount = Blocks_amount\n self.blocks = Blocks\n self.employed_bees_num = Employed_bees_num\n self.onlooker_bees_num = Onlooker_bees_num\n self.max_improvement_try = Max_improvement_try\n self.crossover_probbility = Pc\n self.mutation_probblity = Pm/self.demands_amount\n self.percedure_type = Percedure_Type\n self.k_tournoment = int(K_Tournomet_Percent*self.employed_bees_num)\n \n\n def employed_bees(self, population):\n # making initial random answers (equal to amount of employed bees number)\n # do the improvement-try once on each of them\n # return the made answers\n \n if(len(population) == 0):\n for i in 
range(self.employed_bees_num):\n bee = self._making_bee()\n population.append(bee)\n \n # we try for improvement one time for each bee, if change happens we add one to improvement-try property of that bee\n for bee in population:\n change_flag = self._try_for_improvement(population, bee)\n if(change_flag): \n bee.improvement_try = 0\n else: \n bee.improvement_try += 1\n \n def _making_bee(self):\n # each bee is a (amount of demands * amount of blocks) matrix\n \n bee = Bees.Bee(self.demands, self.blocks)\n\n data = []\n for demand in self.demands:\n demand_answer = self._make_demand_answer(demand)\n data.append(demand_answer)\n \n bee.data = data\n return bee\n \n def _make_demand_answer(self, demand):\n data = [0 for i in range(self.blocks_amount)]\n destination_flag = False\n\n # finding the first cell\n choosing_options = []\n for b_indx in range(self.blocks_amount):\n if ((demand.origin == self.blocks[b_indx].origin) and \n (demand.destination >= self.blocks[b_indx].destination)):\n choosing_options.append(b_indx)\n choosed_index = random.choice(choosing_options)\n data[choosed_index] = 1\n \n if(demand.destination == self.blocks[choosed_index].destination):\n destination_flag = True\n \n while(destination_flag == False):\n choosing_options = []\n for b_indx in range(self.blocks_amount):\n if ((self.blocks[choosed_index].destination == self.blocks[b_indx].origin) and \n (demand.destination >= self.blocks[b_indx].destination)):\n choosing_options.append(b_indx)\n \n if(len(choosing_options)==0):\n print(\"we are in trouble!!! in (make_demand_answer)\") \n \n choosed_index = random.choice(choosing_options)\n data[choosed_index] = 1\n \n if(demand.destination == self.blocks[choosed_index].destination):\n destination_flag = True\n \n return data \n\n def _validality_check(self, bee):\n \n feasiblity_flag = True\n\n block_limits_check = [0 for i in range(self.stations_amount)]\n vagon_limits_check = [0 for i in range(self.stations_amount)]\n \n checked_blocks = [0 for i in range(self.blocks_amount)]\n\n for demand_solution in range(len(bee.data)):\n for b in range(self.blocks_amount):\n if(feasiblity_flag):\n if (bee.data[demand_solution][b]==1):\n o = self.blocks[b].origin\n d = self.blocks[b].destination\n if(checked_blocks[b]!=1):\n checked_blocks[b] = 1\n block_limits_check[o] += 1\n vagon_limits_check[d] += self.demands[demand_solution].volume\n if(block_limits_check[o]>self.stations[o].block_capacity):\n feasiblity_flag = False\n if(vagon_limits_check[d]>self.stations[d].vagon_capacity):\n feasiblity_flag = False\n bee.feasiblity = feasiblity_flag\n return feasiblity_flag\n \n def onlooker_bees(self, population):\n # by rolette wheel precedure we do \"onlooker_bees_num\" times cross_over and mutation,\n # on solution that employed bees have made\n \n for bee in population:\n if(bee.fitness == None):\n Bees._calculating_fitness(bee, self.blocks, self.demands)\n \n sum_of_fitnesses = sum([bee.fitness for bee in population])\n \n for i in range(self.onlooker_bees_num):\n\n if(self.percedure_type == \"Roulette Wheel\"):\n # selecting the bee by roulette wheel\n bee = self._roulette_wheel(population, sum_of_fitnesses)\n elif(self.percedure_type == \"Tournoment\"): \n # sele a bee by tournoment procedure\n bee = self._tournoment(population)\n \n # we try for improvement one time for each bee, if change happens we add one to improvement-try property of that bee\n change_flag = self._try_for_improvement(population, bee)\n if(change_flag): \n bee.improvement_try = 0\n else: \n 
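# editorial note: a failed improvement attempt is counted toward max_improvement_try;\n                # once the counter reaches that threshold, scout_bees() below replaces the bee with a fresh random one\n                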
bee.improvement_try += 1\n    \n    def scout_bees(self, population):\n        delete_bees = []\n        new_bees = []\n        for bee in population:\n            if(bee.improvement_try >= self.max_improvement_try):\n                delete_bees.append(bee)\n                new_bees.append(self._making_bee())\n        for i in range(len(delete_bees)):\n            population.remove(delete_bees[i])\n            population.append(new_bees[i])\n    \n    def _try_for_improvement(self, population, bee):\n        # we apply the crossover and the mutation here\n        # we also return whether the process made any change\n        \n        change_flag = False\n        new_bee = copy.deepcopy(bee)\n        \n        # apply the crossover between the selected bee and a neighbor (handled inside _cross_over_one_point)\n        self._cross_over_one_point(population, new_bee)\n        \n        # apply the mutation to the selected bee\n        self._mutation(new_bee) \n\n        validality_flag_current_bee = self._validality_check(bee)\n        validality_flag_new_bee = self._validality_check(new_bee)\n        \n        if(validality_flag_current_bee == False):\n            # we need to set the new feasibility and the new fitness\n            \n            bee.data = new_bee.data\n            bee.feasiblity = new_bee.feasiblity\n            Bees._calculating_fitness(bee, self.blocks, self.demands)\n\n            change_flag = True\n        \n        elif(validality_flag_current_bee == True and validality_flag_new_bee == True):\n            # validality_flag_current_bee is true here\n            \n            # since both feasibilities are true we do not need to set them again\n            # we only need to set the new fitness\n            improvement_flag = self._improvement_check(bee, new_bee)\n            \n            if(improvement_flag):\n                bee.data = new_bee.data\n                bee.fitness = new_bee.fitness\n                Bees._calculating_fitness(bee, self.blocks, self.demands)\n                \n                change_flag = True\n        \n        return change_flag \n    \n    def _tournoment(self, population):\n        \n        tournoment_list = []\n        for i in range(self.k_tournoment):\n            tournoment_list.append(random.choice(population))\n        \n        max_Fitness = -100000\n        max_Bee = None\n        for bee in tournoment_list:\n            if(bee.fitness > max_Fitness):\n                max_Fitness = bee.fitness\n                max_Bee = bee\n        return max_Bee\n    \n    def _roulette_wheel(self, population, sum_of_fitnesses):\n        \n        # choose a random number for selecting our bee \n        pick = random.uniform(0, sum_of_fitnesses)\n        \n        # select our bee by the \"pick\" number, roulette-wheel style\n        current = 0\n        for bee in population:\n            current += bee.fitness\n            if current >= pick:\n                return bee \n    \n    def _cross_over_one_point(self, population, bee):\n        # for each answer the employed bees have made, we select a random neighbor\n        # we also select a random position, and the tail is replaced with the neighbor's tail\n        # the changed answer is kept only if it is better than the previous one and is valid\n        # we also return whether the cross-over made a change\n        \n        x = random.random()\n\n        if(x<=self.crossover_probbility):\n            term_pos = random.randint(1, self.demands_amount-1)\n            neighbor_bee = random.choice(population)\n            self.replace_terms(bee, neighbor_bee, term_pos)\n    \n    def replace_terms(self, bee, neighbor_bee, random_pos):\n        # here we swap part of our chromosome based on the chosen crossover point\n        \n        data = []\n        for i in range(0, random_pos):\n            data.append(bee.data[i])\n        for j in range(random_pos, self.demands_amount):\n            data.append(neighbor_bee.data[j])\n        \n        bee.data = data\n    \n    def _mutation(self, bee):\n        # for each answer the employed bees have made, we select a random position and regenerate it randomly\n        # the changed answer is kept only if it is better than the previous one and is valid\n        # we also return whether the mutation made a change\n        \n        for i in 
range(self.demands_amount): \n x = random.random()\n if(x<=self.mutation_probblity):\n bee.data[i] = self._make_demand_answer(self.demands[i])\n \n def _improvement_check(self, current_bee, new_bee):\n # checking that the new bee (changed bee by cross_over or mutation) has imporoved or not\n \n Bees._calculating_fitness(current_bee, self.blocks, self.demands)\n Bees._calculating_fitness(new_bee, self.blocks, self.demands)\n return True if new_bee.fitness>current_bee.fitness else False\n \n def finding_best_bee(self, population):\n # finding the best solution, with best fitness\n # the answer must be feasible\n \n best_fitness = -1000000\n best_bee = None\n for bee in population:\n # if(bee.fitness == None):\n Bees._calculating_fitness(bee, self.blocks, self.demands)\n if((bee.feasiblity==True) and (bee.fitness>best_fitness)):\n best_fitness = bee.fitness\n best_bee = bee\n\n return best_bee, best_fitness\n \n def validality_amount(self, population):\n invalid_amount = 0\n for i in population:\n if (i.feasiblity == False):\n invalid_amount += 1\n population_amount = len(population)\n valid_amount = population_amount - invalid_amount\n print(f\"amount of invalid data: {invalid_amount}\") \n print(f\"amount of valid data: {valid_amount}\") \n print(f\"total population: {population_amount}\") \n","repo_name":"niusha-yaghini/Artificial_Bee_Colony","sub_path":"Artificial_Bee_Colony.py","file_name":"Artificial_Bee_Colony.py","file_ext":"py","file_size_in_byte":11966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"69870996411","text":"import sys\nimport pandas as pd\nimport numpy as np\nfrom sklearn import ensemble\nimport joblib\n\ndef load_model():\n\tmean = np.load('best_mean.npy')\n\tstd = np.load('best_std.npy')\n\tmodel = joblib.load('best_model.joblib')\n\treturn (mean , std , model)\n\ndef load_data():\n\twith open(sys.argv[1] , 'r') as file:\n\t\ttest_x = np.array([line.split(',') for line in file])\n\ttest_x = test_x[1 : ].astype(np.float)\n\n\treturn test_x\n\ndef normalize(test_x , mean , std):\n\ttest_x = (test_x - mean) / std\n\treturn test_x\n\ndef predict(test_x , model):\n\ttest_y = model.predict(test_x)\n\treturn test_y\n\ndef dump(test_y):\n\tnumber_of_data = test_y.shape[0]\n\tdf = pd.DataFrame({'id' : np.arange(1 , number_of_data + 1) , 'label' : test_y})\n\tdf.to_csv(sys.argv[2] , index = False)\n\treturn\n\ndef main():\n\t(mean , std , model) = load_model()\n\ttest_x = load_data()\n\ttest_x = normalize(test_x , mean , std)\n\n\ttest_y = predict(test_x , model)\n\n\tdump(test_y)\n\n\treturn\n\nif (__name__ == '__main__'):\n\tmain()","repo_name":"jnfem112/ML2019FALL","sub_path":"hw2/best_test.py","file_name":"best_test.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"18063015861","text":"# Function that calculates the average and total rainfall. The program will ask the user for the number of years of data they wish to enter.\n\nnumYears = int(input(\"Please enter the number of years you wish to enter rainfall data for: \"));\nwhile(numYears < 0):\n numYears = int(input(\"The number of years cannot be negative. 
Please enter the number of years you wish to enter rainfall data for: \"));\ntotalRain = 0;\navgRain = 0;\nrainStat = 0;\n\nfor x in range(1,numYears+1, 1):\n    for y in range(1,13,1):\n        rainStat = int(input(\"Please enter the amount of rain for month \" + str(y) + \":\"));\n        while(rainStat < 0):\n            rainStat = int(input(\"Rain amount cannot be negative. Please enter the amount of rain for month \" + str(y) + \":\"));\n        totalRain += rainStat;\n    print(\"The total rainfall for year\", x,\"is\", totalRain);\n    print(\"The average rainfall per month is\", format((totalRain/12), '.2f') + \" inches\");\n    totalRain = 0;\n\n","repo_name":"Ryandalion/Python","sub_path":"Repetition Structures/Average Rainfall/Average Rainfall/Average_Rainfall.py","file_name":"Average_Rainfall.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4636655515","text":"\n\n\n\nclass Solution:\n\n    def rob(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        if len(nums) < 2:\n            return sum(nums)\n        #case 1 : skip 1st home\n        # dp1 =[0, 0]\n        c, p = 0,0\n        for num in nums[1:]:\n            c, p = max(p+num, c), c\n            # dp1.append( max (dp1[-2] + num, dp1[-1]))\n        # case 2 : skip last home\n        # dp2 = [0, 0]\n        c1, p1 = 0,0\n        for num in nums[:-1]:\n            c1, p1 = max(p1+num, c1), c1\n            # dp2.append(max(dp2[-2] + num, dp2[-1]))\n        return max(c1, c)\n        # return max(dp1[-1], dp2[-1])\n\n\n\nif __name__ == '__main__':\n    s = Solution()\n    print(s.rob([2,3,2]))\n\n","repo_name":"3to80/Algorithm","sub_path":"LeetCode/Leet#213/Leet#213.py","file_name":"Leet#213.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25150253885","text":"\n# recursion\n\n\nN, M = map(int, input().split())\n# 1 <= M <= N <= 8\nans = []\n# entries placed in ans are flagged True so reuse can be detected\na = [False] * (N + 1)\n\ndef NM():\n    if len(ans) == M:\n        print(*ans)\n        return\n    for i in range(1, N + 1):\n        # skip values flagged True so nothing is repeated\n        if a[i]:\n            continue\n        # put it into ans and flag it True\n        ans.append(i)\n        a[i] = True\n        # recurse\n        NM()\n        # after the recursion, pop it from ans and flag it False again\n        ans.pop()\n        a[i] = False\n\nNM()","repo_name":"jhkim9028/TIL","sub_path":"Baekjoon/15649.py","file_name":"15649.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72057764412","text":"aantalCroissantjes = input(\"hoeveel croissantjes?:\")\naantalCroissantjes = int(aantalCroissantjes)\n\n\ntotaalcroissantjes = aantalCroissantjes * 0.39\nprint(totaalcroissantjes, \"euro\")\n\naantalStokbroden = input(\"hoeveel stokbroden?:\")\naantalStokbroden = int(aantalStokbroden)\n\ntotaalstokbroden = aantalStokbroden * 2.78\nprint(totaalstokbroden, \"euro\")\n\n\ntotaalprijs = totaalstokbroden + totaalcroissantjes - 1.50\n\nprint(\"‘De feestlunch kost je bij de bakker\", totaalprijs ,\"euro voor de 17 croissantjes en de 2 stokbroden als de 3 kortingsbonnen nog geldig zijn!’\")\n\n\n\n","repo_name":"shainy27/leren_programmeren_23","sub_path":"leerroutes/leren_programmeren/hello_python/croisantjes.py","file_name":"croisantjes.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17794150365","text":"import csv, os, re\nimport pandas as pd\n\ncd = os.getcwd()\ntext_files = os.path.join(cd,'txt/')\n\ndef extract_txt(fname):\n    with open(text_files + fname,'r') as f:\n        lines = 
f.read().splitlines()\n        lines = [l for l in lines if l != '']\n        if len(lines)>1:\n            park = lines[0].split(' - ')[0]\n            idx = [lines.index(j) for j in lines if park in j]\n            slicers = create_splits(idx)\n\n            #Every Race in File\n            races = [lines[x] for x in slicers]\n            for race in races:\n                extract_race_data(race)\n\ndef extract_race_data(race):\n    last_raced_header = [race.index(x) for x in race if 'Last Raced' in x]\n    if len(last_raced_header)>0:\n        lrc_idx = last_raced_header[0]+1\n        num_horses = 0\n        for x in race[lrc_idx:]:\n            if RepresentsInt(x[0]) or '---' in x:\n                num_horses+=1\n            else:\n                n = num_horses\n                num_horses=0\n                break\n            break\n        break_out_info(race,n)\n\ndef break_out_info(race,n):\n    previous_races = breakout_last_races(race,n)\n    horse_names = breakout_horse_names(race,n)\n    print(race)\n\ndef breakout_last_races(race, n):\n    lr_dict = {'Last Raced':race.index(x) for x in race if x in 'Last Raced'}\n    start = (lr_dict['Last Raced'])+1\n    end = start+n\n    return race[slice(start,end)]\n\ndef breakout_horse_names(race, n):\n    lr_dict = {'x':race.index(x) for x in race if x in 'Pgm Horse Name (Jockey)'}\n    start = (lr_dict['x'])+1\n    end = start+(n*2)\n    pend = race[slice(start,end)]\n    tmp = [x for x in pend if not any(word in x for word in ['Start','Past Performance Running Line Preview','Str'])]\n    tmp = [x for x in tmp if not (RepresentsInt(x[0]) and len(x)>2)]\n    return tmp\n\ndef create_splits(idx):\n    #Zipper to add length\n    splits = [idx[i:i + 2] for i in range(0, len(idx),1)]\n    splits[-1].append(0)\n    slicers = [slice(a[0],int(a[1])-1) for a in splits]\n    return slicers\n\ndef RepresentsInt(s):\n    try:\n        int(s)\n        return True\n    except ValueError:\n        return False\n\nif __name__=='__main__':\n    for fn in os.listdir(text_files):\n        extract_txt(fn)\n","repo_name":"PharmyOf1/HorseRacingExploration","sub_path":"txt_to_data.py","file_name":"txt_to_data.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43859930248","text":"from Celery_study.celery import app\nfrom .models import Contact\nfrom .service import send\n\n\n@app.task\ndef send_email(user_email):\n    send(user_email)\n\n\n@app.task\ndef task_two():\n    for contact in Contact.objects.all():\n        send(contact.email)\n\n\n@app.task\ndef task_three(a, b):\n    return a * b\n\n\n@app.task(\n    bind=True,\n    default_retry_delay=5 * 60,  # delay before the task should be relaunched (in seconds)\n)\ndef my_task_retry(self, x, y):\n    try:\n        return x + y\n    except Exception as exc:  # on an exception, restart the task\n        raise self.retry(\n            exc=exc,\n            countdown=60  # how long until it is relaunched\n        )\n\n# a deferred launch is configured via the argument my_task.apply_async((args),countdown=60)\n\n# my_task.apply_async((args), link=my_task.s(20)) # the task runs and then calls itself once more via the \\\n# link argument; the result of the first run is then passed in as the first argument.\n#\n# data = my_task.delay(3) # a task is invoked with .delay\n# data.status # execution status of the task\n# data.id # identifier of the task\n# data.get() # result of the task\n# from celery.result import AsyncResult\n# res = AsyncResult(data.id) # async result\n# res.status # async status\n","repo_name":"Acidastro/Celery_study","sub_path":"send_email/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"40447339765","text":"\n\nfrom concurrent.futures import process\nfrom 
flask import Flask, request\nfrom google.transit import gtfs_realtime_pb2\nimport requests as r \nimport mta_plugins as mta\nimport psutil\nimport os\n\napplication = Flask(__name__)\n\n@application.route('/stats')\ndef stat():\n cpu = str(psutil.cpu_percent()) + '%'\n memory = psutil.virtual_memory()\n # Divide from Bytes -> KB -> MB\n available = round(memory.available/1024.0/1024.0,1)\n total = round(memory.total/1024.0/1024.0,1)\n mem_stat = str(available) + 'MB free / ' + str(total) + 'MB total ( ' + str(memory.percent) + '% )'\n disk = psutil.disk_usage('/')\n free = round(disk.free/1024.0/1024.0/1024.0,1)\n total = round(disk.total/1024.0/1024.0/1024.0,1)\n disk_stat = str(free) + 'GB free / ' + str(total) + 'GB total ( ' + str(disk.percent) + '% )'\n return f\"CPU usage:{cpu} | Memory usage:{mem_stat} | Disk usage: {disk_stat}\"\n\n@application.route('/')\ndef index():\n args = request.args\n lat = float(args.get('Latitude'))\n long = float(args.get('Longitude'))\n feed = gtfs_realtime_pb2.FeedMessage()\n resp = r.get('https://api-endpoint.mta.info/Dataservice/mtagtfsfeeds/nyct%2Fgtfs-jz',headers={\"x-api-key\":os.getenv(\"APIKEY\")})\n try:\n feed.ParseFromString(resp.content)\n station = mta.find_closest_station(long,lat)\n trains = mta.filter_results(feed,station)\n print(trains)\n return {\"trains\":trains}\n # return station\n except :\n return \"Oops! That was no valid data. Try again..\"\n\nif __name__ == '__main__':\n application.run()\n","repo_name":"Joshimmor/MTAServiceAPI","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35081774306","text":"# coding=utf-8\n__author__ = '刘影'\nfrom selenium import webdriver\nfrom common.webObject import webObject\nfrom common.log import Log\nimport HTMLTestRunner\nlog = Log()\n\nclass checkAssert:\n def __init__(self, webObject):\n self.webObject = webObject\n\n def checkText(self,idpro,value,checkvar,expectvar):\n el = self.webObject.testEle(idpro,value)\n try:\n if \"text\" == checkvar:\n s1 = el.text\n if s1 == expectvar:\n log.info(\"检查文本,根据对象{0}[{1}],与期望值[{2}]一致\".format(idpro,value,expectvar))\n else:\n log.error(\"检查文本,根据对象{0}[{1}],与期望值[{2}]不一致\".format(idpro, value,expectvar))\n raise AssertionError\n else:\n s1 = el.get_attribute(expectvar)\n if s1 == expectvar:\n print('预期值与期望值一致')\n else:\n print('预期值与期望值不一致')\n except AssertionError as e:\n log.error(\"根据对象{0}页面中未能找到{1}元素,检查属性失败\".format(idpro,value))\n raise e\n\n\n\n","repo_name":"dream61/UIDemoPython","sub_path":"common/CheckAssert.py","file_name":"CheckAssert.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"69811179773","text":"#!/usr/bin/python\nimport numpy as np\nimport cv2\nimport sys\nimport argparse\nfrom multi_band_blending import multi_band_blending\nfrom stitch import stitch\n\n\ndef buildmap_pgm(pgm_addr):\n pgm = open(pgm_addr)\n lines = pgm.readlines()\n Wd = int(lines[2].split(' ')[0])\n Hd = int(lines[2].split(' ')[1])\n result_map = np.zeros((Hd, Wd), np.float32)\n for y in range(4, 4 + Hd):\n locs = lines[y].split(' ')\n for x in range(Wd):\n result_map.itemset((y - 4, x), int(locs[x]))\n return result_map\n\n\ndef buildmap(Ws, Hs, Wd, Hd, hfovd=160.0, vfovd=160.0):\n # Build the fisheye mapping\n map_x = np.zeros((Hd, Wd), np.float32)\n map_y = np.zeros((Hd, Wd), np.float32)\n vfov = (vfovd / 180.0) * np.pi\n 
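# editorial note: the vstart/hstart offsets computed below center the requested\n    # field of view inside the lens's 180-degree hemisphere before the remap.\n    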
hfov = (hfovd / 180.0) * np.pi\n vstart = ((180.0 - vfovd) / 180.00) * np.pi / 2.0\n hstart = ((180.0 - hfovd) / 180.00) * np.pi / 2.0\n count = 0\n # need to scale to changed range from our\n # smaller cirlce traced by the fov\n xmax = np.cos(hstart)\n xmin = np.cos(hstart + hfov)\n xscale = xmax - xmin\n xoff = xscale / 2.0\n zmax = np.cos(vstart)\n zmin = np.cos(vfov + vstart)\n zscale = zmax - zmin\n zoff = zscale / 2.0\n # Fill in the map, this is slow but\n # we could probably speed it up\n # since we only calc it once, whatever\n for y in range(0, int(Hd)):\n for x in range(0, int(Wd)):\n count = count + 1\n phi = vstart + (vfov * (float(y) / float(Hd)))\n theta = hstart + (hfov * (float(x) / float(Wd)))\n xp = (np.sin(phi) * np.cos(theta) + xoff) / xscale\n zp = (np.cos(phi) + zoff) / zscale\n xS = Ws - (xp * Ws)\n yS = Hs - (zp * Hs)\n map_x.itemset((y, x), int(xS))\n map_y.itemset((y, x), int(yS))\n\n return map_x, map_y\n\n\ndef rotate(img, theta):\n M = cv2.getRotationMatrix2D((640, 640), theta, 1)\n result = cv2.warpAffine(img, M, (1280, 1280))\n return result\n\n\ndef pad(img, pxs, flags):\n l = img[:, :1280, :]\n r = img[:, 1280:, :]\n\n l = cv2.copyMakeBorder(l, pxs, pxs, pxs, pxs, flags)\n r = cv2.copyMakeBorder(r, pxs, pxs, pxs, pxs, flags)\n\n result = np.append(l, r, axis=1)\n result = cv2.resize(result, (2560, 1280))\n return result\n\n\ndef y_align(img, pxs):\n l = img[:, :1280, :]\n r = img[:, 1280:, :]\n\n region_l = l[1280 - pxs:, :, :]\n region_r = r[:pxs, :, :]\n region_l = cv2.flip(region_l, -1)\n region_r = cv2.flip(region_r, -1)\n\n l_result = np.append(region_r, l[:1280 - pxs, :, :], axis=0)\n r_result = np.append(r[pxs:, :, :], region_l, axis=0)\n result = np.append(l_result, r_result, axis=1)\n\n return result\n\n\ndef pivot_smooth(img, shape, wd, flags):\n pivot_m1 = img[:, 1280 - wd:1279 + wd, :]\n pivot_l = img[:, :wd, :]\n pivot_r = img[:, 2560 - wd:, :]\n pivot_m2 = np.append(pivot_r, pivot_l, axis=1)\n if flags:\n pivot_m1 = cv2.GaussianBlur(pivot_m1, shape, 0)\n pivot_m2 = cv2.GaussianBlur(pivot_m2, shape, 0)\n else:\n pivot_m1 = cv2.blur(pivot_m1, shape)\n pivot_m2 = cv2.blur(pivot_m2, shape)\n\n result = np.copy(img)\n result[:, 1280 - wd:1279 + wd, :] = pivot_m1\n result[:, :wd, :] = pivot_m2[:, wd:, :]\n result[:, 2560 - wd:, :] = pivot_m2[:, :wd, :]\n return result\n\n\ndef pivot_stitch(img, wd):\n # Stitch the area in between\n D = stitch(img[:, 1280 - wd:1280], img[:, 1280:1280 + wd], sigma=15.0)\n\n # Warp backwards\n pt1 = np.dot(D['H'], [wd, 400, 1])\n pt3 = np.dot(D['H'], [wd, 800, 1])\n pt1 = pt1 / pt1[2]\n pt3 = pt3 / pt3[2]\n src = np.zeros((4, 2), np.float32)\n dst = np.zeros((4, 2), np.float32)\n src[0] = [0, 0]\n src[1] = pt1[:2]\n src[2] = [0, 1280]\n src[3] = pt3[:2]\n dst = np.array(src)\n dst[1] = [2 * wd - 1, 400]\n dst[3] = [2 * wd - 1, 800]\n\n result = np.copy(img)\n M = cv2.getPerspectiveTransform(src, dst)\n result[:, 1280 - wd:1280 +\n wd] = cv2.warpPerspective(D['res'], M, (2 * wd, 1280))\n result[:, 1280 - wd:1280 + wd] = D['res']\n return result\n\n\ndef main(input, output):\n cap = cv2.VideoCapture(input)\n\n # Define the codec and create VideoWriter object\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter(output, fourcc, 30.0, (2560, 1280))\n\n # Obtain xmap and ymap\n xmap = buildmap_pgm(\n './RemapFilter/Samsung Gear 2560x1280/xmap_samsung_gear_2560x1280.pgm')\n ymap = buildmap_pgm(\n './RemapFilter/Samsung Gear 2560x1280/ymap_samsung_gear_2560x1280.pgm')\n #xmap, ymap = buildmap(2560, 1280, 2560, 
1280)\n\n # Perform remap for each frame\n while(cap.isOpened()):\n ret, frame = cap.read()\n if ret == True:\n # Fisheye rotation\n frame = np.append(frame[:, :1280, :], rotate(\n frame[:, 1280:, :], -0.5), axis=1)\n\n # Fisheye padding\n frame = pad(frame, 10, cv2.BORDER_REFLECT_101)\n\n # Remapping, fisheye -> equirectangular\n frame = cv2.remap(frame, xmap, ymap, cv2.INTER_LINEAR)\n\n # Vertical alignment\n frame = y_align(frame, 3)\n\n # Stitching\n #frame = pivot_stitch(frame, 200)\n\n # Pivot smoothing / blending\n #frame = pivot_smooth(frame, (10, 10), 10, False)\n frame = cv2.resize(multi_band_blending(\n frame[:, :1280, :], frame[:, 1280:, :], overlap_w=20, sigma=2.0), (2560, 1280))\n frame = frame.astype(np.uint8)\n\n ratio = 1.02\n frame[:, 1280:] = cv2.resize(frame[:, 1280:], (1280, int(\n 1280 * ratio)))[int(1280 * (ratio - 1) / 2):int(1280 * (ratio - 1) / 2) + 1280, :]\n\n # Write the remapped frame\n out.write(frame)\n\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break\n\n # Release everything if job is finished\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n # construct the argument parse and parse the arguments\n ap = argparse.ArgumentParser(\n description=\"A summer research project to seamlessly stitch dual-fisheye video into 360-degree videos\")\n ap.add_argument('input', metavar='INPUT.XYZ',\n help=\"path to the input dual fisheye video\")\n ap.add_argument('-o', '--output', metavar='OUTPUT.XYZ', required=False, default='output.MP4',\n help=\"path to the output equirectangular video\")\n\n args = vars(ap.parse_args())\n main(args['input'], args['output'])\n","repo_name":"cynricfu/dual-fisheye-video-stitching","sub_path":"waste/.demo.py","file_name":".demo.py","file_ext":"py","file_size_in_byte":6587,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"78"} +{"seq_id":"72922838331","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTakes a list of integers, for example this one:\na = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nand prints out a list with all the integers of the list\nthat are less than a number given by the user\nand another list with the remaining integers.\n\nIf an incorrect number is given,\nuser has a second chance to enter the number.\n\n@author: Barbora Doslikova\n\"\"\"\n \nsample = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\n\ndef listSorter(sampleList, compareInt):\n correctOutput = []\n incorrectOutput = []\n \n for i in sampleList:\n if i < compareInt:\n correctOutput.append(i)\n else:\n incorrectOutput.append(i) \n print(correctOutput, incorrectOutput)\n\ntry:\n comparator = int(input(\"Give a whole number: \"))\nexcept ValueError:\n comparator = int(input(\"Give a *whole number*: \"))\n\nlistSorter(sample, comparator)\n\n","repo_name":"BarboraDoslikova/sort-lists","sub_path":"sort-lists.py","file_name":"sort-lists.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35966797690","text":"import copy\nimport numpy as np\nfrom PIL import Image, ImageDraw\n\n# max flow\ndef push(u, v, f, e, c, graf, h, H, overflow):\n d = min(e[u], c[u][v] - f[u][v])\n f[u][v] += d\n f[v][u] -= d\n e[u] -= d\n if f[u][v] != 0 and u not in graf[v]: #test6 out of range graf have max idx 98 but v is 99\n graf[v].append(u)\n if e[u] == 0:\n overflow[H].remove(u)\n if not overflow[H]:\n H = newH(H, overflow)# перерасчет наивысшей\n if c[u][v] == f[u][v]:\n 
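# editorial note: the edge (u, v) is saturated now, so v leaves u's admissible adjacency list\n        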
graf[u].remove(v)\n    if e[v] == 0 and v != n - 1 and v != 0 and v not in overflow[h[v]]:\n        overflow[h[v]].append(v)\n        if h[v] > H:\n            H = h[v]\n    e[v] += d\n    return H\n\n\ndef newH(H, overflow):\n    for i in range(H - 1, -1, -1):\n        if overflow[i]:\n            return i\n    return -1\n\n \ndef key_min(u, h, c, f, v):\n    if c[u][v] - f[u][v]> 0 and h[v] >= h[u]:\n        return h[v]\n    else:\n        return 10000000\n\n\ndef lift(u, h, c, f, graf, H, overflow):\n    min_h = min(graf[u], key = lambda k: key_min(u, h, c, f, k))\n    overflow[h[u]].remove(u)\n    h[u] = h[min_h] + 1\n    if len(overflow) <= h[u]:\n        overflow.append([u])\n        H = h[u]\n    else:\n        overflow[h[u]].append(u)\n        if H < h[u]:\n            H = h[u]\n    return H\n\n\ndef global_r_opt(c, h, H, overflow, graf0, graf1):\n    listT = [[n - 1]]\n    listPoints = [True for i in range(n)]\n    listPoints[n - 1] = False\n    level = 0\n    temp = [0]\n    while temp:\n        temp = []\n        for u in listT[level]:\n            for v in graf1[u]:\n                if listPoints[v]:\n                    if c[v][u] > 0:\n                        temp.append(v)\n                        if h[v] < len(listT):\n                            if e[v] > 0:\n                                overflow[h[v]].remove(v)\n                                overflow[len(listT)].append(v)\n                            h[v] = len(listT)\n                            if H < h[v] and e[v] >= 0:\n                                H = h[v]\n                        listPoints[v] = False\n        level += 1\n        if temp:\n            listT.append(temp)\n    listS = [[0]] # TODO: redo this via the original graph; keep the graph around\n    temp = [0]\n    level = 0\n    listPoints[0] = False\n    while temp:\n        temp = []\n        for u in listS[level]:\n            for v in graf0[u]:\n                if listPoints[v]:\n                    if c[u][v] > 0:\n                        temp.append(v)\n                        if h[v] < len(listS):\n                            if e[v] > 0:\n                                overflow[h[v]].remove(v)\n                                overflow[len(listS)].append(v)\n                            h[v] = len(listS)\n                            if H < h[v] and e[v] > 0:\n                                H = h[v]\n                        listPoints[v] = False\n        level += 1\n        if temp:\n            listS.append(temp)\n    return H\n\n\ndef count_e(graf, e, c, f):\n    for v in graf[0]:\n        f[0][v] = c[0][v]\n        f[v][0] = -c[0][v]\n        e[v] = f[0][v]\n        # overflow.append(v)\n        if(v != 0):\n            graf[v].append(0) # out of range when 1 edge\n    graf[0] = []\n\n\ndef c_for_opt(c, f):\n    c_new = copy.deepcopy(c)\n    for i in range(n):\n        for j in range(n):\n            if f[i][j] > 0:\n                c_new[i][j] = c[i][j] - f[i][j]\n    return c_new\n\n\n# the core edge-weight formula for the segmentation \ndef edge_weight(C_p,C_q):\n    number = -(C_p-C_q)*(C_p-C_q)/(2 * sigma * sigma)\n    return np.exp(number)\n\n# read the image and build a graph from it, then feed that in. Vertices that fall into the min cut are black, the rest are white. \nimage = Image.open(\"ceramic-gr-100.jpg\").convert('L') # Open the image. \ndraw = ImageDraw.Draw(image) # Create a drawing tool. \nwidth = image.size[0] # Determine the width. \nheight = image.size[1] # Determine the height. \t\npix = image.load() # Load the pixel values.\nmatrix = np.asarray(image)\nintence_vals = matrix.ravel()\n# lambda and sigma \nsigma = 10\n\n# 0 is black, 255 is white\n#matrix2[(elem-1) % width][(elem-1) // height]\n\nvertex_count = len(intence_vals)\nflow_matrix = np.zeros((vertex_count + 2, vertex_count + 2))\nedge_array = []\n\n\n# user input:\nprint(\"Введите число вершин фона \\n\")\nbcg_pixel_num = int(input())\n# histogram (how often each pixel intensity occurs) of the background:\nhist_bcg = np.zeros(256)\n\nbcg = set() # background vertices\nprint(\"Введите координаты пикселей фона через пробел \\n\")\n\nfor i in range (bcg_pixel_num):\n    x, y = map(int,input().split())\n    x-=1\n    y-=1\n    bcg.add(y * width + x + 1)\n    hist_bcg[matrix[y][x]] += 1 # increment the occurrence count of this pixel intensity by 1 \n\n#!!! for some reason the dimensions of matrix are inverted compared to size[0] and size[1]\nprint(\"Введите число вершин объекта \\n\")\nobj = set()\nobj_pixel_num = int(input())\n# histogram (how often each pixel intensity occurs) of the object:\nhist_obj = np.zeros(256)\nprint(\"Введите координаты пикселей объекта через пробел \\n\")\nfor i in range (obj_pixel_num):\n    x, y = map(int,input().split())\n    x-=1\n    y-=1\n    obj.add(y * width + x + 1)\n    hist_obj[matrix[y][x]] += 1 # increment the occurrence count of this pixel intensity by 1 \n\nvertex_set = set()\nfor i in range(flow_matrix.shape[0]):\n    vertex_set.add(i)\n\nvertex_set = vertex_set ^ bcg ^ obj # vertices that belong to neither the object nor the background\n\nlam = 0.001\n\n#special edges:\nfor pixel in bcg:\n    flow_matrix[0, pixel] = 0\n    flow_matrix[pixel, flow_matrix.shape[0] - 1] = 100000\n\nfor pixel in obj:\n    flow_matrix[0,pixel] = 100000\n    flow_matrix[pixel, flow_matrix.shape[0] - 1] = 0\n\n#histogram and lambda edges:\nfor pixel in vertex_set:\n    if pixel != 0 and pixel != flow_matrix.shape[0] - 1:\n        flow_matrix[0,pixel] = - lam * np.log(hist_bcg[intence_vals[pixel - 1]]/bcg_pixel_num + 1e-4) # occurrence count / total = probability\n        flow_matrix[pixel, flow_matrix.shape[0] - 1] = - lam * np.log(hist_obj[intence_vals[pixel - 1]]/obj_pixel_num + 1e-4)# * ln(Hist_obj[pixel])\n\n# neighbour edges (4-connectivity):\nfor i in range(0 , height - 1):\n    for j in range(0 , width - 1):\n        if i < height - 1:\n            weight = edge_weight(int(matrix[i][j]),int(matrix[i + 1][j])) \n            flow_matrix[i * width + j + 1][(i + 1) * width + j + 1] = weight\n        if j < width - 1:\n            weight = edge_weight(int(matrix[i][j]),int(matrix[i][j + 1])) \n            flow_matrix[i * width + j + 1][i * width + j + 1 + 1] = weight\n        if i > 0:\n            weight = edge_weight(int(matrix[i][j]),int(matrix[i - 1][j])) \n            flow_matrix[i * width + j + 1][(i - 1) * width + j + 1 + 1] = weight\n        if j > 0:\n            weight = edge_weight(int(matrix[i][j]),int(matrix[i][j - 1])) \n            flow_matrix[i * width + j + 1][i * width + (j - 1) + 1 + 1] = weight\n# graph filling template \n\n#print(flow_matrix)\nfor i in range (vertex_count + 2):\n    if(len(np.argwhere(flow_matrix[i]))>0):\n        edge_list = np.hstack(np.argwhere(flow_matrix[i])).tolist()\n    else:\n        edge_list = []\n    edge_array.append(edge_list)\nn = vertex_count + 2 # number of vertices\ngraf = edge_array\ngraf0 = copy.deepcopy(graf) # keep a copy of the original graph\ngraf1 = [[] for i in range(n)] # store the graph in reversed form\nindex = 0\nfor u in graf:\n    for v in u:\n        graf1[v].append(index)\n    index += 1\n# filled in linear time while reading the input\n# TODO: write the input routine\n\n\nc = flow_matrix\nf = [[0 for i in range(n)] for j in range(n)]\ne = [0 for i in range(n)] # array of size n, filled as we go\nh = [0 for i in range(n)]\nh[0] = n\noverflow = [[] for i in range(n + 1)] # at each index we keep the list of vertices with that height\noverflow[0].extend(graf[0]) # change after the optimization\nif n-1 in overflow[0]:\n    overflow[0].remove(n-1)\ncount_e(graf, e, c, f) # push into the vertices adjacent to the source\nH = global_r_opt(c, h, -1, overflow, graf0, graf1) # first global optimization\n\nm = 50 # how often to run the optimization\ncount = 0\n# overflow.sort(key = lambda k: h[k], reverse = True)\n# sort by height from maximum to minimum\n#print(graf)\n#print('c =', c)\n#print('f =', f)\n#print('e =', e)\n#print('h =', h)\n#print('H =', H)\n#print(overflow)\n\n\nwhile H >= 0:\n    if(len(overflow[H])>0):\n        u = overflow[H][0] \n    else: \n        H = H-1\n        continue\n    v = n\n    for vi in graf[u]:\n        if h[vi] + 1 == h[u] and c[u][vi] - f[u][vi] > 0:\n            v = vi\n            break\n    if v < n:\n        
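# editorial note: an admissible neighbour was found (height drops by one, residual capacity left), so push\n        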
H = push(u, v, f, e, c, graf, h, H, overflow)\n else:\n H = lift(u, h, c, f, graf, H, overflow)\n #count += 1\n #if count > m:\n # count = 0\n # H = global_r_opt(c_for_opt(c, f), h, H, overflow, graf0, graf1)\n\n#print('Ответ:', e[n - 1])\n#print(graf)\nvisited = set() # Set to keep track of visited nodes.\n\ndef dfs(visited, graph, node):\n if node not in visited:\n #print (node)\n visited.add(node)\n for neighbour in graph[node]:\n dfs(visited, graph, neighbour)\n\ndfs(visited, graf, 0)\n\nmatrix2 = matrix.copy()\n\nfor i in range(height):\n for j in range(width):\n matrix2[i][j] = 0\nfor elem in visited:\n if elem != 0 and elem < width * height:\n matrix2[(elem - 1) // width][(elem - 1) % width] = 255 \nresult = Image.fromarray(matrix2)\nresult.save('our-banana.jpg') ","repo_name":"vladislav3112/image-segmentation","sub_path":"segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":10334,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4642091942","text":"import sys\nsys.path.append('../P1')\n\nimport matplotlib.pyplot as plt\nimport pylab as pl\nimport numpy as np\nimport pandas as pd\nimport sys\nfrom random import randint\n\nimport loadParametersP1 as P1P\nimport loadFittingDataP1 as P1F\n\n# This code is garbage\n# Need better ways of organizing plots next time\n\ndef getData(ifPlotData=True):\n # load the fitting data and (optionally) plot out for examination\n # return the X and Y as a tuple\n\n data = pl.loadtxt('curvefittingp2.txt')\n\n X = data[0,:]\n Y = data[1,:]\n\n if ifPlotData:\n plt.plot(X,Y,'o')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.show()\n\n return (X,Y)\n\ndef polynomial_basis(X, m):\n # X is a 1D array\n matrix = np.empty((len(X), m+1))\n for i in range(len(X)):\n for j in range(m+1):\n matrix[i][j] = X[i]**j\n return matrix\n\ndef cosine_basis(X, m):\n # X is a 1D array\n matrix = np.empty((len(X), m))\n for i in range(len(X)):\n for j in range(m):\n matrix[i][j] = np.cos(X[i] * (j+1) * np.pi)\n return matrix\n\ndef eval_poly(X, coeffs):\n coeffs = coeffs.flatten()\n X = X.flatten()\n Y = np.empty(shape=(len(X),))\n\n for i in range(len(X)):\n Y[i] = sum([coeffs[n] * (X[i] ** n) for n in range(len(coeffs))])\n return Y\n\ndef eval_cos(X, coeffs):\n coeffs = coeffs.flatten()\n X = X.flatten()\n Y = np.empty(shape=(len(X),))\n\n for i in range(len(X)):\n Y[i] = sum([coeffs[j] * (np.cos(X[i] * (j+1) * np.pi)) \\\n for j in range(len(coeffs))])\n return Y\n\ndef eval_actual(x):\n return np.cos(np.pi * x) + np.cos(2 * np.pi * x)\n\ndef beta_closed_form(X, Y):\n # X, Y are 2D arrays\n return (np.linalg.inv(X.T @ X) @ X.T @ Y.reshape(-1, 1))\n\ndef sum_sq_err(X, Y, beta):\n return np.sum((np.reshape(Y, (-1, 1)) \\\n - X @ np.reshape(beta, (-1, 1)))**2)\n\ndef sse_gradient(X, Y, beta):\n # X, Y are 2D arrays\n # gradient = -2[X]^T[Y] + 2[X]^T[X][\\beta]\n # returns 2D array\n return -2 * X.T @ Y + 2 * X.T @ X @ beta\n\ndef get_obj_func(X, Y):\n def obj(beta):\n return sum_sq_err(X, Y, beta)\n return obj\n\ndef get_batch_grad_func(X, Y):\n def grad(beta):\n return sse_gradient(X, Y, beta)\n return grad\n\n# global i\n# i = 0\ndef get_sgd_grad_func(X, Y):\n def grad(beta):\n # global i\n # i = (i+1) % X.shape[1]\n i = randint(0, X.shape[1] - 1)\n return sse_gradient(X[i].reshape(1, -1), Y[i].reshape(1, -1), beta)\n return grad\n\ndef part_one(save=True, plot=True):\n X, Y = getData(False)\n M = [0,1,3,10]\n solutions = {}\n\n for i, m in enumerate(M):\n X_poly = polynomial_basis(X, 
m)\n beta = beta_closed_form(X_poly, Y)\n solutions[m] = beta\n\n if plot:\n x_values = np.linspace(0, 1, 100)\n y_values = eval_poly(x_values, beta)\n actual_y_values = eval_actual(x_values)\n \n plt.figure(1, figsize=(3*len(M), 3))\n plt.subplot(1, len(M), i+1)\n plt.plot(X, Y, 'ro', label='data')\n plt.plot(x_values, y_values, label='poly fit')\n plt.plot(x_values, actual_y_values, label='source')\n plt.axis([0,1,-3,3])\n plt.gca().set_aspect(0.17, adjustable='box')\n plt.title('Polynomial Fit (M = '+str(m)+')')\n plt.tight_layout()\n # plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)\n\n if i == 0:\n plt.legend()\n\n if plot and save:\n plt.savefig('figs/part_1.png')\n\n return solutions\n\ndef part_two(beta=None, diff=1e-8):\n if beta is None:\n beta = np.zeros((1, 1))\n\n M = beta.shape[0] - 1\n X, Y = getData(False)\n X = polynomial_basis(X, M).reshape(-1, M+1)\n Y = Y.reshape(-1, 1)\n\n stepSize = 1\n sq_errs = []\n\n closed_form_grad = sse_gradient(X, Y, beta)\n numerical_grad = P1P.centralDifferences(get_obj_func(X, Y), stepSize)(beta)\n err = np.linalg.norm(closed_form_grad - numerical_grad)\n assert err < diff\n sq_errs.append(err)\n\n return closed_form_grad, np.array(numerical_grad), sq_errs\n\ndef part_three_bgd(eta=1e-2, threshold=1e-8, start=None, M=[0,1,3,10], \\\n save=False, plot=True, plot_source=True):\n # bgd \n # param = None, 'eta', 'epsilon'\n X, Y = getData(False)\n X = X.reshape(-1, 1)\n Y = Y.reshape(-1, 1)\n\n solutions = {}\n for i, m in enumerate(M):\n X_poly = polynomial_basis(X, m)\n obj = get_obj_func(X_poly, Y)\n bgd = get_batch_grad_func(X_poly, Y)\n\n if start is None:\n seed = np.zeros((X_poly.shape[1], 1))\n else:\n seed = start\n\n # calling gradient descent from P1\n beta = P1P.gradientDescent(bgd, seed, eta, \\\n (obj, threshold, 'objective'), [])\n solutions[m] = beta\n\n if plot:\n x_values = np.linspace(0, 1, 100)\n y_values = eval_poly(x_values, beta)\n actual_y_values = eval_actual(x_values)\n \n plt.figure(3, figsize=(12, 3))\n plt.subplot(1, 5, i+1)\n\n if plot_source:\n plt.plot(X, Y, 'ro', label='data')\n plt.plot(x_values, actual_y_values, label='source')\n\n plt.plot(x_values, y_values, label='bgd (η='+str(eta)+')')\n plt.axis([0,1,-3,3])\n plt.gca().set_aspect(0.17, adjustable='box')\n plt.title('Batch GD (M = '+str(m)+')')\n plt.tight_layout()\n # plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)\n\n if i == 0:\n plt.legend()\n\n if plot and save:\n plt.savefig('figs/part_3_bgd.png')\n\n return solutions\n\ndef part_three_sgd(eta=1e-2, threshold=1e-8, start=None, M = [0,1,3,10],\\\n save=False, plot=True, plot_source=True):\n # sgd \n X, Y = getData(False)\n X = X.reshape(-1, 1)\n Y = Y.reshape(-1, 1)\n\n solutions = {}\n for i, m in enumerate(M):\n X_poly = polynomial_basis(X, m)\n obj = get_obj_func(X_poly, Y)\n sgd = get_sgd_grad_func(X_poly, Y)\n\n if start is None:\n seed = np.zeros((X_poly.shape[1], 1))\n else:\n seed = start\n\n # calling gradient descent from P1\n beta = P1P.gradientDescent(sgd, seed, eta, \\\n (obj, threshold, 'objective'), [])\n solutions[m] = beta\n\n if plot:\n x_values = np.linspace(0, 1, 100)\n y_values = eval_poly(x_values, beta)\n actual_y_values = eval_actual(x_values)\n\n plt.figure(3, figsize=(12, 3))\n plt.subplot(1, 5, i+3)\n\n if plot_source:\n plt.plot(X, Y, 'ro', label='data')\n plt.plot(x_values, actual_y_values, label='source')\n\n plt.plot(x_values, y_values, label='sgd (η='+str(eta)+')')\n plt.axis([0,1,-3,3])\n plt.gca().set_aspect(0.17, adjustable='box')\n 
plt.title('Stochastic GD (M = '+str(m)+')')\n plt.tight_layout()\n # plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)\n\n if i == 0:\n plt.legend()\n\n if plot and save:\n plt.savefig('figs/part_3_sgd.png')\n\n return solutions\n\ndef part_three(save=True, plot=True):\n return part_three_bgd(save=save, plot=plot), \\\n part_three_sgd(save=save, plot=plot)\n\ndef part_four(save=True, plot=True):\n X, Y = getData(False)\n M = [1,2,4,8]\n solutions = {}\n\n for i, m in enumerate(M):\n x_cos = cosine_basis(X, m)\n beta = beta_closed_form(x_cos, Y)\n solutions[m] = beta\n\n if plot:\n x_values = np.linspace(0, 1, 100)\n y_values = eval_cos(x_values, beta)\n actual_y_values = eval_actual(x_values)\n\n plt.figure(4, figsize=(3 * len(M), 3))\n plt.subplot(1, len(M), i+1)\n plt.plot(X, Y, 'ro', label='data')\n plt.plot(x_values, y_values, label='cos fit')\n plt.plot(x_values, actual_y_values, label='source')\n plt.axis([0,1,-3,3])\n plt.gca().set_aspect(0.17, adjustable='box')\n plt.title('Cosine Basis (M = '+str(m)+')')\n plt.tight_layout()\n # plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)\n\n if i == 0:\n plt.legend()\n\n if plot and save:\n plt.savefig('figs/part_4.png')\n\n return solutions\n\ndef generate_results(write_to_file=False, \n save=[True,True,True],\n plot=[True,True,True]):\n try:\n if write_to_file:\n sys.stdout = open('results.txt', 'w')\n\n part_1 = part_one(save[0], plot[0])\n part_2 = part_two()\n part_3 = part_three(save[1], plot[1])\n part_4 = part_four(save[2], plot[2])\n\n print('Part 1: Coefficients to polynomial basis regression:\\n{}\\n'.format(\n part_1))\n print('Part 2: Closed-form gradient vs. numerical gradient:\\n{}\\n'.format(\n part_2))\n print('Part 3: Batch GD vs. Stochastic GD\\n{}\\n'.format(part_3))\n print('Part 4: Coefficients to cosine basis regression:\\n{}\\n'.format(\n part_4))\n finally:\n sys.stdout.close()\n\ndef main():\n X, Y = getData(False)\n M = [2,6]\n t=1e-20\n start=None\n # start=np.array([-2,10,-2,10]).reshape((-1,1))\n\n # p2_test_1 = np.array([100,2,4,5,6,2]).reshape(-1,1)\n # p2_test_2 = np.array([0,2,20,34,-20,5,6,-398]).reshape(-1,1)\n # part_one()\n # print(part_two(p2_test_1))\n # print(part_two(p2_test_2))\n\n part_three_bgd(eta=1e-4, start=start, M=M, threshold=t, plot_source=True)\n # part_three_bgd(eta=1e-3, start=start, M=M, threshold=t, plot_source=False)\n part_three_bgd(eta=0.01, start=start, M=M, threshold=t, plot_source=True)\n # part_three_bgd(eta=0.05, start=start, M=M, threshold=t, plot_source=False)\n\n # part_three_sgd(eta=0.002, start=start, M=M, threshold=t, plot_source=True)\n # part_three_sgd(eta=0.01, start=start, M=M, threshold=t, plot_source=False)\n # part_three_sgd(eta=0.02, start=start, M=M, threshold=t, plot_source=False)\n # part_three_sgd(eta=0.2, start=start, M=M, threshold=t, plot_source=False)\n\n # plt.figure(900).savefig('figs/part_3_b_s.png')\n\n # plt.figure(2).savefig('figs/part_3_bgd_etas.png')\n # plt.figure(3).savefig('figs/part_3_sgd_etas.png')\n\n # part_four()\n # generate_results(save=[False,False,False], plot=[False,False,False])\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"andy-kwei/2017-Fall-6.867","sub_path":"pset1/P2/loadFittingDataP2.py","file_name":"loadFittingDataP2.py","file_ext":"py","file_size_in_byte":10552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43549845751","text":"import os\nimport time\nimport codecs\nimport array as arr\n\nfrom selenium import 
webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nclass GetData:\n def __init__(self):\n\t\n self.driverpath = 'driver/chromedriver.exe'\n \n self.chrome_options = webdriver.ChromeOptions()\n self.chrome_options.add_argument(\"--incognito\")\n\n self.driver = webdriver.Chrome(chrome_options=self.chrome_options, executable_path=self.driverpath)\n self.driver.set_window_size(800, 600)\n self.info = {}\n\n def build(self, info):\n self.info = info\n\n def process(self):\n self.driver.get(self.info['URL'][1])\n self.driver.execute_script(\"window.stop();\")\n\n time.sleep(2)\n\n for u in self.info:\n if u == 'URL' or u == '':\n continue;\n\n elementName = self.info[u][0]\n data = self.info[u][1]\n\n if u[0] == 'C':\n try:\n self.Item = self.driver.find_element_by_xpath(\"//div[contains(text(), '\" + elementName + \"')]\")\n Itemid = self.Item.get_attribute(\"id\");\n\n self.Item = self.driver.find_element_by_xpath(\"//div[@aria-labelledby='\" + Itemid + \"']\")\n self.Item = self.Item.find_element_by_xpath(\".//div[@data-value='\" + data + \"']\")\n\n self.Item.click();\n self.Item.click();\n except NoSuchElementException:\n continue\n \n elif u[0] == 'B':\n try:\n self.Item = self.driver.find_element_by_xpath(\"//div[contains(text(), '\" + elementName + \"')]\")\n Itemid = self.Item.get_attribute(\"id\")\n\n self.Item = self.driver.find_element_by_xpath(\"//div[@aria-labelledby='\" + Itemid + \"']\")\n self.Item = self.Item.find_element_by_xpath(\".//div[@data-answer-value='\" + data + \"']\")\n self.Item.click();\n except NoSuchElementException:\n continue\n else:\n try:\n self.Item = self.driver.find_element_by_xpath(\"//div[contains(text(), '\" + elementName + \"')]\")\n #while len(self.Item.find_elements_by_xpath(\"//input[@type='text']\")) == 0:\n #self.Item = self.Item.find_element_by_xpath(\"./..\"); \n #self.Item = self.Item.find_element_by_xpath(\"./..\"); \n #self.Item = self.Item.find_element_by_xpath(\"./..\"); \n #self.Item = self.Item.find_element_by_xpath(\"./..\"); \n\n Itemid = self.Item.get_attribute(\"id\")\n\n self.Item = self.Item.find_element_by_xpath(\"//input[@aria-labelledby='\" + Itemid + \"']\");\n self.Item.send_keys(str(data));\n except NoSuchElementException:\n continue\n\n self.LastButton = self.driver.find_element_by_xpath(\"//*[@id='mG61Hd']/div[2]/div/div[3]/div[1]/div/div/span/span\")\n self.LastButton.click();\n\n self.driver.quit()\n\n\n \n ","repo_name":"RLukas2/Selenium_Testing","sub_path":"Bot_testing.py","file_name":"Bot_testing.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5760082841","text":"# -*- coding: utf-8 -*-\n\nfrom typing import List, Dict\nfrom werkzeug.exceptions import BadRequest\n\nfrom ex_dataclass import ex_dataclass, field, EXpack\n\nfrom ..domain.split.val_obj import BillMatcher, CompositePolicy, SubPolicy, CommonConfig\nfrom ..domain.split.entity import SplitRule\nfrom ..domain.report.entity import ReportBill\nfrom ..domain.report.service import \\\n ServiceTypeConsumptionTrend, BusinessDataPoint, ServiceTypeDataPoint, TypeLevel1DataPoint\n\nimport common.static as const\nimport common.util.decimal as decimal\n\n\n@ex_dataclass\nclass 
ReportOverview:\n    total: float = field(required=True)\n    bills: List[ReportBill] = field(required=True)\n\n\n@ex_dataclass\nclass ReportPlots:\n    consumption_trend: List[ServiceTypeConsumptionTrend] = field(required=True)\n    business_distribution: List[BusinessDataPoint] = field(required=True)\n    service_type_distribution: List[ServiceTypeDataPoint] = field(required=True)\n    type_level_1_distribution: List[TypeLevel1DataPoint] = field(required=True)\n\n\n@ex_dataclass\nclass ReportDetails(EXpack):\n    overview: ReportOverview = field(required=True)\n    plots: ReportPlots = field(required=True)\n\n    def asdict(self) -> Dict:\n        res = super().asdict()\n        bill_dict_list = []\n        for bill in self.overview.bills:\n            bill_dict_list.append(bill.to_dict(excludes=['type', 'parent_id']))\n        res['overview']['bills'] = bill_dict_list\n        return res\n\n\n@ex_dataclass\nclass SimpleSplitRule(EXpack):\n    contract_id: str = field(default=None)\n    provider_name: str = field(default=None)\n    bill_subject_name: str = field(default=None)\n    service_type: str = field(default=None)\n    service_name: str = field(default=None)\n    service_details: str = field(default=None)\n    desc: str = field(default=None)\n    tag1: str = field(default=None)\n    tag2: str = field(default=None)\n    tag3: str = field(default=None)\n    tag4: str = field(default=None)\n    tag5: str = field(default=None)\n    split_policy: str = field(default=None)\n\n    def to_split_rule(self) -> SplitRule:\n        rule = SplitRule.create()\n        rule.bill_matchers = self.__build_bill_matcher().asdict()\n        rule.split_policy = self.__build_split_policy().asdict()\n        rule.desc = self.desc\n        return rule\n\n    def __build_bill_matcher(self) -> BillMatcher:\n        dict_data = self.asdict()\n        dict_data.pop('split_policy')\n        bm = BillMatcher(**dict((k, v) for (k, v) in dict_data.items() if (v is not None and v != '')))\n        return bm\n\n    def __build_split_policy(self) -> CompositePolicy:\n        \"\"\" Parse split_policy in the agreed format and return a CompositePolicy\n\n        1. Each line describes one split (cost-sharing) policy entry\n        2. Within an entry, a numeric string without a decimal point denotes a fixed-value policy; otherwise it denotes a proportional one\n\n        业务1: 200000\n        业务2: 0.8\n        业务3: 0.1\n        运维: 0.1\n\n        \"\"\"\n        text = self.split_policy.strip()\n\n        policies: List[SubPolicy] = []\n\n        for line in text.split('\n'):\n            items = line.split(':')\n\n            if len(items) != 2:\n                raise BadRequest(f'解析分摊策略失败,原因:行{line}不符合约定的格式(e.g. \"业务1: 200000\", \"业务2: 0.8\")。')\n\n            business = items[0].strip()\n            value_or_percent = items[1].strip()\n\n            policy = self.__build_sub_policy_or_raise(value_or_percent, business)\n\n            policies.append(policy)\n\n        cp = CompositePolicy(type=const.SPLIT_TYPE_COMP, configs=policies)\n\n        return cp\n\n    @classmethod\n    def __build_sub_policy_or_raise(cls, sub_policy_str: str, business: str) -> SubPolicy:\n        \"\"\" Build a SubPolicy object from the given string\n\n        :param sub_policy_str: may be one of the following three cases\n\n            1. a fixed-value sub-policy: xxxxx.xx\n            2. a proportional sub-policy: xx.xx%\n            3. neither\n        \"\"\"\n        value_or_percent = sub_policy_str\n\n        policy = SubPolicy(configs=CommonConfig(business=business))\n\n        if cls.__is_fixed_value_str(value_or_percent):\n\n            decimal_value = cls.__convert_fixed_value_str_2_decimal_or_raise(value_or_percent)\n            policy.type = const.SPLIT_TYPE_FIXED\n            policy.configs.value = decimal_value\n\n        elif cls.__is_percent_str(value_or_percent):\n\n            decimal_value = cls.__convert_percent_str_2_decimal_or_raise(value_or_percent)\n            policy.type = const.SPLIT_TYPE_PROP\n            policy.configs.percent = decimal_value\n\n        else:\n\n            cls.__raise_decode_sub_policy_failed(sub_policy_str)\n\n        return policy\n\n    @classmethod\n    def __is_fixed_value_str(cls, value_or_percent_str: str) -> bool:\n        return '%' not in value_or_percent_str\n\n    @classmethod\n    def __is_percent_str(cls, value_or_percent_str: str) -> bool:\n        return '%' in value_or_percent_str\n\n    @classmethod\n    def __convert_fixed_value_str_2_decimal_or_raise(cls, fixed_value_str: str) -> decimal.Decimal:\n        try:\n            return decimal.Decimal(fixed_value_str)\n        except decimal.DecimalException:\n            cls.__raise_decode_sub_policy_failed(fixed_value_str)\n\n    @classmethod\n    def __convert_percent_str_2_decimal_or_raise(cls, percent_str: str) -> decimal.Decimal:\n        try:\n            return decimal.Decimal(percent_str[:-1]) / decimal.Decimal('100.00')\n        except decimal.DecimalException:\n            cls.__raise_decode_sub_policy_failed(percent_str)\n\n    @classmethod\n    def __raise_decode_sub_policy_failed(cls, sub_policy_str: str):\n        raise BadRequest(f'解析分摊策略失败,原因:解析{sub_policy_str}固定数值或比例失败。')\n","repo_name":"Ethan-zhengyw/mvc_ddd","sub_path":"service/controls/apps/dto.py","file_name":"dto.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10979610895","text":"from .linalg import as_array\n\ndef get_data(filename, delimiter=','):\n    with open(filename) as f:\n        CDC = [line.strip().split(delimiter)[1] for i, line in enumerate(f)]\n    return as_array(CDC)\n\ndef check_bounds(x):\n    if x < 0: x = 0.0\n    if x > 1: x = 1.0\n    return x\n\ndef read_params(file_path):\n    params = {}\n    with open(file_path) as f:\n        for line in f:\n            key, data, data_type = line.rstrip().split('\t')\n            if data_type == 'string':\n                pass\n            elif data_type == 'int':\n                data = int(data)\n            elif data_type in ['float', 'double']:\n                data = float(data)\n            elif data_type == 'boolean':\n                data = True if data == 'true' else False\n            elif data_type == 'list of string':\n                data = data.split(',')\n            elif data_type == 'list of int': \n                data = map(int, data.split(','))\n            else:\n                print('This line is ignored')\n\n            params[key] = data\n    \n    return params\n    \n    \n","repo_name":"weipeng/pyepi","sub_path":"common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"31020057160","text":"\"\"\"\nCreated on May 22, 2018\n\n@author: Moritz\n\"\"\"\n\nimport numpy as np\n\n\ndef sample_identity_node(node, n_samples, rand_gen=None, ranges=None):\n\n    if ranges is None or ranges[node.scope[0]] is None:\n        return rand_gen.choice(node.vals, n_samples)\n    else:\n        # Generate bins for the specified range\n        rang = ranges[node.scope[0]]\n\n        # Iterate over the specified ranges\n        intervals = rang.get_ranges()\n        probs = np.zeros(len(intervals))\n        bin_vals = []\n        for i, interval in enumerate(intervals):\n\n            if len(interval) == 1:\n                lower = np.searchsorted(node.vals, interval[0], side=\"left\")\n                higher = 
np.searchsorted(node.vals, interval[0], side=\"right\")\n else:\n lower = np.searchsorted(node.vals, interval[0], side=\"left\")\n higher = np.searchsorted(node.vals, interval[1], side=\"right\")\n\n probs[i] = (higher - lower) / len(node.vals)\n bin_vals.append(node.vals[lower:higher])\n\n probs /= np.sum(probs)\n\n # samples = []\n # choices = np.arange(len(bin_vals))\n # while len(samples) < n_samples:\n # rand_bin = rand_gen.choice(choices, p=probs)\n # bin_val = bin_vals[rand_bin]\n # samples.append(rand_gen.choice(bin_val))\n\n insts = probs * n_samples\n insts = np.round(insts)\n\n samples = []\n for i, inst in enumerate(insts):\n samples = np.concatenate([samples, rand_gen.choice(bin_vals[i], int(inst))])\n rand_gen.shuffle(samples)\n\n return samples\n","repo_name":"SPFlow/SPFlow","sub_path":"src/spn/experiments/AQP/leaves/identity/SamplingRange.py","file_name":"SamplingRange.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":269,"dataset":"github-code","pt":"78"} +{"seq_id":"35758804128","text":"\"\"\"Module of the XGC1 loader for regerating general plots using ADIOS2\nSome parts are taken from Michael's xgc.py which is taken from Loic's load_XGC_local for BES.\nIt reads the data from the simulation especially 1D results and other small data output.\n\nTODO\n3D data are loaded only when it is specified.\n\"\"\"\n\nimport numpy as np\nimport os\nfrom matplotlib.tri import Triangulation\nimport adios2\nimport matplotlib.pyplot as plt\nfrom scipy.io import matlab\nfrom scipy.optimize import curve_fit\nfrom scipy.special import erfc\nimport scipy.sparse as sp\n\n\nclass xgc1(object):\n \n def __init__(self):\n \"\"\" \n initialize it from the current directory.\n not doing much thing. \n \"\"\" \n self.path=os.getcwd()+'/'\n \n\n def load_unitsm(self):\n \"\"\"\n read in units file\n \"\"\"\n self.unit_file = self.path+'units.m'\n self.unit_dic = self.load_m(self.unit_file) #actual reading routine\n self.psix=self.unit_dic['psi_x']\n self.eq_x_r = self.unit_dic['eq_x_r']\n self.eq_x_z = self.unit_dic['eq_x_z']\n self.eq_axis_r = self.unit_dic['eq_axis_r']\n self.eq_axis_z = self.unit_dic['eq_axis_z']\n self.eq_axis_b = self.unit_dic['eq_axis_b']\n self.sml_dt = self.unit_dic['sml_dt']\n self.sml_wedge_n = self.unit_dic['sml_wedge_n']\n self.diag_1d_period = self.unit_dic['diag_1d_period']\n\n def load_oned(self):\n \"\"\"\n load xgc.oneddiag.bp and some post process\n \"\"\"\n self.od=self.data1(\"xgc.oneddiag.bp\") #actual reading routine\n self.od.psi=self.od.psi[0,:]\n self.od.psi00=self.od.psi00[0,:]\n try:\n self.od.psi00n=self.od.psi00/self.psix #Normalize 0 - 1(Separatrix)\n except:\n print(\"psix is not defined - call load_unitsm() to get psix to get psi00n\")\n # Temperatures\n try: \n Teperp=self.od.e_perp_temperature_df_1d\n except:\n print('No electron')\n self.electron_on=False\n else:\n self.electron_on=True\n Tepara=self.od.e_parallel_mean_en_df_1d #parallel flow ignored, correct it later\n self.od.Te=(Teperp+Tepara)/3*2\n Tiperp=self.od.i_perp_temperature_df_1d\n Tipara=self.od.i_parallel_mean_en_df_1d #parallel flow ignored, correct it later\n self.od.Ti=(Tiperp+Tipara)/3*2\n\n #ExB shear calculation\n if(self.electron_on):\n shear=self.od.d_dpsi(self.od.e_poloidal_ExB_flow_1d,self.od.psi_mks)\n self.od.grad_psi_sqr = self.od.e_grad_psi_sqr_1d\n else:\n shear=self.od.d_dpsi(self.od.i_poloidal_ExB_flow_1d,self.od.psi_mks)\n self.od.grad_psi_sqr = self.od.i_grad_psi_sqr_1d\n self.od.shear_r=shear * 
np.sqrt(self.od.grad_psi_sqr) # assuming electron full-f is almost homogeneouse\n\n if(self.electron_on):\n self.od.density = self.od.e_gc_density_df_1d\n else:\n self.od.density = self.od.i_gc_density_df_1d\n\n #gradient scale\n self.od.Ln = self.od.density / self.od.d_dpsi(self.od.density, self.od.psi_mks) / np.sqrt(self.od.grad_psi_sqr)\n self.od.Lti =self.od.Ti / self.od.d_dpsi(self.od.Ti , self.od.psi_mks) / np.sqrt(self.od.grad_psi_sqr)\n if(self.electron_on):\n self.od.Lte =self.od.Te / self.od.d_dpsi(self.od.Te , self.od.psi_mks) / np.sqrt(self.od.grad_psi_sqr)\n \n\n #find tmask\n d=self.od.step[1]-self.od.step[0]\n st=self.od.step[0]/d\n ed=self.od.step[-1]/d\n st=st.astype(int)\n ed=ed.astype(int)\n idx=np.arange(st,ed, dtype=int)\n\n self.od.tmask=idx #mem allocation\n for i in idx:\n tmp=np.argwhere(self.od.step==i*d)\n #self.od.tmask[i-st/d]=tmp[-1,-1] #LFS zero based, RHS last element\n self.od.tmask[i-st]=tmp[-1,-1] #LFS zero based, RHS last element\n\n \"\"\" \n class for reading data file like xgc.oneddiag.bp\n Trying to be general, but used only for xgc.onedidag.bp\n \"\"\"\n class data1(object):\n def __init__(self,filename):\n with adios2.open(filename,\"r\") as self.f:\n #read file and assign it\n self.vars=self.f.available_variables()\n for v in self.vars:\n stc=self.vars[v].get(\"AvailableStepsCount\")\n ct=self.vars[v].get(\"Shape\")\n sgl=self.vars[v].get(\"SingleValue\")\n stc=int(stc)\n if ct!='':\n ct=int(ct)\n setattr(self,v,self.f.read(v,start=[0], count=[ct], step_start=0, step_count=stc))\n elif v!='gsamples' and v!='samples' :\n setattr(self,v,self.f.read(v,start=[], count=[], step_start=0, step_count=stc)) #null list for scalar \n def d_dpsi(self,var,psi):\n \"\"\"\n radial derivative using psi_mks.\n \"\"\"\n dvdp=var*0; #memory allocation\n dvdp[:,1:-1]=(var[:,2:]-var[:,0:-2])/(psi[:,2:]-psi[:,0:-2])\n dvdp[:,0]=dvdp[:,1]\n dvdp[:,-1]=dvdp[:,-2]\n return dvdp\n\n \"\"\"\n class for head load diagnostic output.\n Only psi space data currently?\n \"\"\"\n class datahlp(object):\n def __init__(self,filename,irg):\n with adios2.open(filename,\"r\") as self.f:\n #irg is region number 0,1 - outer, inner\n #read file and assign it\n self.vars=self.f.available_variables()\n for v in self.vars:\n stc=self.vars[v].get(\"AvailableStepsCount\")\n ct=self.vars[v].get(\"Shape\")\n sgl=self.vars[v].get(\"SingleValue\")\n stc=int(stc)\n if ct!='':\n c=[int(i) for i in ct.split(',')] #\n if len(c)==1 : # time and step \n setattr(self,v,self.f.read(v,start=[0], count=c, step_start=0, step_count=stc))\n elif len(c)==2 : # c[0] is irg\n setattr(self,v,np.squeeze(self.f.read(v,start=[irg,0], count=[1,c[1]], step_start=0, step_count=stc)))\n elif len(c)==3 : # ct[0] is irg, read only \n setattr(self,v,np.squeeze(self.f.read(v,start=[irg,0,0], count=[1,c[1],c[2]], step_start=0, step_count=stc)))\n elif v!='zsamples' and v!='rsamples':\n setattr(self,v,self.f.read(v,start=[], count=[], step_start=0, step_count=stc)) #null list for scalar\n #keep last time step\n self.r=self.r[-1,:]\n self.z=self.z[-1,:]\n \n \"\"\" \n get some parameters for plots of heat diag\n\n \"\"\"\n def post_heatdiag(self,dt,ds):\n #\n \"\"\"\n self.hl[i].rmid=np.interp(self.hl[i].psin,self.bfm.psino,self.bfm.rmido)\n self.hl[i].drmid=self.hl[irg].rmid*0 # mem allocation\n self.hl[i].drmid=[1:-1]=(self.hl[i].rmid[2:]-self.hl[i].rmid[0:-2])*0.5\n self.hl[i].drmid[0]=self.hl[i].drmid[1]\n self.hl[i].drmid[-1]=self.hl[i].drmid[-2]\n \"\"\"\n self.drmid=self.rmid*0 # mem allocation\n 
self.drmid[1:-1]=(self.rmid[2:]-self.rmid[0:-2])*0.5\n self.drmid[0]=self.drmid[1]\n self.drmid[-1]=self.drmid[-2]\n\n #get separatrix r\n self.rs=np.interp([1],self.psin,self.rmid)\n \n self.rmidsepmm=(self.rmid-self.rs)*1E3 # dist from sep in mm\n\n #get heat\n self.qe=(self.e_perp_energy_psi + self.e_para_energy_psi)/dt/ds\n self.qi=(self.i_perp_energy_psi + self.i_para_energy_psi)/dt/ds\n self.ge=self.e_number_psi/dt/ds\n self.gi=self.i_number_psi/dt/ds\n self.qt=self.qe+self.qi\n #imx=self.qt.argmax(axis=1)\n mx=np.amax(self.qt,axis=1)\n self.lq_int=mx*0 #mem allocation\n\n for i in range(mx.shape[0]):\n self.lq_int[i]=np.sum(self.qt[i,:]*self.drmid)/mx[i]\n\n \"\"\"\n getting total heat (radially integrated) to inner/outer divertor.\n \"\"\"\n def total_heat(self,dt,wedge_n):\n qe=wedge_n * (np.sum(self.e_perp_energy_psi,axis=1)+np.sum(self.e_para_energy_psi,axis=1))\n qi=wedge_n * (np.sum(self.i_perp_energy_psi,axis=1)+np.sum(self.i_para_energy_psi,axis=1))\n\n #find restart point and remove -- \n\n # find dt in varying sml_dt after restart\n\n self.qe_tot=qe/dt\n self.qi_tot=qi/dt\n \n #compare 2D data \n #qe2=np.sum(self.e_perp_energy+self.e_para_energy,axis=2)\n #qe2=np.sum(qe2,axis=1)\n #self.qe_tot2=qe2*wedge_n/dt\n #qi2=np.sum(self.i_perp_energy+self.i_para_energy,axis=2)\n #qi2=np.sum(qi2,axis=1)\n #self.qi_tot2=qi2*wedge_n/dt\n\n \"\"\"\n Functions for eich fit\n q(x) =0.5*q0* exp( (0.5*s/lq)^2 - (x-dsep)/lq ) * erfc (0.5*s/lq - (x-dsep)/s)\n \"\"\"\n def eich(self,xdata,q0,s,lq,dsep):\n return 0.5*q0*np.exp((0.5*s/lq)**2-(xdata-dsep)/lq)*erfc(0.5*s/lq-(xdata-dsep)/s)\n\n \"\"\"\n Eich fitting of one profile data\n \"\"\"\n def eich_fit1(self,ydata,pmask):\n q0init=np.max(ydata)\n sinit=2 # 2mm\n lqinit=1 # 1mm\n dsepinit=0.1 # 0.1 mm\n\n p0=np.array([q0init, sinit, lqinit, dsepinit])\n if(pmask==None):\n popt,pconv = curve_fit(self.eich,self.rmidsepmm,ydata,p0=p0)\n else:\n popt,pconv = curve_fit(self.eich,self.rmidsepmm[pmask],ydata[pmask],p0=p0)\n\n return popt, pconv\n\n \"\"\"\n perform fitting for all time steps.\n \"\"\"\n def eich_fit_all(self,**kwargs):\n # need pmask for generalization?\n pmask = kwargs.get('pmask', None)\n\n self.lq_eich=self.lq_int*0 #mem allocation\n\n for i in range(self.time.size):\n try :\n popt,pconv = self.eich_fit1(self.qt[i,:],pmask)\n except:\n popt=[0, 0, 0, 0]\n \n self.lq_eich[i]= popt[2]\n \n \"\"\"\n data for bfieldm\n \"\"\"\n class databfm(object):\n def __init__(self):\n with adios2.open(\"xgc.bfieldm.bp\",\"r\") as self.f:\n self.vars=self.f.available_variables()\n v='/bfield/rvec'\n ct=self.vars[v].get(\"Shape\")\n c=int(ct)\n self.rmid=self.f.read(v,start=[0],count=[c],step_start=0, step_count=1)\n v='/bfield/psi_eq_x_psi'\n ct=self.vars[v].get(\"Shape\")\n c=int(ct)\n self.psin=self.f.read(v,start=[0],count=[c],step_start=0, step_count=1)\n\n\n def load_heatdiag(self):\n \"\"\"\n load xgc.heatdiag.bp and some post process\n \"\"\"\n self.hl=[]\n self.hl.append( self.datahlp(\"xgc.heatdiag.bp\",0) ) #actual reading routine\n self.hl.append( self.datahlp(\"xgc.heatdiag.bp\",1) )#actual reading routine\n\n for i in [0,1] :\n try:\n self.hl[i].psin=self.hl[i].psi[0,:]/self.psix #Normalize 0 - 1(Separatrix)\n except:\n print(\"psix is not defined - call load_unitsm() to get psix to get psin\")\n\n #read bfieldm data if available\n self.load_bfieldm()\n\n dt=self.unit_dic['sml_dt']*self.unit_dic['diag_1d_period']\n wedge_n=self.unit_dic['sml_wedge_n']\n for i in [0,1]:\n dpsin=self.hl[i].psin[1]-self.hl[i].psin[0] #equal dist\n #ds 
= dR* 2 * pi * R / wedge_n\n ds=dpsin/self.bfm.dpndrs* 2 * 3.141592 * self.bfm.r0 /wedge_n #R0 at axis is used. should I use Rs?\n self.hl[i].rmid=np.interp(self.hl[i].psin,self.bfm.psino,self.bfm.rmido)\n self.hl[i].post_heatdiag(dt,ds)\n self.hl[i].total_heat(dt,wedge_n)\n\n def load_bfieldm(self):\n self.bfm = self.databfm()\n self.bfm.r0=self.unit_dic['eq_axis_r']\n\n #get outside midplane only\n msk=np.argwhere(self.bfm.rmid>self.bfm.r0)\n n0=msk[0,1]\n self.bfm.rmido=self.bfm.rmid[0,n0:]\n self.bfm.psino=self.bfm.psin[0,n0:]\n\n #find separtrix index and r\n msk=np.argwhere(self.bfm.psino>1)\n n0=msk[1]\n self.bfm.rs = self.bfm.rmido[n0]\n\n #get dpdr (normalized psi) at separatrix\n self.bfm.dpndrs = (self.bfm.psino[n0]-self.bfm.psino[n0-1])/(self.bfm.rmido[n0]-self.bfm.rmido[n0-1])\n\n\n \n def load_m(self,fname):\n \"\"\"load the whole .m file and return a dictionary contains all the entries.\n \"\"\"\n f = open(fname,'r')\n result = {}\n for line in f:\n words = line.split('=')\n key = words[0].strip()\n value = words[1].strip(' ;\\n')\n result[key]= float(value)\n f.close()\n return result \n\n def plot1d_if(self,obj,**kwargs):\n \"\"\"\n plot 1D (psi) var of initial and final\n with ylabel of varstr\n Maybe it can be moved to data1 class -- but it might be possible to be used other data type??\n \"\"\"\n var=kwargs.get('var',None)\n varstr = kwargs.get('varstr', None)\n box = kwargs.get('box', None)\n psi = kwargs.get('psi', None)\n xlim = kwargs.get('xlim', None)\n initial = kwargs.get('initial',True)\n \n if(type(psi).__module__ != np.__name__): #None or not numpy data\n psi=obj.psi #default psi is obj.psi\n \n if(type(var).__module__ != np.__name__):\n if(varstr==None): \n print(\"Either var or varstr should be defined.\")\n else:\n var=getattr(obj,varstr) #default var is from varstr\n \n stc=var.shape[0]\n fig, ax=plt.subplots()\n lbl=[\"Initial\",\"Final\"]\n if(xlim==None):\n if(initial):\n ax.plot(psi,var[0,],label='Initial')\n ax.plot(psi,var[stc-1,],label='Final')\n else:\n msk=(psi >= xlim[0]) & (psi <= xlim[1])\n if(initial):\n ax.plot(psi[msk],var[0,msk],label='Initial')\n ax.plot(psi[msk],var[stc-1,msk],label='Final')\n \n ax.legend()\n ax.set(xlabel='Normalized Pol. Flux')\n if(varstr!=None):\n ax.set(ylabel=varstr)\n \n #add time stamp of final?\n return fig, ax \n\n \"\"\"\n setup self.mesh\n \"\"\"\n def setup_mesh(self):\n self.mesh=self.meshdata()\n\n \"\"\"\n setup f0mesh\n \"\"\" \n def setup_f0mesh(self):\n self.f0=self.f0meshdata()\n\n class meshdata(object): \n \"\"\"\n mesh data class for 2D contour plot\n \"\"\"\n def __init__(self):\n with adios2.open(\"xgc.mesh.bp\",\"r\") as fm:\n rz=fm.read('rz')\n self.cnct=fm.read('/cell_set[0]/node_connect_list')\n self.r=rz[:,0]\n self.z=rz[:,1]\n self.triobj = Triangulation(self.r,self.z,self.cnct)\n try:\n self.surf_idx=fm.read('surf_idx')\n except:\n print(\"No surf_idx in xgc.mesh.bp\") \n else:\n self.surf_len=fm.read('surf_len')\n self.psi_surf=fm.read('psi_surf')\n self.node_vol=fm.read('node_vol')\n self.qsafety=fm.read('qsafety')\n self.psi=fm.read('psi')\n\n\n class f0meshdata(object): \n \"\"\"\n mesh data class for 2D contour plot\n \"\"\"\n def __init__(self):\n with adios2.open(\"xgc.f0.mesh.bp\",\"r\") as f:\n T_ev=f.read('f0_T_ev')\n\n self.ti0=T_ev[-1,:] # last species. 
need update for multi ion\n if(T_ev.shape[0]>=2):\n self.te0=T_ev[0,:]\n self.den0=f.read('f0_den')\n self.dsmu=f.read('f0_dsmu')\n self.dvp =f.read('f0_dvp')\n self.smu_max=f.read('f0_smu_max')\n self.vp_max=f.read('f0_vp_max')\n\n \"\"\"\n flux surface average data structure\n \"\"\"\n class fluxavg(object):\n def __init__(self):\n with adios2.open(\"xgc.fluxavg.bp\",\"r\") as f:\n eindex=f.read('eindex')\n nelement=f.read('nelement')\n self.npsi=f.read('npsi')\n value=f.read('value')\n\n \n\n\n #setup matrix\n mat = IncrementalCOOMatrix(shape, np.float64)\n\n for i in range(shape[0]):\n for j in range(shape[1]):\n mat.append(i, j, dense[i, j])\n\n \n class voldata(object):\n \"\"\"\n read volume data\n \"\"\"\n def __init__(self):\n with adios2.open(\"xgc.volumes.bp\",\"r\") as f:\n self.od=f.read(\"diag_1d_vol\")\n #try:\n self.adj_eden=f.read(\"psn_adj_eden_vol\")\n\n class turbdata(object):\n \"\"\"\n data for turb intensity\n assuming convert_grid2 for flux average\n \"\"\"\n def __init__(self,istart,iend,istep,midwidth,mesh,f0):\n # setup flux surface average\n\n self.midwidth=midwidth\n self.istart=istart\n self.iend=iend\n self.istep=istep\n\n #setup flux surface average matrix\n\n\n # read whole data\n for i in range(istart,iend,istep):\n # 3d file name\n filename= \"xgc.3d.%5.5d.bp\" % (i)\n\n #read data\n with adios2.open(filename,\"r\") as f:\n dpot=f.read(\"dpot\")\n dden=f.read(\"eden\")\n\n nzeta=dpot.shape[0] \n print(nzeta) #check correct number\n dpotn0=np.mean(dpot,axis=0)\n dpot=dpot-dpotn0 #numpy broadcasting\n #toroidal average of (dpot/Te)^2\n var=np.mean(dpot**2,axis=0)/f0.Te0**2\n #flux surface average of dpot/Te (midplane only)\n \n #self.dpot_te_sqr=\n\n dden=dden - np.mean(dden,axis=0) # remove n=0 mode\n var=dpot/f0.Te0 + dden/f0.ne0\n var=np.mean(var**2,axis=0) # toroidal average\n #flux surface average of dn/n0\n\n #self.dn_n0_sqr=\n \n\n\n\n def load_volumes(self):\n \"\"\"\n setup self.vol\n \"\"\"\n self.vol=self.voldata()\n\n def heat_flux_all(self):\n \n #load volume data\n if(not hasattr(self,\"vol\")):\n self.vol=self.voldata()\n \n #check reading oneddiag?\n \n #get dpsi\n pmks=self.od.psi_mks[0,:]\n dpsi=np.zeros_like(pmks)\n dpsi[1:-1]=0.5*(pmks[2:]-pmks[0:-2])\n dpsi[0]=dpsi[1]\n dpsi[-1]=dpsi[-2]\n self.od.dvdp=self.vol.od/dpsi\n self.od.dpsi=dpsi\n \n nt=self.od.time.size\n ec=1.6E-19 #electron charge\n dvdpall=self.od.dvdp * self.sml_wedge_n\n \n #ion flux\n self.od.efluxi = self.od.i_gc_density_df_1d * self.od.i_radial_en_flux_df_1d * dvdpall\n self.od.efluxexbi = self.od.i_gc_density_df_1d * self.od.i_radial_en_flux_ExB_df_1d * dvdpall\n self.od.cfluxi = self.od.i_gc_density_df_1d * self.od.Ti * ec * self.od.i_radial_flux_df_1d * dvdpall\n self.od.cfluxexbi = self.od.i_gc_density_df_1d * self.od.Ti * ec * self.od.i_radial_flux_ExB_df_1d * dvdpall\n self.od.pfluxi = self.od.i_gc_density_df_1d * self.od.i_radial_flux_df_1d * dvdpall\n self.od.pfluxexbi = self.od.i_gc_density_df_1d * self.od.i_radial_flux_ExB_df_1d * dvdpall\n \n \n self.od.efluxe = self.od.e_gc_density_df_1d * self.od.e_radial_en_flux_df_1d * dvdpall\n self.od.efluxexbe = self.od.e_gc_density_df_1d * self.od.e_radial_en_flux_ExB_df_1d * dvdpall\n self.od.cfluxe = self.od.e_gc_density_df_1d * self.od.Te * ec * self.od.e_radial_flux_df_1d * dvdpall\n self.od.cfluxexbe = self.od.e_gc_density_df_1d * self.od.Te * ec * self.od.e_radial_flux_ExB_df_1d * dvdpall\n self.od.pfluxe = self.od.e_gc_density_df_1d * self.od.e_radial_flux_df_1d * dvdpall\n self.od.pfluxexbe = 
self.od.e_gc_density_df_1d * self.od.e_radial_flux_ExB_df_1d * dvdpall\n \n\n\n def plot2d(self,filestr,varstr,**kwargs):\n \"\"\"\n general 2d plot\n filestr: file name\n varstr: variable name\n plane: 0 based plane index - ignored for axisymmetric data\n Improve it to handle box \n additional var to add: box, levels, cmap, etc\n \"\"\"\n box= kwargs.get('box', None) # rmin, rmax, zmin, zmax\n plane=kwargs.get('plane',0)\n levels = kwargs.get('levels', None)\n cmap = kwargs.get('cmap', 'jet')\n \n f=adios2.open(filestr,'r')\n var=f.read(varstr)\n fig, ax=plt.subplots()\n\n if(box!=None):\n ax.set_xlim(box[0], box[1])\n ax.set_ylim(box[2], box[3])\n if(True):\n try:\n cf=ax.tricontourf(self.mesh.triobj,var[plane,], cmap=cmap,extend='both')\n except:\n cf=ax.tricontourf(self.mesh.triobj,var, cmap=cmap, extend='both')\n \n cbar = fig.colorbar(cf)\n\n if(box!=None):\n ax.set_xlim(box[0], box[1])\n ax.set_ylim(box[2], box[3])\n\n #else:\n if(False):\n #if(box!=None):\n Rmin=box[0]\n Rmax=box[1]\n Zmin=box[2]\n Zmax=box[3]\n\n #ax.set_xlim(Rmin, Rmax)\n #ax.set_ylim(Zmin, Zmax)\n \"\"\" \n #color bar change\n new_clim = (0, 100)\n # find location of the new upper limit on the color bar\n loc_on_cbar = cbar.norm(new_clim[1])\n # redefine limits of the colorbar\n cf.colorbar.set_clim(*new_clim)\n cf.colorbar.set_ticks(np.linspace(*new_clim, 50))\n # redefine the limits of the levels of the contour\n cf.set_clim(*new_clim)\n # updating the contourplot\n cf.changed()\n \"\"\"\n \n #find subset triobj\n #limit to the user-input ranges\n idxsub = ( (self.mesh.r>=Rmin) & (self.mesh.r<=Rmax) & (self.mesh.z>=Zmin) & (self.mesh.z<=Zmax) )\n rsub=self.mesh.r[idxsub]\n zsub=self.mesh.z[idxsub]\n\n\n #find which triangles are in the defined spatial region\n tmp=idxsub[self.mesh.cnct] #idxsub T/F array, same size as R\n goodtri=np.all(tmp,axis=1) #only use triangles who have all vertices in idxsub\n trisub=self.mesh.cnct[goodtri,:]\n #remap indices in triangulation\n indices=np.where(idxsub)[0]\n for i in range(len(indices)):\n trisub[trisub==indices[i]]=i\n\n trisubobj = Triangulation(rsub,zsub,trisub)\n\n try:\n cf=ax.tricontourf(trisubobj,var[plane,idxsub], cmap=cmap,extend='both')\n except:\n cf=ax.tricontourf(trisubobj,var[idxsub], cmap=cmap, extend='both')\n \n cbar = fig.colorbar(cf)\n\n\n ax.set_title(varstr + \" from \" + filestr)\n return ax, cf\n \n\n \n def fsa_simple(self,var,**kwargs):\n \"\"\"\n simple flux surface average using mesh data\n self.meshdata should be called before\n\n var: variable to average \n plane: 0 based plane index - ignored for axisymmetric data\n Improve it to handle box \n additional var to add: box, levels, cmap, etc\n \"\"\"\n favg=np.zeros(self.mesh.psi_surf.size)\n for i in range(0,self.mesh.psi_surf.size):\n s1=0\n s2=0\n for j in range(0,self.mesh.surf_len[i]):\n idx=self.mesh.surf_idx[i,j] - 1\n s1=s1+var[idx]*self.mesh.node_vol[i]\n s2=s2+self.mesh.node_vol[i]\n favg[i]=s1/s2\n return favg\n\n def print_plasma_info(self):\n # print some plasma information (mostly from unit_dic)\n print(\"magnetic axis (R,Z) = (%5.5f, %5.5f) m\" % (self.eq_axis_r, self.eq_axis_z))\n print(\"magnetic field at axis = %5.5f T\" % self.eq_axis_b)\n print(\"X-point (R,Z) = (%5.5f, %5.5f)\" % (self.eq_x_r, self.eq_x_z))\n print(\"simulation delta t = %e s\" % self.sml_dt)\n print(\"wedge number = %d\" % self.sml_wedge_n)\n print(\"Ion mass = %d\" % self.unit_dic['ptl_ion_mass_au'])\n print(\"particle number = %e\" % (self.unit_dic['sml_totalpe']* self.unit_dic['ptl_num']))\n 
\n","repo_name":"jychoi-hpc/XGC_reader","sub_path":"xgc_reader.py","file_name":"xgc_reader.py","file_ext":"py","file_size_in_byte":24965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"23339722485","text":"from src.ai import *\nimport asyncio\n\npos = {'map':(4328,840), 'm_back':(3000,90), 'm_nextmap':(4572,1257), 'm_hero':(4322,1555), 'm_close_hero':(4333,299), 'm_hero_top':(4100,500), 'm_hero_bottom':(4000,1333), 'bot_bar': (3925,1100)}\ncycle = 50 #minutes\nmax = 29 #cycle\n\ndef mtos(m):\n\treturn m*60\n\ndef stom(s):\n\treturn s/60\n\ndef countdown(s):\n\tcount = 0\n\twhile count < s:\n\t\tsleep(1)\n\t\tcount+=1\n\ndef printpos():\n\tprint(gui.position())\n\n\nfor j in range(max):\n\tprint('bigcy{}'.format(j))\n\tfor i in range(int(cycle/5)):\n\t\tprint(\"cycle{}/{}\".format(i,int(cycle/5)))\n\t\tif i+1==int(cycle/5):\n\t\t\tprint('workall...')\n\t\t\tclick(pos['m_hero'])\n\t\t\tsleep(1)\n\t\t\tclick()\n\t\t\tsleep(2)\n\t\t\tmoveto(pos['map'])\n\t\t\tsleep(2)\n\t\t\tscroll(40)\n\t\t\tfor _ in range(15):\n\t\t\t\tclick(pos['m_hero_bottom'])\n\t\t\tclick(pos['m_close_hero'])\n\t\t\tclick(pos['map'])\n\t\tclick(pos['m_nextmap'])\n\t\tsleep(2)\n\t\tprint('remap..')\n\t\tclick(pos['m_back'])\n\t\tsleep(1)\n\t\tclick(pos['map'])\n\t\tsleep(mtos(5))","repo_name":"VocanicZ/bombobot","sub_path":"pro/bombobot/bottimer.py","file_name":"bottimer.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"7074801574","text":"import streamlit as st\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n@st.cache\ndef FitPolynomialRegression(K, x, y):\n Xmatrix = np.ones((x.size, 1))\n x = x.reshape((x.size, 1))\n y = y.reshape((y.size, 1))\n for i in range(1, K + 1):\n colVec = np.power(x, i)\n Xmatrix = np.hstack((Xmatrix, colVec))\n Xmatrix_pinv = np.linalg.pinv(Xmatrix)\n ret = np.dot(Xmatrix_pinv, y)\n return ret.reshape(1, ret.size)\n\n\n@st.cache\ndef EvalPolynomial(x, w):\n y = []\n for i in range(x.size):\n output = 0\n for j in range(w.size):\n output += w[j] * x[i] ** j\n y.append(float(output))\n return np.array(y)\n\n\n# @st.cache\ndef GetBestPolynomial(xTrain, yTrain, xTest, yTest, h):\n errorsTrainVec = []\n errorsTestVec = []\n xTrain = xTrain.reshape((xTrain.shape[0], 1))\n xTest = xTest.reshape((xTest.shape[0], 1))\n yTest = yTest.reshape((yTest.shape[0], 1))\n for i in range(1, h + 1):\n weights = FitPolynomialRegression(i, xTrain, yTrain)\n weights = weights.reshape(weights.size, 1)\n outputsTrain = EvalPolynomial(xTrain, weights)\n outputsTrain = outputsTrain.reshape(outputsTrain.size, 1)\n outputsTest = EvalPolynomial(xTest, weights)\n outputsTest = outputsTest.reshape(outputsTest.size, 1)\n\n errorTrain = (np.linalg.norm(yTrain - outputsTrain)) ** 2\n errorTest = (np.linalg.norm(yTest - outputsTest)) ** 2\n errorsTrainVec.append(errorTrain / 75)\n errorsTestVec.append(errorTest / 25)\n plt.scatter(xTrain, outputsTrain, color='green', label='Training')\n plt.scatter(xTest, yTest, color='orange', label='Testing')\n plt.xlabel('Inputs (x)')\n plt.ylabel('Outputs (y)')\n title = 'Data Fitting, h = ' + str(i)\n plt.title(title)\n plt.legend()\n st.pyplot()\n # plt.show()\n errorsTrainVec = np.array(errorsTrainVec)\n errorsTestVec = np.array(errorsTestVec)\n errorsTrainVec = errorsTrainVec.reshape((errorsTrainVec.shape[0], 1))\n errorsTestVec = errorsTestVec.reshape((errorsTestVec.shape[0], 1))\n errTotal = np.hstack((errorsTrainVec, 
errorsTestVec))\n return errTotal\n\n # plt.plot(np.arange(1,h+1),errorsTrainVec,label='Training')\n # plt.plot(np.arange(1,h+1),errorsTestVec,label='Testing')\n # plt.xlabel(\"Model Complexity (Polynomial Degree)\")\n # plt.ylabel(\"Mean Squared Error (Based on Log Scale)\")\n # plt.title(\"Error vs. Model Complexity\")\n # plt.xticks(np.arange(1,10,step=1))\n # plt.yscale('log')\n # plt.legend()\n # st.pyplot()\n # plt.show()\n # print(\"\\nTraining Error:\\n\")\n # print(np.array(errorsTrainVec))\n # print(\"\\nTesting Error:\\n\")\n # print(np.array(errorsTestVec))\n # print(\"\\nBest choice of d = \"+str(errorsTestVec.index(min(errorsTestVec))+1)+\"\\n\")\n","repo_name":"eric-li18/modelweave","sub_path":"polyreg.py","file_name":"polyreg.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"22962837282","text":"\"\"\"\n.. _ref_3d_plane_stress_concentration:\n3D Stress Concentration Analysis for a Notched Plate\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nThis tutorial is the 3D corollary to the 2D plane example\n:ref:`ref_plane_stress_concentration`, but This example verifies the\nstress concentration factor :math:`K-t` when modeling opposite single\nnotches in a finite width thin plate\nFirst, start MAPDL as a service and disable all but error messages.\n\"\"\"\n# sphinx_gallery_thumbnail_number = 3\n\nimport numpy as np\nimport pyansys\n\nimport os, sys\nsys.path.append('.\\\\')\n\n\n#mapdl = pyansys.launch_mapdl(override=True, additional_switches='-smp',\n# loglevel='ERROR')\nfrom ansys.base import init\nmapdl = init()\n\n\n\n###############################################################################\n# Geometry\n# ~~~~~~~~\n# Create a rectangular area with two notches at the top and bottom.\n\nlength = 0.4\nwidth = 0.1\n\n# ratio = 0.3 # diameter/width\n# diameter = width*ratio\n# radius = diameter*0.5\n\nnotch_depth = 0.04\n# notch_radius = 0.002\nnotch_radius = 0.01\n\n# create the half arcs\nmapdl.prep7()\n\ncirc0_kp = mapdl.k(x=length/2, y=width + notch_radius)\ncirc_line_num = mapdl.circle(circ0_kp, notch_radius)\ncirc_line_num = circ_line_num[2:] # only concerned with the bottom arcs\n\n# create a line and drag the top circle downward\ncirc0_kp = mapdl.k(x=0, y=0)\nk1 = mapdl.k(x=0, y=-notch_depth)\nl0 = mapdl.l(circ0_kp, k1)\nmapdl.adrag(*circ_line_num, nlp1=l0)\n\n# same thing for the bottom notch (except upwards\ncirc1_kp = mapdl.k(x=length/2, y=-notch_radius)\ncirc_line_num = mapdl.circle(circ1_kp, notch_radius)\ncirc_line_num = circ_line_num[:2] # only concerned with the top arcs\n\n# create a line whereby the top circle will be dragged up\nk0 = mapdl.k(x=0, y=0)\nk1 = mapdl.k(x=0, y=notch_depth)\nl0 = mapdl.l(k0, k1)\nmapdl.adrag(*circ_line_num, nlp1=l0)\n\nrect_anum = mapdl.blc4(width=length, height=width)\n\n\n# Note how pyansys parses the output and returns the area numbers\n# created by each command. 
This can be used to execute a boolean\n# operation on these areas to cut the circle out of the rectangle.\n# plate_with_hole_anum = mapdl.asba(rect_anum, circ_anum)\ncut_area = mapdl.asba(rect_anum, 'ALL') # cut all areas except the plate\n\n# mapdl.aplot(vtk=True, show_line_numbering=True)\nmapdl.lsla('S')\nmapdl.lplot(vtk=True, show_keypoint_numbering=True)\nmapdl.lsel('all')\n\n\n# plot the area using vtk/pyvista\nmapdl.aplot(vtk=True, show_area_numbering=True, show_lines=True, cpos='xy')\n\n\n# ###############################################################################\n# Next, extrude the area to create volume\nthickness = 0.01\nmapdl.vext(cut_area, dz=thickness)\n\nmapdl.vplot(vtk=True, show_lines=True, show_axes=True,\n smooth_shading=True)\n\n###############################################################################\n# Meshing\n# ~~~~~~~\n# This example will use PLANE183 elements as a thin plate can be\n# modeled with plane elements provided that KEYOPTION 3 is set to 3\n# and a thickness is provided.\n#\n# Mesh the plate using a higher density near the hole and a lower\n# density for the remainder of the plate by setting ``LESIZE`` for the\n# lines nearby the hole and ``ESIZE`` for the mesh global size.\n#\n# Line numbers can be identified through inspection using ``lplot``\n\n# define a PLANE183 element type with thickness\n\n\n# ensure there are at 25 elements around the hole\nnotch_esize = np.pi*notch_radius*2/50\nplate_esize = 0.01\n\n# increased the density of the mesh at the notch\n# line and area numbers identified using aplot\n\nmapdl.asel('S', 'AREA', vmin=1, vmax=1)\nmapdl.aplot(vtk=True, show_line_numbering=True)\n\n\nmapdl.lsel('NONE')\nfor line in [7, 8, 20, 21]:\n mapdl.lsel('A', 'LINE', vmin=line, vmax=line)\nmapdl.lesize('ALL', notch_esize, kforc=1)\nmapdl.lsel('ALL')\n\n# Decrease the area mesh expansion. This ensures that the mesh\n# remains fine nearby the hole\nmapdl.mopt('EXPND', 0.7) # default 1\n\n# mesh several elements through the plate\nesize = notch_esize*5\nif esize > thickness/2:\n esize = thickness/2 # minimum of two elements through\n\nmapdl.esize() # this is tough to automate\nmapdl.et(1, \"SOLID186\")\nmapdl.vsweep('all')\n_ = mapdl.eplot(vtk=True, show_edges=True, show_axes=False, line_width=2,\n background='w')\n\n\n###############################################################################\n# Material Properties and Boundary Conditions\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Fix the left-hand side of the plate in the X direction and set a\n# force of 1 kN in the positive X direction.\n#\n\n# This example will use SI units.\nmapdl.units('SI') # SI - International system (m, kg, s, K).\n\n# Define a material (nominal steel in SI)\nmapdl.mp('EX', 1, 210E9) # Elastic moduli in Pa (kg/(m*s**2))\nmapdl.mp('DENS', 1, 7800) # Density in kg/m3\nmapdl.mp('NUXY', 1, 0.3) # Poisson's Ratio\n\n# Fix the left-hand side.\nmapdl.nsel('S', 'LOC', 'X', 0)\nmapdl.d('ALL', 'UX')\n\n# Fix a few nodes on the left-hand side of the plate in the Y and Z\n# direction. Otherwise, the mesh would be allowed to move in the y\n# direction and would be an improperly constrained mesh.\nmapdl.nsel('R', 'LOC', 'Y', width/2)\nmapdl.d('ALL', 'UY')\nmapdl.d('ALL', 'UZ')\n\n# Apply a force on the right-hand side of the plate. 
For this\n# example, we select the nodes at the right-most side of the plate.\nmapdl.nsel('S', 'LOC', 'X', length)\n\n# Verify that only the nodes at length have been selected:\nassert np.unique(mapdl.mesh.nodes[:, 0]) == length\n\n# Next, couple the DOF for these nodes. This lets us provide a force\n# to one node that will be spread throughout all nodes in this coupled\n# set.\nmapdl.cp(5, 'UX', 'ALL')\n\n# Select a single node in this set and apply a force to it\n# We use \"R\" to re-select from the current node group\nmapdl.nsel('R', 'LOC', 'Y', width/2) # selects more than one\nsingle_node = mapdl.mesh.nnum[0]\nmapdl.nsel('S', 'NODE', vmin=single_node, vmax=single_node)\nmapdl.f('ALL', 'FX', 1000)\n\n# finally, be sure to select all nodes again to solve the entire solution\n_ = mapdl.allsel()\n\n\n###############################################################################\n# Solve the Static Problem\n# ~~~~~~~~~~~~~~~~~~~~~~~~\n# Solve the static analysis\nmapdl.run('/SOLU')\nmapdl.antype('STATIC')\nmapdl.solve()\nmapdl.finish()\n\n###############################################################################\n# Post-Processing\n# ~~~~~~~~~~~~~~~\n# The static result can be post-processed both within MAPDL and\n# outside of MAPDL using ``pyansys``. This example shows how to\n# extract the von Mises stress and plot it using the ``pyansys``\n# result reader.\n\n# grab the result from the ``mapdl`` instance\nresult = mapdl.result\nresult.plot_principal_nodal_stress(0, 'SEQV', lighting=False,\n background='w', show_edges=True,\n text_color='k', add_text=False)\n\nnnum, stress = result.principal_nodal_stress(0)\nvon_mises = stress[:, -1] # von-Mises stress is the right most column\n\n# Must use nanmax as stress is not computed at mid-side nodes\nmax_stress = np.nanmax(von_mises)\n\n###############################################################################\n# Compute the Stress Concentration\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# The stress concentration :math:`K_t` is the ratio of the maximum\n# stress at the hole to the far-field stress, or the mean cross\n# sectional stress at a point far from the hole. Analytically, this\n# can be computed with:\n# \n# :math:`\\sigma_{nom} = \\frac{F}{wt}`\n#\n# Where\n#\n# - :math:`F` is the force\n# - :math:`w` is the width of the plate\n# - :math:`t` is the thickness of the plate.\n#\n# Experimentally, this is computed by taking the mean of the nodes at\n# the right-most side of the plate.\n\n# We use nanmean here because mid-side nodes have no stress\nmask = result.mesh.nodes[:, 0] == length\nfar_field_stress = np.nanmean(von_mises[mask])\nprint('Far field von mises stress: %e' % far_field_stress)\n# Which almost exactly equals the analytical value of 10000000.0 Pa\n\n# result.plot_element_result(0, 'ENS', 0)\n\n###############################################################################\n# Since the expected nominal stress across the cross section of the\n# hole will increase as the size of the hole increases, regardless of\n# the stress concentration, the stress must be adjusted to arrive at\n# the correct stress. 
This stress is adjusted by the ratio of the\n# width over the modified cross section width.\nadj = width/(width - notch_depth*2)\nstress_adj = far_field_stress*adj\n\n# The stress concentration is then simply the maximum stress divided\n# by the adjusted far-field stress.\nstress_con = (max_stress/stress_adj)\nprint('Stress Concentration: %.2f' % stress_con)\n\n\n# ###############################################################################\n# # Batch Analysis\n# # ~~~~~~~~~~~~~~\n# # The above script can be placed within a function to compute the\n# # stress concentration for a variety of hole diameters. For each\n# # batch, MAPDL is reset and the geometry is generated from scratch.\n\n\n\n# ###############################################################################\n# # Run the batch and record the stress concentration\n# k_t_exp = []\n# ratios = np.linspace(0.001, 0.5, 20)\n# print(' Ratio : Stress Concentration (K_t)')\n# for ratio in ratios:\n# stress_con = compute_stress_con(ratio)\n# print('%10.4f : %10.4f' % (ratio, stress_con))\n# k_t_exp.append(stress_con)\n\n\n# ###############################################################################\n# # Analytical Comparison\n# # ~~~~~~~~~~~~~~~~~~~~~\n# # Stress concentrations are often obtained by referencing tablular\n# # results or polynominal fits for a variety of geometries. According\n# # to Peterson's Stress Concentration Factors (ISBN 0470048247), the analytical\n# # equation for a hole in a thin plate in uniaxial tension:\n# #\n# # :math:`k_t = 3 - 3.14\\frac{d}{h} + 3.667\\left(\\frac{d}{h}\\right)^2 - 1.527\\left(\\frac{d}{h}\\right)^3`\n# #\n# # Where:\n# #\n# # - :math:`k_t` is the stress concentration\n# # - :math:`d` is the diameter of the circle\n# # - :math:`h` is the height of the plate\n# #\n# # As shown in the following plot, ANSYS matches the known tabular\n# # result for this geometry remarkably well using PLANE183 elements.\n# # The fit to the results may vary depending on the ratio between the\n# # height and width of the plate.\n\n# # where ratio is (d/h)\n# k_t_anl = 3 - 3.14*ratios + 3.667*ratios**2 - 1.527*ratios**3\n\n# plt.plot(ratios, k_t_anl, label=r'$K_t$ Analytical')\n# plt.plot(ratios, k_t_exp, label=r'$K_t$ ANSYS')\n# plt.legend()\n# plt.show()\n\n\n###############################################################################\n# Cleanup\n# ~~~~~~~\n# Close mapdl when complete\n#mapdl.exit()","repo_name":"daalgi/fem-scripts","sub_path":"ansys/notch.py","file_name":"notch.py","file_ext":"py","file_size_in_byte":10764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2998900126","text":"from netCDF4 import Dataset\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport sys\nimport os\n\nfrom tools_AIP import prep_proj_multi_cartopy, get_cfeature, setup_grids_cartopy, read_fcst_nc\nimport matplotlib.pyplot as plt\n\nimport matplotlib.colors as mcolors\nfrom matplotlib.colors import BoundaryNorm\n#from matplotlib import cm\n\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\n\nfrom matplotlib.axes import Axes\nfrom cartopy.mpl.geoaxes import GeoAxes\nGeoAxes._pcolormesh_patched = Axes.pcolormesh\n\n\nquick = True\n#quick = False\n\n\ndef main( INFO={}, vtime=datetime( 2019, 8, 24, 15, 10, 0), height=3000,\n fstime=datetime( 2019, 8, 24, 15, 10, 0 ), exp_l=[],\n clat=36.1, clon=136,\n texp_l=[], nvar='Reflectivity' ):\n\n\n xticks = np.arange( 134.0, 142, 0.2 )\n yticks = np.arange( 30.0, 45, 0.2 )\n\n res = 
'10m'\n if quick:\n res = '50m'\n\n land = get_cfeature( typ='land', res=res )\n coast = get_cfeature( typ='coastline', res=res )\n\n # original data is lon/lat coordinate\n data_crs = ccrs.PlateCarree()\n\n # radar location\n lon_r = 139.609\n lat_r = 35.861\n\n if clon < 0.0:\n clon = lon_r\n\n# fig = plt.figure( figsize=(10, 5) )\n# fig.subplots_adjust( left=0.06, bottom=0.03, right=0.95, top=0.95,\n# wspace=0.2, hspace=0.01)\n#\n# ax_l = prep_proj_multi_cartopy( fig, xfig=2, yfig=1, proj='merc', \n# latitude_true_scale=lat_r )\n \n fig = plt.figure( figsize=(10, 9) )\n fig.subplots_adjust( left=0.06, bottom=0.03, right=0.95, top=0.95,\n wspace=0.1, hspace=0.2)\n projection = ccrs.Mercator( latitude_true_scale=lat_r, )\n ax_l = []\n xfig = 2\n yfig = 2\n for i in range( 1, 5 ):\n if i <= 2:\n ax_l.append( fig.add_subplot( yfig,xfig,i, projection=projection ) )\n else:\n ax_l.append( fig.add_subplot( yfig,xfig,i, projection=None ) )\n\n if nvar == 'Reflectivity':\n levels = np.array( [ 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65 ] )\n cmap = mcolors.ListedColormap(['cyan', 'b', 'dodgerblue',\n 'lime','yellow',\n 'orange', 'red', 'firebrick', 'magenta',\n 'purple'])\n cmap.set_over('gray', alpha=1.0)\n cmap.set_under('w', alpha=0.0)\n cmap.set_under('mistyrose', alpha=1.0 )\n cmap.set_bad( color='gray', alpha=0.5 )\n unit = 'Z (dBZ)'\n\n elif nvar == 'Vr':\n #levels = np.array( [ -15, -10, -5, -1, 1, 5, 10, 15] ) \n levels = np.arange( -6, 6.6, 0.5 )\n levels = np.array( [ -15, -12, -9, -6, -3, -2, -1, -0.5, 0.5, 1, 2, 3, 6, 9, 12, 15 ] ) \n #levels = np.arange( -5, 5.5, 0.5 )\n cmap = plt.cm.get_cmap(\"RdBu_r\")\n #cmap = plt.cm.get_cmap(\"Spectral_r\")\n cmap.set_over('gray', alpha=1.0)\n cmap.set_under('aqua', alpha=1.0)\n cmap.set_bad( color='gray', alpha=0.5 )\n #cmap = plt.cm.get_cmap(\"Spectral_r\")\n unit = r'Vr (m s$^{-1}$)'\n\n# norm = None\n norm = BoundaryNorm( levels, ncolors=cmap.N, clip=False)\n# norm = cm.colors.Normalize( vmax=np.max( levels ), vmin=np.min( levels ) )\n\n l_l = []\n b_l = []\n w_l = []\n h_l = []\n\n for i, ax in enumerate( ax_l ):\n\n if i <= 1:\n# #print( \"TEST\\n\", ax.get_position[0] )\n# l, b, w, h = ax.get_position().bounds\n# l_l.append( l )\n# b_l.append( b )\n# w_l.append( w )\n# h_l.append( h )\n\n ffn = '{0:}/{1:}/dafcst_nc/{2:}.nc'.format( INFO[\"TOP\"], exp_l[i],\n fstime.strftime('%Y%m%d-%H%M%S') )\n print( ffn )\n fdat2d, flon1d, flat1d = read_fcst_nc( fn=ffn, fstime=fstime, vtime=vtime, nvar=nvar, height=height )\n \n flon2d, flat2d = np.meshgrid( flon1d, flat1d )\n \n rxlev = np.argmin( np.abs( flon1d - lon_r ) )\n rylev = np.argmin( np.abs( flat1d - lat_r ) )\n i1d = ( np.arange( 0, len( flon1d ), 1 ) - rxlev ) * 0.5\n j1d = ( np.arange( 0, len( flon1d ), 1 ) - rylev ) * 0.5\n \n i2d, j2d = np.meshgrid( i1d, j1d )\n dist2d = np.sqrt( np.square( i2d ) + np.square( j2d ) )\n\n lons = np.min( flon2d )\n lone = np.max( flon2d )\n \n lats = np.min( flat2d )\n late = np.max( flat2d )\n \n ax.set_extent([ lons, lone, lats, late ] )\n ax.add_feature( coast, zorder=0 )\n \n setup_grids_cartopy( ax, xticks=xticks, yticks=yticks, \n fs=9, lw=0.0 )\n \n SHADE = ax.contourf( flon2d, flat2d, fdat2d[:,:], \n norm=norm,\n levels=levels,\n cmap=cmap,\n extend='both',\n transform=data_crs )\n\n ax.plot( lon_r, lat_r, marker='o', color='k', markersize=10,\n transform=data_crs )\n\n CONT = ax.contour( flon2d, flat2d, dist2d, \n levels=[10, 20, 30, 40, 50, 60 ],\n colors='gray',\n linestyles='dashed',\n linewidths=1.0, \n transform=data_crs,\n )\n if clat > 
0.0:\n ax.plot( [ lons, lone ], [ clat, clat ], transform=data_crs,\n color='k', ls='dashed', lw=1.0 )\n elif clon > 0.0:\n ax.plot( [ clon, clon ], [ lats, late ], transform=data_crs,\n color='k', ls='dashed', lw=1.0 )\n\n else:\n\n# l, b, w, h = ax.get_position().bounds\n# ax.set_position( [ l_l[i-2], b, w_l[i-2], h_l[i-2] ] )\n\n ffn = '{0:}/{1:}/dafcst_nc/{2:}.nc'.format( INFO[\"TOP\"], exp_l[i-2],\n fstime.strftime('%Y%m%d-%H%M%S') )\n fdat2d, fx1d, fy1d = read_fcst_nc( fn=ffn, fstime=fstime, vtime=vtime , VERT=True, clon=clon, clat=clat, nvar=nvar )\n fy2d, fx2d = np.meshgrid( fy1d, fx1d )\n print( fx2d.shape, fy2d.shape, fdat2d.shape)\n SHADE = ax.contourf( fx2d, fy2d, fdat2d[:,:], \n norm=norm,\n levels=levels,\n cmap=cmap,\n extend='both', )\n\n\n ax.text( 0.5, 1.01, texp_l[i],\n va='bottom', \n ha='center',\n transform=ax.transAxes,\n color='k', fontsize=12, )\n\n bbox = { 'facecolor':'w', 'alpha':0.95, 'pad':0.5,\n 'edgecolor':'w' }\n ax.text( 0.99, 0.01, 'Z={0:.1f} km'.format( height*0.001 ), \n va='bottom', \n ha='right',\n bbox=bbox,\n transform=ax.transAxes,\n color='k', fontsize=10, )\n\n \n\n if i == 1:\n\n cfstime = fstime.strftime( '%H:%M:%S' )\n cvtime = vtime.strftime( '%H:%M:%S' )\n ax.text( 1.1, 1.01, 'Init at {0:}\\nValid at {1:}'.format( cfstime, cvtime ),\n va='bottom', \n ha='right',\n transform=ax.transAxes,\n color='k', fontsize=10, )\n\n\n pos = ax.get_position()\n cb_width = 0.006\n cb_height = pos.height*0.9\n ax_cb = fig.add_axes( [ pos.x1+0.003, pos.y0, \n cb_width, cb_height] )\n cb = plt.colorbar( SHADE, cax=ax_cb, orientation='vertical', \n ticks=levels[::1], extend='both' )\n\n ax.text( 1.01, 0.95, unit, \n va='top', \n ha='left',\n transform=ax.transAxes,\n color='k', fontsize=10, )\n\n\n\n\n if fstime == vtime:\n tit_ = 'analysis'\n else:\n tit_ = 'forecast'\n\n tit = 'SCALE-LETKF {0:}'.format( tit_ )\n fig.suptitle( tit, fontsize=14 )\n\n\n if clon > 0.0:\n ref_ = clon\n else:\n ref_ = clat\n\n ofig = \"4p_fcst_ac_{0:}_{1:}_h{2:}km_s{3:}_v{4:}_crs{5:.2f}.png\".format( \n exp_l[0],\n exp_l[1],\n height*0.001, \n fstime.strftime('%Y%m%d%H%M%S'),\n vtime.strftime('%Y%m%d%H%M%S'),\n ref_, \n )\n\n print( ofig )\n if not quick:\n opath = \"png/attenuation\"\n os.makedirs( opath, exist_ok=True )\n ofig = os.path.join(opath, ofig)\n plt.savefig(ofig,bbox_inches=\"tight\", pad_inches = 0.1)\n plt.clf()\n else:\n plt.show()\n\n\nINFO = { \"TOP\": \"/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/scale-5.4.3/OUTPUT/realtime2021/test\" }\n\nexp_l = [ \"d4\", \"d4_attenuation_corrected\"]\ntexp_l = [ \"CTRL\", \"TEST\", \"CTRL\", \"TEST\" ]\n\nexp_l = [ \"d4\", \"d4_500m_ac\"]\ntexp_l = [ \"CTRL\", \"TEST\", \"CTRL\", \"TEST\" ]\n\n#exp_l = [ \"d4_500m_ac\", \"d4_500m_ac_vr\"]\n#texp_l = [ \"TEST\", \"TEST (VR only)\", \"TEST\", \"TEST (VR only)\" ]\n\n#exp_l = [ \"d4_500m_ac\", \"d4_500m_ac_z\"]\n#texp_l = [ \"TEST\", \"TEST (Z only)\", \"TEST\", \"TEST (Z only)\" ]\n\n#exp_l = [ \"d4_500m_ac_z\", \"d4_500m_ac_vr\"]\n#texp_l = [ \"TEST (Z only)\", \"TEST (VR only)\", \"TEST (Z only)\", \"TEST (VR only)\" ]\n\nvtime = datetime( 2019, 8, 24, 15, 30, 0 )\n#vtime = datetime( 2019, 8, 24, 15, 40, 0 )\n#vtime = datetime( 2019, 8, 24, 15, 50, 0 )\n#vtime = datetime( 2019, 8, 24, 16, 0, 0 )\nfstime = datetime( 2019, 8, 24, 15, 30, 0 )\n#fstime = datetime( 2019, 8, 24, 15, 20, 0 )\n\nfstime = datetime( 2019, 8, 24, 15, 0, 30 )\nfstime = datetime( 2019, 8, 24, 15, 1, 0 )\nfstime = datetime( 2019, 8, 24, 15, 3, 0 )\nfstime = datetime( 2019, 8, 24, 15, 5, 0 )\nfstime = 
datetime( 2019, 8, 24, 15, 10, 0 )\nfstime = datetime( 2019, 8, 24, 15, 7, 0 )\nfstime = datetime( 2019, 8, 24, 15, 6, 0 )\nfstime = datetime( 2019, 8, 24, 15, 5, 30 )\nfstime = datetime( 2019, 8, 24, 15, 20, 0 )\nfstime = datetime( 2019, 8, 24, 15, 30, 0 )\n\nfstime = datetime( 2019, 8, 19, 13, 30, 0 )\n\n#fstime = datetime( 2019, 8, 24, 15, 1, 30 )\nvtime = fstime + timedelta( minutes=10 )\n#vtime = fstime\n\nheight = 3000\n#height = 1000\n#height = 500\n\nclat = 36.15\nclat = 36.2\nclat = -1\n#clat = 35.96\n#clon = 36.2\nclon = 139.37\nclon = 139.5\n#clon = 139.4\n\nnvar = \"Reflectivity\"\n#nvar = \"Vr\"\n\nmain( INFO=INFO, vtime=vtime, fstime=fstime, exp_l=exp_l, \n texp_l=texp_l, height=height, clat=clat, clon=clon, nvar=nvar )\n","repo_name":"takumihonda/AIP_realtime","sub_path":"python/4p_fcst_z_ac.py","file_name":"4p_fcst_z_ac.py","file_ext":"py","file_size_in_byte":10397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41206016538","text":"def CalcFibNum(UserNum):\n if(UserNum <= 100 and UserNum >= 2):\n N1 = 0\n N2 = 1\n Sum = 0\n while(N2 <= UserNum):\n Sum = Sum + N2\n temp = N1 + N2\n N1 = N2 \n N2 = temp\n print(f'The sum of all Fibonacci numbers which are less than or equal to {UserNum} is equal to: {Sum}') \n return Sum\n else:\n print(\"Enter a valid number\")\n return 1\n\ndef CheckPrimeNum(PNum):\n for i in range(2, int(PNum ** 0.5)+ 1):\n if PNum%i == 0:\n print(PNum, \"is NOT a prime number\")\n return False\n print(PNum, \"IS prime\")\n return True\n\ndef ConvToBin(DNum):\n sttr = \"\"\n s = DNum\n while(DNum > 0):\n i = DNum % 2\n if i == 0:\n sttr = '0' + sttr\n else:\n sttr = '1' + sttr\n DNum = int(DNum/2)\n length = 8\n if len(sttr) != 8:\n length -= len(sttr)\n sttr = length * \"0\" + sttr\n print(f'The Binary representation of {s} is equal to: {sttr}')\n\ndef main():\n while(True):\n InputNumber = input(\"Please enter a number: \")\n if(InputNumber.isdigit()):\n InputNumber = int(InputNumber)\n if(CalcFibNum(InputNumber) != 1):\n CheckPrimeNum(InputNumber)\n ConvToBin(InputNumber) \n break\n else:\n print(\"Wrong input\")\n\nmain()\n","repo_name":"artur-manukyan-1/ENGS110-2021-HOMEWORKS","sub_path":"Homework-March_5.py","file_name":"Homework-March_5.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26416412614","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Conv1D, MaxPooling1D, Flatten, Embedding\nfrom evaluate_model import eval_model\nfrom evaluate_model import randomize_row_placement\nimport bugs_dot_jar\n\n# Goal of using the CNN:\n# - Feature extraction\n#-----------\n# Feature extraction starts from an initial set of measured data\n# It builds derived values (features) intended to be informative and non-redundant\n# It facilitates the subsequent learning steps, and in some cases leads to better human interpretations.\n# Feature extraction is a dimensionality reduction process, where \n# an initial set of raw variables is reduced to more manageable groups (features) for processing, \n# while still accurately and completely describing the original data set.\n\n# 1 Train the model\n# 2 Do feature extraction\n\n# Read parsing output and split into train and test sets\ndatasets = bugs_dot_jar.load_data(0.7)\nlst = {\"accumulo\", \"camel\", \"commons-math\", \"flink\", \"jackrabbit-oak\", \"logging-log4j2\", \"maven\", \"wicket\"}\n\nheader = \"dataset -> F1 
Score\n\"\nresult = [header]\n\nfor dataset in datasets:\n\t# Model data\n\tdataset_name = dataset[0]\n\tif(dataset_name == \"accumulo\"):\n\t\tcontinue\n\t(train_src, train_labels), (test_src, test_labels) = dataset[1]\n\t# (train_src, train_labels) = randomize_row_placement(train_src, train_labels)\n\n\t# Model Metadata\n\tmax_sequence_length = len(train_src[1])\n\tnr_of_features \t\t= bugs_dot_jar.getNumberOfFeatures(dataset_name)\n\n\t# Parameters\n\tepochs = 10\n\tembedding_size = 32\n\tnr_of_filters = 32\n\tfilter_length = 3\n\t\n\t# train model\n\tprint(\"\\n\\nModel Metadata:\")\n\tprint(\"Dataset Name: \", dataset_name)\n\tprint(\"Max sequence Length: \",max_sequence_length)\n\tprint(\"Number of Features: \",nr_of_features)\n\tprint(\"Number of Filters: \", nr_of_filters)\n\tprint(\"Dataset Shape: \", train_src.shape)\n\tprint(\"Test set Shape: \", test_src.shape,\"\\n\")\n\n\t# create model\n\tmodel = Sequential()\n\n\t# add model layers\n\tmodel.add(Embedding(nr_of_features + 1,\n\t embedding_size, # Embedding size\n\t input_length=max_sequence_length))\n\n\tmodel.add(Conv1D(nr_of_filters, filter_length, activation='relu'))\n\tmodel.add(MaxPooling1D(nr_of_filters))\n\tmodel.add(Flatten())\n\tmodel.add(Dense(units=64, activation='relu'))\n\tmodel.add(Dense(units=32, activation='relu'))\n\tmodel.add(Dense(units=1, activation='sigmoid'))\n\n\t# compile model\n\tmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\t# train model\n\tmodel.fit(train_src, train_labels, validation_data=(test_src, test_labels), epochs=epochs)\n\t# print(model.summary())\n\n\t# evaluate the model\n\tloss, accuracy = model.evaluate(test_src, test_labels, verbose=0)\n\n\tf1_score = eval_model(model, test_src, test_labels)\n\n\tresult.append(dataset_name + \" \" + str(f1_score) +\"\\n\")\n\t\n\t# save model and architecture to single file\n\t# model.save(\"model-\"+dataset_name+\"-embedding-\",embed,\".h5\")\n\t# print(\"Saved model to disk\")\n\nf = open(\"results/last_results.txt\", \"a\")\nfor line in result:\n\tf.write(line)\nf.close()\n","repo_name":"AndreSobrall/Defect-Prediction","sub_path":"bugPredictor/src/machine-learning/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38103303352","text":"# Usage example\n# from api.Mail import Mail\n#\n# mail_tmp = Mail()\n# msg = {\n# \"title\": \"Title\",\n# \"content\": \"Content\",\n# \"sender_name\": \"Sender\",\n# \"receiver_name\": \"Receiver\"\n# }\n# print(mail_tmp.send_mail('test@qq.com', msg))\nimport json\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\n\nclass Mail:\n \"\"\"Send email\n\n 1. Send mail via SMTP\n\n Args:\n sender: sending mailbox account\n smtp: mail server\n\n Returns:\n whether sending succeeded\n \"\"\"\n\n def __init__(self):\n f = open(\"./api/key.json\", encoding='utf-8')\n infile = f.read()\n f.close()\n api = json.loads(infile)\n self.sender = str(api['mail_sender'])\n smtp_code = str(api['smtp_code'])\n # configure the server\n self.smtp_obj = smtplib.SMTP_SSL('smtp.qq.com', 465)\n self.smtp_obj.login(self.sender, smtp_code) # sending account, authorization code (not the QQ password, but the authorization code configured in the mailbox)\n\n def send_mail(self, receivers, msg):\n receivers = receivers # receiving mail account\n # assemble the message to send\n message = MIMEText(msg['content'], 'plain', 'utf-8') # message body\n message['From'] = Header(msg['sender_name'], 'utf-8') # sender name\n message['To'] = Header(msg['receiver_name'], 'utf-8') # recipient name\n message['Subject'] = Header(msg['title'], 'utf-8') # mail subject\n\n try:\n 
self.smtp_obj.sendmail(self.sender, receivers, message.as_string())\n except Exception as e:\n return 'Failed to send mail--' + str(e)\n return 'Mail sent successfully'\n","repo_name":"SAIKE17/aliddns","sub_path":"Ali/Ali/api/Mail.py","file_name":"Mail.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9689156019","text":"import torch\nimport argparse\nimport torch.nn as nn\nimport net\nimport cv2\nimport os\nfrom torchvision import transforms\nimport torch.nn.functional as F\nimport numpy as np\n\ndef get_args():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch Super Res Example')\n parser.add_argument('--size', type=int, required=True, help=\"size of input image\")\n parser.add_argument('--testList', type=str, required=True, help=\"train image list\")\n parser.add_argument('--imgDir', type=str, required=True, help=\"directory of image\")\n parser.add_argument('--cuda', action='store_true', help='use cuda?')\n parser.add_argument('--resume', type=str, required=True, help=\"checkpoint that model resume from\")\n parser.add_argument('--savePath', type=str, required=True, help=\"where prediction result save to\")\n args = parser.parse_args()\n print(args)\n return args\n\ndef gen_transform(size, img, name):\n scale_h = float(size) / img.shape[0]\n scale_w = float(size) / img.shape[1]\n img = cv2.resize(img,(size, size),interpolation=cv2.INTER_LINEAR)\n return img, [name, scale_h, scale_w]\n\ndef gen_dataset(namelist, imgdir, size, transform=True, normalize=None):\n sample_set = []\n with open(namelist, 'r') as f:\n names = f.readlines()\n print('--namelist:{}'.format(namelist))\n print('--names len:{}'.format(len(names)))\n for name in names:\n name = name.strip('\\n')\n img_path = imgdir + '/' + name + '.jpg'\n if os.path.exists(img_path):\n #size:[800,600,3] value:0-255 order BGR\n img = cv2.imread(img_path)\n if transform:\n new_img, info = gen_transform(size, img, name)\n # to tensor\n toTensor = transforms.ToTensor()\n new_img = toTensor(new_img)\n # normalize\n if normalize:\n new_img = normalize(new_img)\n new_img = new_img.view(1, 3, size, size)\n\n sample_set.append((new_img, info))\n print('--samples len:{}'.format(len(sample_set)))\n return sample_set\n\ndef np_norm(x):\n low = x.min()\n hig = x.max()\n y = (x - low) / (hig - low)\n return y\n\ndef main():\n\n print(\"===> Loading args\")\n args = get_args()\n\n print(\"===> Environment init\")\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n if args.cuda and not torch.cuda.is_available():\n raise Exception(\"No GPU found, please run without --cuda\")\n \n model = net.MattNet()\n ckpt = torch.load(args.resume)\n model.load_state_dict(ckpt['state_dict'], strict=True)\n\n if args.cuda:\n model = model.cuda()\n\n Normalize = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n dataset = gen_dataset(args.testList, args.imgDir, args.size, True, Normalize)\n\n for img, info in dataset:\n print('Inference for {}'.format(info[0]))\n if args.cuda:\n img = img.cuda()\n #seg = model(img)\n #seg = F.softmax(seg, dim=1)\n seg,alpha,a,b,c = model(img)\n\n #print(\"Delploy:\", seg[0,:,:,:])\n #print(\"alpha mean: {} a mean: {} b mean: {} c_mean: {}\".format(alpha.mean(), a.mean(), b.mean(), c.mean()))\n\n if args.cuda:\n seg_np = seg[0,1,:,:].data.cpu().numpy()\n alpha_np = alpha[0,0,:,:].data.cpu().numpy()\n a_np = a[0,0,:,:].data.cpu().numpy()\n b_np = b[0,0,:,:].data.cpu().numpy()\n c_np = c[0,0,:,:].data.cpu().numpy()\n else:\n seg_np 
= seg[0,1,:,:].data.numpy()\n alpha_np = alpha[0,0,:,:].data.numpy()\n a_np = a[0,0,:,:].data.numpy()\n b_np = b[0,0,:,:].data.numpy()\n c_np = c[0,0,:,:].data.numpy()\n\n origin_h = int(seg_np.shape[0] / info[1])\n origin_w = int(seg_np.shape[1] / info[2])\n\n seg_np = cv2.resize(seg_np,(origin_w, origin_h),interpolation=cv2.INTER_LINEAR)\n alpha_np = cv2.resize(alpha_np,(origin_w, origin_h), interpolation=cv2.INTER_LINEAR)\n a_np = cv2.resize(a_np,(origin_w, origin_h),interpolation=cv2.INTER_LINEAR)\n b_np = cv2.resize(b_np,(origin_w, origin_h),interpolation=cv2.INTER_LINEAR)\n c_np = cv2.resize(c_np,(origin_w, origin_h),interpolation=cv2.INTER_LINEAR)\n\n #print(alpha_np)\n #print(seg_np.mean(), alpha_np.mean())\n\n #seg_fg = seg_np * 255\n seg_fg = (seg_np >= 0.5).astype(np.float32) * 255\n #seg_fg = (seg_np >= 0.95).astype(np.float32) * 255\n #seg_fg = ((seg_np < 0.95) * (seg_np >= 0.05)).astype(np.float32) * 128 + seg_fg\n\n alpha_fg = alpha_np * 255\n #alpha_fg = (alpha_np >= 0.5).astype(np.float32) * 255\n\n \n a_fg = np_norm(a_np) * 255\n b_fg = np_norm(b_np) * 255\n c_fg = np_norm(c_np) * 255\n \n cv2.imwrite('{}{}.jpg'.format(args.savePath, info[0]), seg_fg)\n cv2.imwrite('{}{}_alpha.jpg'.format(args.savePath, info[0]), alpha_fg)\n cv2.imwrite('{}{}_a.jpg'.format(args.savePath, info[0]), a_fg)\n cv2.imwrite('{}{}_b.jpg'.format(args.savePath, info[0]), b_fg)\n cv2.imwrite('{}{}_c.jpg'.format(args.savePath, info[0]), c_fg)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"huochaitiantang/pytorch-fast-matting-portrait","sub_path":"core/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"78"} +{"seq_id":"19013930729","text":"# Example 1:\n\n# Input: nums = [2,7,11,15], target = 9\n# Output: [0,1]\n# Output: Because nums[0] + nums[1] == 9, we return [0, 1].\n# Example 2:\n\n# Input: nums = [3,2,4], target = 6\n# Output: [1,2]\n# Example 3:\n\n# Input: nums = [3,3], target = 6\n# Output: [0,1]\n\n\nfrom typing import DefaultDict\n\n\n# def twoSum(nums, target):\n# seen = {}\n# for idx, val in enumerate(nums):\n# other_num = target - val\n# if other_num in seen:\n# return[idx, seen[other_num]]\n# seen[val] = idx\n\ndef twoSum(nums, target):\n nums_seen = {}\n for idx, num in enumerate(nums):\n target_num = target - num\n if target_num in nums_seen:\n return [nums_seen[target_num], idx]\n nums_seen[num] = idx\n\n\nprint(twoSum([3,2,4], 6))","repo_name":"taeheechoi/coding-practice","sub_path":"F_twoSum.py","file_name":"F_twoSum.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3695819881","text":"#!/usr/bin/env python3\n\nf = open(\"input_test.txt\")\nlines = f.readlines()\n\ngamma = ''\nepsylon = ''\nglobal columns\ncolumns = []\n\ndef get_max(lines, i):\n columns.append({'0': 0, '1': 0})\n for l in lines:\n nb = list(l.strip())[i]\n columns[i][str(nb)] += 1\n\nl = lines\nfor i in range(len(lines[0].strip())):\n print(i)\n if i > 0:\n l = list(filter(lambda x: list(x)[i-1] == list(gamma)[i-1], filtered_lines))\n\n if len(l) == 1:\n gamma = l[0].strip()\n break\n\n filtered_lines = l\n print(filtered_lines)\n get_max(filtered_lines, i)\n\n if columns[i]['0'] > columns[i]['1']:\n gamma = gamma + '0'\n else:\n gamma = gamma + '1'\n\n\n print(columns)\n print(f\"gamma = {gamma}\")\n\ncolumns = []\nl = lines\nfor i in range(len(lines[0].strip())):\n print(i)\n if i > 0:\n l = list(filter(lambda x: list(x)[i-1] == list(epsylon)[i-1], filtered_lines))\n\n if 
len(l) == 1:\n epsylon = l[0].strip()\n break\n\n filtered_lines = l\n print(filtered_lines)\n get_max(filtered_lines, i)\n\n if columns[i]['0'] > columns[i]['1']:\n epsylon = epsylon + '1'\n else:\n epsylon = epsylon + '0'\n\n print(columns)\n print(f\"epsylon = {epsylon}\")\n\nprint(columns)\nprint(int(gamma, base=2)*int(epsylon, base=2))\n","repo_name":"jcosmao/AdventOfCode","sub_path":"day3/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37578150732","text":"from flask_mysqldb import MySQL\nfrom MySQLdb import Error as MySQLError\nimport json\nimport os\nimport random\n\nfrom util import log, filename\nfrom config import dbConst, RECSURL\n\nclass DbHandler:\n def __init__(self, app):\n # MySQL configurations\n app.config['MYSQL_HOST'] = dbConst['host']\n app.config['MYSQL_USER'] = dbConst['user']\n app.config['MYSQL_DB'] = dbConst['db']\n app.config['MYSQL_USE_UNICODE'] = dbConst['use_unicode']\n app.config['MYSQL_CHARSET'] = dbConst['charset']\n\n self.mysql = MySQL(app)\n\n # path to saved recordings\n self.recordings_path = app.config['MAIN_RECORDINGS_PATH']\n\n # needed to sanitize the dynamic sql creation in insertGeneralData\n # keep a list of allowed column names for insertions etc. depending on the table (device, isntructor, etc)\n self.allowedColumnNames = {\n 'device': [\n 'userAgent',\n 'imei'\n ],\n 'instructor': [\n 'name',\n 'email',\n 'phone',\n 'address'\n ],\n 'speaker': [\n 'name',\n 'deviceImei'\n ],\n 'speaker_info': [\n 'speakerId',\n 's_key',\n 's_value'\n ]\n }\n\n # generate list of currently valid tokens according to 'valid' column in table token.\n #self.invalid_token_ids = self.getInvalidTokenIds() # messes up WSGI script for some fucking reason\n self.invalid_token_ids = None\n\n def getInvalidTokenIds(self):\n \"\"\"\n Returns a list of tokenId's who are marked with valid=FALSE in database.\n \"\"\"\n cur = self.mysql.connection.cursor()\n cur.execute('SELECT id FROM token WHERE valid=FALSE')\n return [row[0] for row in cur.fetchall()]\n\n def insertGeneralData(self, name, data, table):\n \"\"\"\n inserts data into appropriate table\n\n name is i.e. 'instructor' and is a representation of the data, for errors and general identification\n data is a json object whose keys will be used as table column names and those values\n will be inserted into table\n returns the id of the newly inserted row or errors in the format\n dict(msg=id or msg, statusCode=htmlStatusCode)\n\n Example:\n name='device'\n data = {'imei':245, 'userAgent':'Mozilla'}\n table = 'device'\n\n In which case, this function will \n insert into device (imei, userAgent) \n values ('245','Mozilla')\n and return said rows newly generated id.\n\n WARNING: appends the keys of data straight into a python string using %\n so at least this should be sanitized. Sanitized by a whitelist of\n allowed keys in self.allowedColumnNames\n \"\"\"\n keys = []\n vals = []\n dataId = None\n try:\n if isinstance(data, str):\n data = json.loads(data)\n\n for key, val in data.items(): # use data.iteritems() for python 2.7\n # allow only keys from the appropriate list in self.allowedColumnNames\n if key not in self.allowedColumnNames[name]:\n raise KeyError('Unallowed column name used! Did someone hack the frontend? 
name: %s' % key)\n keys.append(key)\n vals.append(val)\n\n data = None # data is untrusted, should not be used unless it's filtered\n except (KeyError, TypeError, ValueError) as e:\n msg = '%s data not on correct format, aborting.' % name\n log(msg, e)\n return dict(msg=msg, statusCode=400)\n\n try: \n # insert into table\n cur = self.mysql.connection.cursor()\n\n # make our query something like (with 4 key/value pairs)\n # 'INSERT INTO %s (%s, %s, %s, %s) \\\n # VALUES (%s, %s, %s, %s)',\n # depending on number of data keys/values \n queryStr = 'INSERT INTO %s ('\n\n queryStrMid = '' # since we can reuse the (%s,%s,...)\n for i in range(len(keys)):\n queryStrMid += '%s'\n if (i != len(keys) - 1):\n queryStrMid += ', '\n\n queryStr += queryStrMid\n queryStr += ') '\n\n # input the keys first, because we don't want the '' quotes that cur.execute\n # automatically puts there\n queryStr = queryStr % tuple([table] + keys)\n\n queryStr += 'VALUES ('\n queryStr += queryStrMid\n queryStr += ')'\n\n # make the replacement tuple which is set in place of the %s's in the query\n queryTuple = tuple(vals)\n\n cur.execute(queryStr, queryTuple)\n\n # get the newly auto generated id\n\n # create our query something like\n # 'SELECT id FROM %s WHERE \\\n # %s=%s AND %s=%s AND %s=%s AND %s=%s'\n # but now the order is WHERE key=val AND key1=val1 and so\n # we have to interleave our lists instead of appending them \n # to get the correct order \n interleavedList = []\n for i in range(len(keys)):\n interleavedList.append(keys[i])\n # just a hack, because of the quote thing mentioned above\n # will be replaces with vals in query\n interleavedList.append('%s') \n \n queryStr = 'SELECT id FROM %s WHERE '\n for i in range(len(keys)):\n queryStr += '%s=%s'\n if (i != len(keys) - 1):\n queryStr += ' AND '\n\n queryStr = queryStr % tuple([table] + interleavedList)\n\n cur.execute(queryStr, queryTuple)\n # return highest id in case of multiple results (should be the newest entry)\n dataIds = cur.fetchall()\n dataId = max([i[0] for i in dataIds]) # fetchall() returns a list of tuples\n\n # only commit if we had no exceptions until this point\n self.mysql.connection.commit()\n\n except MySQLError as e:\n msg = 'Database error.'\n log(msg, e)\n return dict(msg=msg, statusCode=500)\n\n if dataId is None:\n msg = 'Couldn\\'t get %s id.' % name\n log(msg)\n return dict(msg=msg, statusCode=500)\n else:\n return dict(msg='{\"%sId\":' % name + str(dataId) + '}', statusCode=200)\n\n def insertSpeakerData(self, speakerData, speakerInfo):\n \"\"\"\n inserts into both speaker and speaker_info\n speakerData is the {'name':name[, 'deviceImei':deviceImei]}\n speakerInfo are the extra info values to insert into\n speaker_info table, e.g. speakerInfo: {'height':'154', etc.}\n assumes speaker doesn't exist in database.\n \"\"\"\n speakerId = None\n res = self.insertGeneralData('speaker', speakerData, 'speaker')\n if 'speakerId' in res['msg']:\n speakerId = json.loads(res['msg'])['speakerId']\n else:\n return res\n for k, v in speakerInfo.items():\n self.insertGeneralData('speaker_info', {\n 'speakerId':speakerId,\n 's_key':k,\n 's_value':v\n },\n 'speaker_info')\n return res\n\n def processInstructorData(self, instructorData):\n \"\"\"\n instructorData = look at format in the client-server API\n \"\"\"\n try:\n if isinstance(instructorData, str):\n instructorData = json.loads(instructorData)\n except (ValueError) as e:\n msg = '%s data not on correct format, aborting.' 
% 'instructor'\n log(msg, e)\n return dict(msg=msg, statusCode=400)\n\n if 'id' in instructorData:\n # instructor was submitted as an id, see if he exists in database\n try: \n cur = self.mysql.connection.cursor()\n\n cur.execute('SELECT id FROM instructor WHERE id=%s', (instructorData['id'],)) # have to pass in a tuple, with only one parameter\n instructorId = cur.fetchone()\n if (instructorId is None):\n # no instructor\n msg = 'No instructor with that id.'\n log(msg)\n return dict(msg=msg, statusCode=400)\n else:\n # instructor already exists, return it\n instructorId = instructorId[0] # fetchone returns tuple on success\n return dict(msg='{\"instructorId\":' + str(instructorId) + '}', statusCode=200)\n except MySQLError as e:\n msg = 'Database error.'\n log(msg, e)\n return dict(msg=msg, statusCode=500)\n return 'Unexpected error.', 500\n\n return self.insertGeneralData('instructor', instructorData, 'instructor')\n\n def processDeviceData(self, deviceData):\n # we have to make sure not to insert device with same IMEI\n # as is already in the database if so. Otherwise, we create new device\n deviceImei, deviceId, userAgent = None, None, None\n try:\n if isinstance(deviceData, str):\n deviceData = json.loads(deviceData)\n userAgent = deviceData['userAgent']\n except (TypeError, ValueError, KeyError) as e:\n msg = 'Device data not on correct format, aborting.'\n log(msg, e)\n return dict(msg=msg, statusCode=400)\n\n try:\n deviceImei = deviceData['imei']\n except (KeyError) as e:\n # we don't care if device has no ['imei']\n pass\n try:\n deviceId = deviceData['deviceId']\n del deviceData['deviceId'] # delete it, we don't want to insert it into database\n except (KeyError) as e:\n # we don't care if device has no ['deviceId']\n pass\n\n if deviceImei is not None and deviceImei != '':\n try: \n cur = self.mysql.connection.cursor()\n\n # firstly, check if this device already exists, if so, update end time, otherwise add device\n cur.execute('SELECT id FROM device WHERE imei=%s', (deviceImei,)) # have to pass in a tuple, with only one parameter\n dbDeviceId = cur.fetchone()\n if (dbDeviceId is None):\n # no device with this imei in database, insert it\n return self.insertGeneralData('device', deviceData, 'device')\n else:\n # device already exists, return it\n dbDeviceId = dbDeviceId[0] # fetchone returns tuple on success\n return dict(msg='{\"deviceId\":' + str(dbDeviceId) + '}', statusCode=200)\n except MySQLError as e:\n msg = 'Database error.'\n log(msg, e)\n return dict(msg=msg, statusCode=500)\n\n # no imei present, won't be able to identify device unless he has his id\n if deviceId is not None and deviceId != '':\n # check if a device with this id has the same userAgent as our devicedata\n try: \n cur = self.mysql.connection.cursor()\n\n cur.execute('SELECT userAgent FROM device WHERE \\\n id=%s', (deviceId,))\n dbUserAgent = cur.fetchone()\n if dbUserAgent is None:\n # no device with this info in database, insert it\n return self.insertGeneralData('device', deviceData, 'device')\n else:\n # device already exists, check if names match\n dbUserAgent = dbUserAgent[0]\n if dbUserAgent == userAgent:\n return dict(msg='{\"deviceId\":' + str(deviceId) + '}', statusCode=200)\n else:\n msg = 'userAgents don\'t match for supplied id. 
Creating new device.'\n log(msg)\n return self.insertGeneralData('device', deviceData, 'device')\n except MySQLError as e:\n msg = 'Database error.'\n log(msg, e)\n return dict(msg=msg, statusCode=500)\n\n # no id and no imei, must be new device and first transmission\n return self.insertGeneralData('device', deviceData, 'device')\n\n def processSpeakerData(self, speakerData):\n name, deviceImei, speakerId = None, None, None\n try:\n if isinstance(speakerData, str):\n speakerData = json.loads(speakerData)\n name = speakerData['name']\n except (KeyError, TypeError, ValueError) as e:\n msg = 'Speaker data not on correct format, aborting.'\n log(msg, e)\n return dict(msg=msg, statusCode=400)\n try:\n deviceImei = speakerData['deviceImei']\n except (KeyError) as e:\n # we don't care if speaker has no ['imei']\n pass\n try:\n speakerId = speakerData['speakerId']\n except (KeyError) as e:\n # or if he doesn't have an id\n pass\n\n # now, lets process the dynamic keys/values from speaker data\n # ignore name, speakerId and deviceImei keys from dict\n speakerInfo = {}\n for k, v in speakerData.items():\n if k != 'name' and k != 'deviceImei' and k != 'speakerId':\n speakerInfo[str(k)] = str(v)\n # recreate our speakerData object ready to store in db\n newSpeakerData = {'name':name}\n\n # if the speaker has imei info, use that to identify him\n if deviceImei is not None and deviceImei != '':\n newSpeakerData['deviceImei'] = deviceImei\n try: \n cur = self.mysql.connection.cursor()\n\n # firstly, check if this speaker already exists, if so, return speakerId, otherwise add speaker\n cur.execute('SELECT id FROM speaker WHERE \\\n name=%s AND deviceImei=%s',\n (name, deviceImei))\n dbSpeakerId = cur.fetchone()\n if (dbSpeakerId is None):\n # no speaker with this info in database, insert it\n return self.insertSpeakerData(newSpeakerData, speakerInfo)\n else:\n # speaker already exists, return it\n dbSpeakerId = dbSpeakerId[0] # fetchone returns tuple on success\n return dict(msg='{\"speakerId\":' + str(dbSpeakerId) + '}', statusCode=200)\n except MySQLError as e:\n msg = 'Database error.'\n log(msg, e)\n return dict(msg=msg, statusCode=500)\n\n # no imei present, won't be able to identify speaker unless he has his id\n if speakerId is not None and speakerId != '':\n # check if a speaker with this id has the same name as our speakerdata\n try: \n cur = self.mysql.connection.cursor()\n\n cur.execute('SELECT name FROM speaker WHERE \\\n id=%s', (speakerId,))\n dbName = cur.fetchone()\n if dbName is None:\n # no speaker with this info in database, insert it\n return self.insertSpeakerData(newSpeakerData, speakerInfo)\n else:\n # speaker already exists, check if names match\n dbName = dbName[0] # fetchone() returns a tuple\n if dbName == name:\n return dict(msg='{\"speakerId\":' + str(speakerId) + '}', statusCode=200)\n else:\n msg = 'Names don\\'t match for supplied id. 
Creating new speaker.'\n log(msg)\n return self.insertSpeakerData(newSpeakerData, speakerInfo)\n except MySQLError as e:\n msg = 'Database error.'\n log(msg, e)\n return dict(msg=msg, statusCode=500)\n\n # no id and no imei, must be new speaker and first transmission\n return self.insertSpeakerData(newSpeakerData, speakerInfo)\n\n def processSessionData(self, jsonData, recordings):\n \"\"\"\n Processes session data sent from client, saves it to the appropriate tables\n in the database, and saves the recordings to the filesystem at\n '/session_/recname'\n\n parameters:\n jsonData look at format in the client-server API\n recordings an array of file objects representing the submitted recordings\n \n returns a dict (msg=msg, statusCode=200,400,..)\n msg on format: dict(deviceId=dId, speakerId=sId, sessionId=sesId, recsDelivered=numRecsInDb)\n \"\"\"\n jsonDecoded = None\n sessionId = None\n # can be a number of messages, depending on the error.\n # sent back to the user, and used as a flag to see\n # if recordings should be saved but not put in mysqldb\n error = '' \n errorStatusCode = 400 # modified if something else\n\n # vars from jsonData\n speakerId, instructorId, deviceId, location, start, end, comments = \\\n None, None, None, None, None, None, None\n speakerName = None\n\n if type(recordings)!=list or len(recordings)==0:\n msg = 'No recordings received, aborting.'\n log(msg)\n return dict(msg=msg, statusCode=400)\n\n # extract json data\n try:\n jsonDecoded = json.loads(jsonData)\n #log(jsonDecoded)\n \n if jsonDecoded['type'] == 'session':\n jsonDecoded = jsonDecoded['data']\n speakerName = jsonDecoded['speakerInfo']['name']\n # this inserts speaker into database\n speakerId = json.loads(\n self.processSpeakerData(\n jsonDecoded['speakerInfo']\n )['msg']\n )['speakerId']\n instructorId = jsonDecoded['instructorId']\n # this inserts device into database\n deviceId = json.loads(\n self.processDeviceData(\n jsonDecoded['deviceInfo']\n )['msg']\n )['deviceId']\n location = jsonDecoded['location']\n start = jsonDecoded['start']\n end = jsonDecoded['end']\n comments = jsonDecoded['comments']\n else:\n error = 'Wrong type of data.'\n log(error)\n except (KeyError, TypeError, ValueError) as e:\n error = 'Session data not on correct format.'\n log(error, e)\n\n if not error:\n try:\n # insert into session\n cur = self.mysql.connection.cursor()\n\n # firstly, check if this session already exists, if so, update end time, otherwise add session\n cur.execute('SELECT id FROM session WHERE \\\n speakerId=%s AND instructorId=%s AND deviceId=%s AND location=%s AND start=%s',\n (speakerId, instructorId, deviceId, location, start))\n sessionId = cur.fetchone()\n if sessionId is None:\n # create new session entry in database\n cur.execute('INSERT INTO session (speakerId, instructorId, deviceId, location, start, end, comments) \\\n VALUES (%s, %s, %s, %s, %s, %s, %s)', \n (speakerId, instructorId, deviceId, location, start, end, comments))\n # get the newly auto generated session.id \n cur.execute('SELECT id FROM session WHERE \\\n speakerId=%s AND instructorId=%s AND deviceId=%s AND location=%s AND start=%s AND end=%s',\n (speakerId, instructorId, deviceId, location, start, end))\n sessionId = cur.fetchone()[0] # fetchone returns a tuple\n else:\n # session already exists, simply update end-time\n sessionId = sessionId[0] # fetchone() returns tuple\n cur.execute('UPDATE session \\\n SET end=%s \\\n WHERE id=%s', \n (end, sessionId))\n except MySQLError as e:\n error = 'Error inserting sessionInfo into 
database.'\n errorStatusCode = 500\n log(error, e)\n\n try:\n # now populate recordings table and save recordings+extra data to file/s\n\n # make sure path to recordings exists\n os.makedirs(self.recordings_path, exist_ok=True)\n\n for rec in recordings:\n # grab token to save as extra metadata later, and id to insert into table recording\n tokenId = jsonDecoded['recordingsInfo'][rec.filename]['tokenId']\n # use token sent as text if available to write to metadata file (in case database is wrong)\n # to be salvaged later if needed.\n token = None\n if 'tokenText' in jsonDecoded['recordingsInfo'][rec.filename]:\n token = jsonDecoded['recordingsInfo'][rec.filename]['tokenText']\n else:\n # otherwise, grab it from the database\n cur.execute('SELECT inputToken FROM token WHERE id=%s', (tokenId,))\n token = cur.fetchone()\n if token is None:\n error = 'No token with supplied id.'\n log(error.replace('id.','id: {}.'.format(tokenId)))\n else:\n token = token[0] # fetchone() returns tuple\n\n if not error:\n recName = self.writeRecToFilesystem(rec, token, sessionId, speakerName, lost=False)\n else:\n recName = self.writeRecToFilesystem(rec, token, sessionId, speakerName, lost=True)\n\n if not error:\n # insert recording data into database\n cur.execute('INSERT INTO recording (tokenId, speakerId, sessionId, filename) \\\n VALUES (%s, %s, %s, %s)', \n (tokenId, speakerId, sessionId, recName))\n except MySQLError as e:\n msg = 'Error adding recording to database.'\n log(msg, e)\n return dict(msg=msg, statusCode=500)\n except os.error as e:\n msg = 'Error saving recordings to file.'\n log(msg, e)\n return dict(msg=msg, statusCode=500)\n except KeyError as e:\n msg = 'Missing recording info in session data.'\n log(msg, e)\n return dict(msg=msg, statusCode=400)\n\n # only commit if we had no exceptions until this point\n # and no error\n if not error:\n self.mysql.connection.commit()\n\n # extra, add the number of tokens (recordings) we have actually received from this speaker\n numRecs = self.getRecordingCount(speakerName, speakerId, deviceId)\n\n return dict(msg=json.dumps(dict(sessionId=sessionId, deviceId=deviceId, speakerId=speakerId, recsDelivered=numRecs)), \n statusCode=200)\n else:\n log('There was an error: {}, not committing to MySQL database.'.format(error))\n return dict(msg=error, statusCode=errorStatusCode)\n\n def writeRecToFilesystem(self, rec, token, sessionId, speakerName, lost=False):\n \"\"\"\n Writes rec (as .wav) and token (as .txt with same name as rec) to filesystem at \n app.config['MAIN_RECORDINGS_PATH']/session_/filename\n\n Parameters:\n rec a werkzeug FileStorage object representing a .wav recording\n token a string representing the prompt read during rec. 
None if \n there was no token, or an error in retrieving it.\n sessionId id of session, None if error obtaining it.\n speakerName the name of the speaker, None if there was an error\n lost True if there was an error in handling the metadata, means we \n still write the recording to file, just categorized as lost.\n\n Return:\n recName returns the name of the saved recording (basename)\n \"\"\"\n if not token:\n token = 'No prompt.'\n if not sessionId:\n sessionId = 'unknown'\n if not speakerName:\n speakerName = 'unknown'\n\n # save recordings to app.config['MAIN_RECORDINGS_PATH']/session_sessionId/filename\n sessionPath = os.path.join(self.recordings_path, 'session_{}'.format(sessionId))\n if lost:\n sessionPath = os.path.join(self.recordings_path, 'lost', 'session_{}'.format(sessionId))\n os.makedirs(sessionPath, exist_ok=True)\n \n recName = filename(speakerName) + '_' + filename(rec.filename)\n wavePath = os.path.join(sessionPath, recName)\n # rec is a werkzeug FileStorage object\n rec.save(wavePath)\n # save additional metadata to text file with same name as recording\n # open with utf8 to avoid encoding issues.\n # right now, only save the token\n with open(wavePath.replace('.wav','.txt'), mode='w', encoding='utf8') as f:\n f.write(token)\n\n return recName\n\n def getRecordingCount(self, speakerName, speakerId, deviceId):\n \"\"\"\n Returns how many recordings this speaker has in our database.\n\n parameters:\n speakerName name as it is in json we receive from frontend\n speakerId id of speaker as in database\n deviceId id of device as in database\n\n returns:\n recCnt number of recordings from this speaker in database\n -1 on failure\n\n Right now, takes the maximum of the recordings of speaker with supplied id\n and the total sum of all speakers with 'speakerName' and a common 'deviceId'.\n Meaning, if a speaker through a glitch has a couple versions of himself in the db\n (with same device tho) we count that.\n \"\"\"\n try:\n cur = self.mysql.connection.cursor()\n\n #TODO combine into one query\n cur.execute('SELECT count(*) FROM recording '\n 'WHERE speakerId IN ( '\n 'SELECT id FROM speaker WHERE name=%s) '\n 'AND sessionId IN (SELECT id FROM session WHERE deviceId=%s)'\n ,(speakerName, deviceId))\n\n cntByName = cur.fetchone()\n if cntByName is None:\n cntByName = 0\n else:\n cntByName = cntByName[0]\n cur.execute('SELECT count(*) FROM recording WHERE speakerId=%s', (speakerId,))\n cntById = cur.fetchone()\n if cntById is None:\n cntById = 0\n else:\n cntById = cntById[0]\n except MySQLError as e:\n msg = 'Error grabbing recording count for speaker with id={}'.format(speakerId)\n log(msg, e)\n return -1 # lets not fail, but return a sentinel value of -1\n\n return max(int(cntByName), int(cntById))\n\n\n def getTokens(self, numTokens):\n \"\"\"\n Gets numTokens tokens randomly selected from the database and returns them in a nice json format.\n look at format in the client-server API\n \n Does not return any tokens marked with valid:FALSE in db.\n or it's: [{\"id\":id1, \"token\":token1}, {\"id\":id2, \"token\":token2}, ...]\n \n returns [] on failure\n \"\"\"\n tokens = []\n try:\n cur = self.mysql.connection.cursor()\n # Get list of random tokens which are valid from the mysql database\n cur.execute('SELECT id, inputToken, valid FROM token WHERE valid=1 ORDER BY RAND() LIMIT %s',\n (numTokens, ))\n tokens = cur.fetchall()\n except MySQLError as e:\n msg = 'Error getting tokens from database.'\n log(msg, e)\n return []\n\n jsonTokens = []\n # parse our tuple object from the 
cursor.execute into our desired json object\n for pair in tokens:\n jsonTokens.append({\"id\":pair[0], \"token\":pair[1]})\n return jsonTokens\n\n def getRecordingsInfo(self, sessionId, count=None) -> '[{\"recId\": ..., \"token\": str, \"recPath\": str - absolute path, \"tokenId\": ...}]':\n \"\"\"Fetches info for the recordings of the session `sessionId`\n\n Parameters:\n sessionId Only consider recordings from this session\n count If set only return info for count newest recordings\n otherwise fetch info for all recordings from session\n\n The returned list contains the newest recordings last, i.e. recordings are\n in ascending order with regard to recording id.\n \"\"\"\n try:\n cur = self.mysql.connection.cursor()\n cur.execute('SELECT recording.id, recording.filename, token.inputToken, token.id FROM recording '\n + 'JOIN token ON recording.tokenId=token.id '\n + 'WHERE recording.sessionId=%s '\n + 'ORDER BY recording.id ASC ', (sessionId,))\n\n if count is not None:\n rows = cur.fetchmany(size=count)\n else:\n rows = cur.fetchall()\n except MySQLError as e:\n msg = 'Error getting info for session recordings'\n log(msg, e)\n raise\n else:\n return json.dumps([dict(recId=recId, \n recPath=os.path.join(self.recordings_path,'session_'+str(sessionId),recPath), \n token=token, \n tokenId=id)\n for recId, recPath, token, id in rows])\n\n def sessionExists(self, sessionId) -> bool:\n \"\"\"\n Checks to see if session with sessionId exists (is in database).\n \"\"\"\n try:\n cur = self.mysql.connection.cursor()\n cur.execute('SELECT * FROM session WHERE id=%s', (sessionId,))\n if cur.fetchone():\n return True\n except MySQLError as e:\n msg = 'Error checking for session existence.'\n log(msg, e)\n raise\n else:\n return False\n\n def getFromSet(self, eval_set, progress, count):\n \"\"\"\n Get link/prompt pairs from specified set in ascending order by recording id.\n\n Parameters:\n set name of the set corresponding to evaluation_sets(eval_set) in database.\n progress progress (index) into the set\n count number of pairs to get\n\n A special set, Random, will receive count random recordings from the total recordings\n so far, unrelated to progress.\n\n Returns tuple\n (json, http_status_code)\n\n Returned JSON definition:\n [[recLinkN, promptN], .., [recLinkN+count, promptN+count]]\n\n where N is progress and recLink is the RECSURL + the relative path in the RECSROOT folder,\n e.g. '/recs/session_26/user_date.wav'. 
An error string on failure.\n \"\"\"\n try:\n cur = self.mysql.connection.cursor()\n # select count random recordings from a special set (Random)\n if eval_set == 'Random':\n cur.execute('SELECT id FROM recording');\n recIds = [x[0] for x in cur.fetchall()]\n recIds = recIds[1:] # remove the placeholder recording introduced by populate_db.sql\n\n randIds = random.sample(recIds, count)\n randIds = tuple(randIds) # change to tuple because SQL syntax is 'WHERE id IN (1,2,3,..)'\n cur.execute('SELECT recording.sessionId, recording.filename, inputToken '+\n 'FROM recording, token '+\n 'WHERE recording.tokenId = token.id '+\n 'AND recording.id IN %s ',\n (randIds,))\n else:\n # branch for the normal usage, taking from a specific set\n cur.execute('SELECT recording.sessionId, recording.filename, inputToken '+\n 'FROM recording, token, evaluation_sets '+\n 'WHERE recording.tokenId = token.id '+\n 'AND recording.id = evaluation_sets.recordingId '+\n 'AND eval_set=%s '+\n 'ORDER BY evaluation_sets.id ASC', (eval_set,))\n partialSet = [['{}/session_{}/{}'.format(RECSURL, sesId, filename), prompt]\n for sesId, filename, prompt in cur.fetchall()]\n except MySQLError as e:\n msg = 'Error grabbing from set.'\n log(msg, e)\n return (msg, 500)\n\n if partialSet and eval_set != 'Random':\n return (partialSet[progress:progress+count], 200)\n elif partialSet and eval_set == 'Random':\n return (partialSet, 200) # not actually a partial set in this case, just set with count elements\n else:\n msg = 'No set by that name in database.'\n log(msg+' Set: {}'.format(eval_set))\n return (msg, 404)\n\n def processEvaluation(self, eval_set, data):\n \"\"\"\n Process and save evaluation in database table: evaluation.\n\n Parameters:\n eval_set name of the set corresponding to evaluation_sets table\n data json on format:\n [\n {\n \"evaluator\": \"daphne\",\n \"sessionId\": 5,\n \"recordingFilename\": \"asdf_2016-03-05T11:11:09.287Z.wav\",\n \"grade\": 2,\n \"comments\": \"Bad pronunciation\",\n \"skipped\": false\n },\n ..\n ]\n\n Returns (msg, http_status_code)\n \"\"\"\n eval_set = str(eval_set)\n try:\n jsonDecoded = json.loads(data)\n #log('json: ', jsonDecoded)\n except (TypeError, ValueError) as e:\n msg = 'Evaluation data not on correct format.'\n log(msg, e)\n return (msg, 400)\n\n error = '' \n errorStatusCode = 500\n for evaluation in jsonDecoded:\n evaluator, sessionId, recordingFilename, grade, comments, skipped = \\\n None, None, None, None, None, None\n try:\n evaluator = evaluation['evaluator']\n sessionId = evaluation['sessionId']\n recordingFilename = evaluation['recordingFilename']\n grade = evaluation['grade']\n comments = evaluation['comments']\n skipped = evaluation['skipped']\n except KeyError as e:\n error = 'Some evaluation data not on correct format, wrong key.'\n errorStatusCode = 400\n log(error + ' Data: {}, eval_set: {}'.format(evaluation, eval_set), e)\n continue\n\n try:\n cur = self.mysql.connection.cursor()\n if eval_set == 'Random':\n cur.execute('SELECT recording.id FROM recording '+\n 'WHERE recording.sessionId = %s '+\n 'AND recording.filename = %s ',\n (sessionId, recordingFilename))\n else:\n cur.execute('SELECT recording.id FROM recording, evaluation_sets '+\n 'WHERE evaluation_sets.recordingId = recording.id '+\n 'AND recording.sessionId = %s '+\n 'AND recording.filename = %s '+\n 'AND eval_set = %s',\n (sessionId, recordingFilename, eval_set))\n try:\n recId = cur.fetchone()[0]\n except TypeError as e:\n error = 'Could not find a recording with some data.'\n errorStatusCode = 
400\n log(error + ' Data: {}, eval_set: {}'.format(evaluation, eval_set), e)\n continue\n\n cur.execute('INSERT INTO evaluation (recordingId, eval_set, evaluator, grade, comments, skipped) \\\n VALUES (%s, %s, %s, %s, %s, %s)', \n (recId, eval_set, evaluator, grade, comments, skipped))\n except MySQLError as e:\n error = 'Error inserting some evaluation into database.'\n errorStatusCode = 500\n log(error + ' Data: {}, eval_set: {}'.format(evaluation, eval_set), e)\n continue\n\n self.mysql.connection.commit()\n\n if error:\n return (error, errorStatusCode)\n else:\n return ('Successfully processed evaluation.', 200)\n\n def getSetInfo(self, eval_set):\n \"\"\"\n Currently, only returns the number of elements in on format:\n {\n \"count\": 52\n }\n \"\"\"\n if eval_set == 'Random':\n return (json.dumps(dict(count='∞')), 200)\n\n try:\n cur = self.mysql.connection.cursor()\n cur.execute('SELECT COUNT(*) FROM evaluation_sets '+\n 'WHERE eval_set=%s ', (eval_set,))\n try:\n count = cur.fetchone()[0]\n except TypeError as e:\n msg = 'Could not find supplied set.'\n log(msg + ' Eval_set: {}'.format(eval_set), e)\n return (msg, 404)\n except MySQLError as e:\n msg = 'Error getting set info.'\n log(msg + ' Eval_set: {}'.format(eval_set), e)\n return (msg, 500)\n\n return (json.dumps(dict(count=count)), 200)\n\n def getUserProgress(self, user, eval_set):\n \"\"\"\n Returns user progress into eval_set, format:\n {\n \"progress\": 541\n }\n \"\"\"\n try:\n cur = self.mysql.connection.cursor()\n cur.execute('SELECT COUNT(*) FROM evaluation '+\n 'WHERE eval_set=%s '+\n 'AND evaluator=%s', (eval_set, user))\n # COUNT(*) always returns a number, so no need for a try block here\n progress = cur.fetchone()[0]\n except MySQLError as e:\n msg = 'Error getting user progress.'\n log(msg + ' Eval_set: {}, user: {}'.format(eval_set, user), e)\n return (msg, 500)\n\n return (json.dumps(dict(progress=progress)), 200)\n\n def getPossibleSets(self):\n \"\"\"\n Returns possible sets, format:\n [\n \"set1\",\n \"set2\",\n ..\n ]\n or as in client-server API.\n \"\"\"\n try:\n cur = self.mysql.connection.cursor()\n cur.execute('SELECT eval_set FROM evaluation_sets '+\n 'GROUP BY eval_set')\n sets = [x[0] for x in cur.fetchall()]\n except MySQLError as e:\n msg = 'Error getting possible sets.'\n log(msg, e)\n return (msg, 500)\n\n return (json.dumps(sets), 200)\n","repo_name":"Eyra-is/Eyra","sub_path":"Backend/server-interface/db_handler.py","file_name":"db_handler.py","file_ext":"py","file_size_in_byte":40264,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"78"} +{"seq_id":"74617216892","text":"from bs4 import BeautifulSoup\n\n\nclass AdvertisementParser:\n\n @staticmethod\n def parse(html_doc):\n soup = BeautifulSoup(html_doc, 'html.parser')\n data = dict(title=None,\n price=None,\n body=None,\n post_id=None,\n created_time=None,\n modified_time=None,\n image=[]\n )\n title_tag = soup.find('span', {'id': 'titletextonly'})\n if title_tag:\n data['title'] = title_tag.text\n\n price_tag = soup.find('span', {'class': 'price'})\n if price_tag:\n data['price'] = price_tag.text\n\n body_tag = soup.find('section', {'id': 'postingbody'})\n if body_tag:\n data['body'] = body_tag.text\n\n post_id_tag = soup.select_one('body > section > section > section > div.postinginfos > p:nth-child(1)')\n if post_id_tag:\n data['post_id'] = post_id_tag.text.replace('post id: ', '')\n\n created_time_tag = soup.select_one(\n 'body > section > section > section > div.postinginfos > p:nth-child(2) > 
time')\n if created_time_tag:\n data['created_time'] = created_time_tag['datetime']\n\n modified_time = soup.select_one('body > section > section > section > div.postinginfos > p:nth-child(3) > time')\n if modified_time:\n data['modified_time'] = modified_time['datetime']\n image_url = soup.find_all('img')\n if image_url:\n image_list = [{'url': img['src'], 'flag': False} for img in image_url]\n data['image'] = image_list\n return data\n","repo_name":"athfemoiur/craigslist-crawler","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9559237091","text":"import psycopg2\nimport datetime\nimport random\nfrom psycopg2.extensions import AsIs\ndef getConn():\n #pwFile = open(\"pw.txt\", \"r\")\n #pw = pwFile.read()\n #pwFile.close()\n connStr = \"host=localhost \\\n dbname=Travelly user=postgres password = \" + \"password\"\n conn=psycopg2.connect(connStr) \n return conn\n\ndef pw_hash_salt(unhashed_pw,pw_salt=0):\n num = 31\n hashed_pw = 0\n for i in range(0,len(unhashed_pw)):\n hashed_pw += ((num * hashed_pw) + ord(unhashed_pw[i]))\n hashed_salted_pw = hashed_pw + pw_salt \n return hashed_salted_pw\n\nUSERS=[\"Aleida King\",\"Billye Quayle\",\"Mildred Beaty\",\"Adeline Beyers\",\"Tricia Wendel\",\"Kizzy Bedoya\",\"Marx Warn\",\"Hulda Culberson\",\"Devona Morvant\",\"Winston Tomasello\",\"Dede Frame\",\"Lissa Follansbee\",\"Timmy Dapolito\",\"Gracie Lonon\",\"Nana Officer\",\"Yuri Kruchten\",\"Chante Brasch\",\"Edmond Toombs\",\"Scott Schwan\",\"Lean Beauregard\",\"Norberto Petersen\",\"Carole Costigan\",\"Chantel Drumheller\",\"Riva Redfield\",\"Jennie Sandifer\",\"Vivian Cimini\",\"Goldie Hayworth\",\"Tomeka Kimler\",\"Micaela Juan\",\"Jerrold Tjaden\",\"Collene Olson\",\"Edna Serna\",\"Cleveland Miley\",\"Ena Haecker\",\"Huey Voelker\",\"Annamae Basco\",\"Florentina Quinlan\",\"Eryn Chae\",\"Mozella Mcknight\"]\n\nCOUNTRIES=[ \n'France', \n'Spain', \n'United States', \n'China', \n'Italy', \n'Mexico', \n'United Kingdom', \n'Turkey', \n'Germany', \n'Thailand', \n'Austria', \n'Japan', \n'China', \n'Greece', \n'Malaysia', \n'Russia', \n'Canada', \n'Poland', \n'Netherlands', \n'Saudi Arabia',\n'Croatia',\n'India', \n'Portugal', \n'Ukraine', \n'Indonesia', \n'Singapore', \n'Korea', \n'Vietnam', \n'Denmark', \n'Bahrain', \n'Morocco', \n'Belarus', \n'Romania', \n'Ireland', \n'South Africa', \n'Czech Republic', \n'Switzerland', \n'Bulgaria', \n'Australia', \n'Belgium', \n'Egypt', \n'Kazakhstan', \n'United Arab Emirates', \n'Sweden', \n'Tunisia', \n'Argentina', \n'Philippines', \n'Brazil', \n'Georgia', \n'Chile', \n'Norway', \n'Dominican Republic', \n'Hungary', \n'Cambodia', \n'Syrian Arab Republic', \n'Iran', \n'Albania', \n'Cuba', \n'Kyrgyz Republic', \n'Colombia', \n'Peru', \n'Jordan', \n'Puerto Rico', \n'Uruguay', \n'Cyprus', \n'Israel', \n'Slovenia', \n'New Zealand', \n'Myanmar', \n'Lao PDR', \n'Estonia', \n'Finland', \n'Costa Rica', \n'Andorra', \n'Uzbekistan', \n'Lithuania', \n'Azerbaijan', \n'Algeria', \n'Zimbabwe', \n'Oman', \n'Jamaica', \n'Malta', \n'Qatar', \n'Iceland', \n'Slovak Republic', \n'Sri Lanka', \n'Guatemala', \n'Latvia', \n'Nigeria', \n'Montenegro', \n'Lebanon', \n'Panama', \n'Côte d\\'Ivoire', \n'Nicaragua', \n'Ecuador', \n'Paraguay', \n'Botswana', \n'El Salvador', \n'Namibia'] \n\ndef create_schema(cur):\n cur.execute(open(\"schema.sql\", \"r\").read())\n\ndef create_user(name):\n salt = 12345\n password=pw_hash_salt('password', 
salt)\n username = name.split()[0].lower() + '999'\n email = '%s.%s@email.com'%(name.split()[0][1].lower(), name.split()[1].lower())\n firstname= name.split()[0]\n lastname= name.split()[1]\n dob = datetime.date(1990,1,1)\n return [username, firstname, lastname, email, dob, password, salt]\n\n \ndef create_post(author, country):\n content = 'Content for this post'\n title = 'post about %s'%(country)\n date = datetime.date.today().replace(day=1, month=1) + datetime.timedelta(days=random.randint(0, 365))\n return [title, country, author, content, date]\n \n \n# def create_comment(cur, cid,pid, author):\n# for i in range(random.randrange(2,5)):\n# content = 'Comment %d'%(i)\n# date = date + datetime.timedelta( random.randrange(1,3), minutes=random.randrange(1,120), hours=random.randrange(0,6) )\n# cur.execute('INSERT INTO tr_comment (cid, author, content,date) VALUES (?,?,?,?,?,?)',(id, country, author, content,date))\n \ntry: \n conn=None \n conn=getConn()\n cur = conn.cursor()\n \n cur.execute(\"DROP SCHEMA IF EXISTS %s CASCADE\", [AsIs('travelly')])\n\n file_p = open('schema.sql', 'r')\n cur.execute(file_p.read())\n \n for user in USERS:\n cur.execute(\"INSERT INTO tr_users VALUES (%s,%s,%s,%s,%s,%s,%s)\", create_user(user))\n\n cur.execute(\"SELECT username FROM tr_users\")\n users = cur.fetchall()\n \n for country in COUNTRIES:\n user = random.choice(users)\n cur.execute(\"INSERT INTO tr_post (title, country, author, content, date) VALUES (%s,%s,%s,%s,%s)\", create_post(user, country))\n \n conn.commit()\n conn.close()\n\n\nexcept Exception as e:\n print (e)","repo_name":"nuridagasan/Travelly-PYthon-API-and-server","sub_path":"DB_seeder.py","file_name":"DB_seeder.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12154968728","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 11 15:03:25 2019\n\n@author: Lenovo\n\"\"\"\n\ncurrent_savings = 0\nmonth_num = 0\nr = 0.04\nflag = 0\nannual_salary = int(input(\"Enter your annual salary:\"))\nportion_saved = float(input(\"Enter the percent of your salary to save, as a decimal:\"))\ntotal_cost = int(input(\"Enter the cost of your dream home:\"))\nmonth_salary = float(annual_salary) / 12\nportion_down_payment = total_cost * 0.25\n\nwhile True:\n if current_savings < portion_down_payment:\n current_savings = current_savings * r / 12 + current_savings + month_salary * portion_saved\n month_num += 1\n else:\n break\n\nprint(\"Number of months: {}\".format(month_num))\n\n ","repo_name":"oscar0325/Introduction-to-Computation-and-Programming","sub_path":"ps1/ps1a.py","file_name":"ps1a.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"70998528891","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.options import Options\n\n# Function to generate random username and Ethereum address (you may use a different method if desired)\nimport random\nimport string\n\ndef generate_random_username():\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=8))\n\ndef generate_random_ethereum_address():\n characters = \"abcdef\" + string.digits\n return '0x' + 
''.join(random.choice(characters) for i in range(40))\n\n# URL and referral code\nurl = \"https://www.arrow.markets/waitlist?referralCode=mh2eXFPl\"\n\n# Set up Selenium web driver in headless mode\nchrome_path = \"/home/yunan/Automation/ArrowMarkets/pip/chromedriver\" # Replace with the path to your chromedriver executable\nchrome_options = Options()\n#chrome_options.add_argument(\"--headless\")\nservice = Service(chrome_path)\ndriver = webdriver.Chrome(service=service, options=chrome_options)\n\nmax_attempts = 1000\n\nwhile True:\n # Create a new driver instance for each attempt\n driver = webdriver.Chrome(service=service, options=chrome_options)\n attempts = 0\n signed_up = True\n\n while signed_up and attempts < max_attempts:\n try:\n # Open the URL in the browser\n driver.get(url)\n #driver.maximize_window()\n\n # Wait for the sign-up form to load\n wait = WebDriverWait(driver, 10)\n username_input = wait.until(EC.presence_of_element_located((By.NAME, \"Username\")))\n wallet_address_input = wait.until(EC.presence_of_element_located((By.NAME, \"Wallet Address\")))\n sign_up_button = wait.until(EC.element_to_be_clickable((By.XPATH, \"/html/body/div[1]/main/div/div[1]/div/div/div/div[1]/form/button\")))\n\n # Generate random username and Ethereum address\n random_username = generate_random_username()\n random_ethereum_address = generate_random_ethereum_address()\n\n # Fill in the sign-up form\n username_input.send_keys(random_username)\n wallet_address_input.send_keys(random_ethereum_address)\n\n #time.sleep(1)\n # Click the sign-up button\n driver.implicitly_wait(10)\n sign_up_button.click()\n #driver.implicitly_wait(10)\n # Wait for the response and check for the \"Network Error\" pop-up\n try:\n signed_up_succesfully = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, \".ant-typography elements__StyledHeading-sc-1h3erfw-0 gRrPFZ\"))\n )\n if signed_up_succesfully.is_displayed():\n # If there's a network error, refresh the page and try again\n signed_up = False\n attempts += 1\n except:\n # If no pop-up is found within the timeout, assume sign-up was successful and break the loop\n signed_up = True\n print(\"Sign-up successful!\")\n break\n\n except Exception as e:\n print(f\"An error occurred: {e}\")\n #driver.implicitly_wait(10)\n # Close the browser after each sign-up attempt\n driver.quit()\n\n # Wait for some time before starting the sign-up process again\n time.sleep(0)\n","repo_name":"yunansum/ArrowMarkets","sub_path":"Arrow.py","file_name":"Arrow.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28278887911","text":"import os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\n\ntry:\n link = 'http://suninjuly.github.io/file_input.html'\n browser = webdriver.Chrome()\n browser.get(link)\n\n first_name = browser.find_element(By.CSS_SELECTOR, 'input[name=\"firstname\"]')\n first_name.send_keys(\"Lolek\")\n\n last_name = browser.find_element(By.CSS_SELECTOR, 'input[name=\"lastname\"]')\n last_name.send_keys(\"Bolek\")\n\n email = browser.find_element(By.CSS_SELECTOR, 'input[name=\"email\"]')\n email.send_keys(\"Alokogolek\")\n\n current_dir = os.path.abspath(os.path.dirname(__file__))\n file_path = os.path.join(current_dir, 'file.txt')\n file = browser.find_element(By.CSS_SELECTOR, 'input#file')\n file.send_keys(file_path)\n\n button = browser.find_element(By.TAG_NAME, 'button')\n 
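    # Accessing this property makes Selenium scroll the element into view as a side effect; the coordinates it returns are intentionally discarded.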
button.location_once_scrolled_into_view\n button.click()\n\n\nfinally:\n time.sleep(5)\n \n browser.quit()","repo_name":"DrAshford/train_auto_tests","sub_path":"lesson8_step8.py","file_name":"lesson8_step8.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"54649463","text":"from collections import Counter\n\n\ndef min_window(s: str, t: str) -> str:\n if len(t) > len(s):\n return \"\"\n if t == s:\n return s\n\n if not t or not s:\n return \"\"\n\n counter_t = Counter(t)\n\n required = len(counter_t)\n\n # Filter all the characters from s into a new list along with their index.\n # The filtering criteria is that the character should be present in t.\n filtered_s = []\n for idx, char in enumerate(s):\n if char in counter_t:\n filtered_s.append((idx, char))\n\n left, right = 0, 0\n formed = 0\n window_counts = {}\n ans = float(\"inf\"), None, None\n\n # Look for the characters only in the filtered list instead of entire s. This helps to reduce our search.\n # Hence, we follow the sliding window approach on as small list.\n while right < len(filtered_s):\n char = filtered_s[right][1]\n window_counts[char] = window_counts.get(char, 0) + 1\n\n if window_counts[char] == counter_t[char]:\n formed += 1\n\n # If the current window has all the characters in desired frequencies i.e. t is present in the window\n while left <= right and formed == required:\n char = filtered_s[left][1]\n\n end = filtered_s[right][0]\n start = filtered_s[left][0]\n\n if end - start + 1 < ans[0]:\n ans = (end - start + 1, start, end)\n\n window_counts[char] -= 1\n if window_counts[char] < counter_t[char]:\n formed -= 1\n\n left += 1\n\n right += 1\n\n return \"\" if ans[0] == float(\"Inf\") else s[ans[1]: ans[2] + 1]\n","repo_name":"BrianLusina/PythonSnips","sub_path":"algorithms/sliding_window/minimum_window_substring/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"35804232918","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport keras\nkeras.__version__\n\n\n# # Advanced usage of recurrent neural networks\n# \n# This notebook contains the code samples found in Chapter 6, Section 3 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.\n# \n# ---\n# \n# In this section, we will review three advanced techniques for improving the performance and generalization power of recurrent neural \n# networks. By the end of the section, you will know most of what there is to know about using recurrent networks with Keras. We will \n# demonstrate all three concepts on a weather forecasting problem, where we have access to a timeseries of data points coming from sensors \n# installed on the roof of a building, such as temperature, air pressure, and humidity, which we use to predict what the temperature will be \n# 24 hours after the last data point collected. 
This is a fairly challenging problem that exemplifies many common difficulties encountered \n# when working with timeseries.\n# \n# We will cover the following techniques:\n# \n# * *Recurrent dropout*, a specific, built-in way to use dropout to fight overfitting in recurrent layers.\n# * *Stacking recurrent layers*, to increase the representational power of the network (at the cost of higher computational loads).\n# * *Bidirectional recurrent layers*, which presents the same information to a recurrent network in different ways, increasing accuracy and \n# mitigating forgetting issues.\n\n# ## A temperature forecasting problem\n# \n# Until now, the only sequence data we have covered has been text data, for instance the IMDB dataset and the Reuters dataset. But sequence \n# data is found in many more problems than just language processing. In all of our examples in this section, we will be playing with a weather \n# timeseries dataset recorded at the Weather Station at the Max-Planck-Institute for Biogeochemistry in Jena, Germany: http://www.bgc-jena.mpg.de/wetter/.\n# \n# In this dataset, fourteen different quantities (such air temperature, atmospheric pressure, humidity, wind direction, etc.) are recorded \n# every ten minutes, over several years. The original data goes back to 2003, but we limit ourselves to data from 2009-2016. This dataset is \n# perfect for learning to work with numerical timeseries. We will use it to build a model that takes as input some data from the recent past (a \n# few days worth of data points) and predicts the air temperature 24 hours in the future.\n\n# Let's take a look at the data:\n\n# In[6]:\n\n\nimport os\n\ndata_dir = '/home/ubuntu/data/'\nfname = os.path.join(data_dir, 'jena_climate_2009_2016.csv')\n\nf = open(fname)\ndata = f.read()\nf.close()\n\nlines = data.split('\\n')\nheader = lines[0].split(',')\nlines = lines[1:]\n\nprint(header)\nprint(len(lines))\n\n\n# Let's convert all of these 420,551 lines of data into a Numpy array:\n\n# In[7]:\n\n\nimport numpy as np\n\nfloat_data = np.zeros((len(lines), len(header) - 1))\nfor i, line in enumerate(lines):\n values = [float(x) for x in line.split(',')[1:]]\n float_data[i, :] = values\n\n\n# For instance, here is the plot of temperature (in degrees Celsius) over time:\n\n# In[5]:\n\n\nfrom matplotlib import pyplot as plt\n\ntemp = float_data[:, 1] # temperature (in degrees Celsius)\nplt.plot(range(len(temp)), temp)\nplt.show()\n\n\n# \n# On this plot, you can clearly see the yearly periodicity of temperature.\n# \n# Here is a more narrow plot of the first ten days of temperature data (since the data is recorded every ten minutes, we get 144 data points \n# per day):\n\n# In[6]:\n\n\nplt.plot(range(1440), temp[:1440])\nplt.show()\n\n\n# \n# On this plot, you can see daily periodicity, especially evident for the last 4 days. We can also note that this ten-days period must be \n# coming from a fairly cold winter month.\n# \n# If we were trying to predict average temperature for the next month given a few month of past data, the problem would be easy, due to the \n# reliable year-scale periodicity of the data. But looking at the data over a scale of days, the temperature looks a lot more chaotic. So is \n# this timeseries predictable at a daily scale? 
Let's find out.\n\n# ## Preparing the data\n# \n# \n# The exact formulation of our problem will be the following: given data going as far back as `lookback` timesteps (a timestep is 10 minutes) \n# and sampled every `steps` timesteps, can we predict the temperature in `delay` timesteps?\n# \n# We will use the following parameter values:\n# \n# * `lookback = 720`, i.e. our observations will go back 5 days.\n# * `steps = 6`, i.e. our observations will be sampled at one data point per hour.\n# * `delay = 144`, i.e. our targets will be 24 hours in the future.\n# \n# To get started, we need to do two things:\n# \n# * Preprocess the data to a format a neural network can ingest. This is easy: the data is already numerical, so we don't need to do any \n# vectorization. However each timeseries in the data is on a different scale (e.g. temperature is typically between -20 and +30, but \n# pressure, measured in mbar, is around 1000). So we will normalize each timeseries independently so that they all take small values on a \n# similar scale.\n# * Write a Python generator that takes our current array of float data and yields batches of data from the recent past, alongside with a \n# target temperature in the future. Since the samples in our dataset are highly redundant (e.g. sample `N` and sample `N + 1` will have most \n# of their timesteps in common), it would be very wasteful to explicitly allocate every sample. Instead, we will generate the samples on the \n# fly using the original data.\n# \n# We preprocess the data by subtracting the mean of each timeseries and dividing by the standard deviation. We plan on using the first \n# 200,000 timesteps as training data, so we compute the mean and standard deviation only on this fraction of the data:\n\n# In[8]:\n\n\nmean = float_data[:200000].mean(axis=0)\nfloat_data -= mean\nstd = float_data[:200000].std(axis=0)\nfloat_data /= std\n\n\n# \n# Now here is the data generator that we will use. It yields a tuple `(samples, targets)` where `samples` is one batch of input data and \n# `targets` is the corresponding array of target temperatures. It takes the following arguments:\n# \n# * `data`: The original array of floating point data, which we just normalized in the code snippet above.\n# * `lookback`: How many timesteps back should our input data go.\n# * `delay`: How many timesteps in the future should our target be.\n# * `min_index` and `max_index`: Indices in the `data` array that delimit which timesteps to draw from. This is useful for keeping a segment \n# of the data for validation and another one for testing.\n# * `shuffle`: Whether to shuffle our samples or draw them in chronological order.\n# * `batch_size`: The number of samples per batch.\n# * `step`: The period, in timesteps, at which we sample data. 
We will set it 6 in order to draw one data point every hour.\n\n# In[9]:\n\n\ndef generator(data, lookback, delay, min_index, max_index,\n shuffle=False, batch_size=128, step=6):\n if max_index is None:\n max_index = len(data) - delay - 1\n i = min_index + lookback\n while 1:\n if shuffle:\n rows = np.random.randint(\n min_index + lookback, max_index, size=batch_size)\n else:\n if i + batch_size >= max_index:\n i = min_index + lookback\n rows = np.arange(i, min(i + batch_size, max_index))\n i += len(rows)\n\n samples = np.zeros((len(rows),\n lookback // step,\n data.shape[-1]))\n targets = np.zeros((len(rows),))\n for j, row in enumerate(rows):\n indices = range(rows[j] - lookback, rows[j], step)\n samples[j] = data[indices]\n targets[j] = data[rows[j] + delay][1]\n yield samples, targets\n\n\n# \n# Now let's use our abstract generator function to instantiate three generators, one for training, one for validation and one for testing. \n# Each will look at different temporal segments of the original data: the training generator looks at the first 200,000 timesteps, the \n# validation generator looks at the following 100,000, and the test generator looks at the remainder.\n\n# In[10]:\n\n\nlookback = 1440\nstep = 6\ndelay = 144\nbatch_size = 128\n\ntrain_gen = generator(float_data,\n lookback=lookback,\n delay=delay,\n min_index=0,\n max_index=200000,\n shuffle=True,\n step=step, \n batch_size=batch_size)\nval_gen = generator(float_data,\n lookback=lookback,\n delay=delay,\n min_index=200001,\n max_index=300000,\n step=step,\n batch_size=batch_size)\ntest_gen = generator(float_data,\n lookback=lookback,\n delay=delay,\n min_index=300001,\n max_index=None,\n step=step,\n batch_size=batch_size)\n\n# This is how many steps to draw from `val_gen`\n# in order to see the whole validation set:\nval_steps = (300000 - 200001 - lookback) // batch_size\n\n# This is how many steps to draw from `test_gen`\n# in order to see the whole test set:\ntest_steps = (len(float_data) - 300001 - lookback) // batch_size\n\n\n# ## A common sense, non-machine learning baseline\n# \n# \n# Before we start leveraging black-box deep learning models to solve our temperature prediction problem, let's try out a simple common-sense \n# approach. It will serve as a sanity check, and it will establish a baseline that we will have to beat in order to demonstrate the \n# usefulness of more advanced machine learning models. Such common-sense baselines can be very useful when approaching a new problem for \n# which there is no known solution (yet). A classic example is that of unbalanced classification tasks, where some classes can be much more \n# common than others. If your dataset contains 90% of instances of class A and 10% of instances of class B, then a common sense approach to \n# the classification task would be to always predict \"A\" when presented with a new sample. Such a classifier would be 90% accurate overall, \n# and any learning-based approach should therefore beat this 90% score in order to demonstrate usefulness. Sometimes such elementary \n# baseline can prove surprisingly hard to beat.\n# \n# In our case, the temperature timeseries can safely be assumed to be continuous (the temperatures tomorrow are likely to be close to the \n# temperatures today) as well as periodical with a daily period. Thus a common sense approach would be to always predict that the temperature \n# 24 hours from now will be equal to the temperature right now. Let's evaluate this approach, using the Mean Absolute Error metric (MAE). 
\n# Mean Absolute Error is simply equal to:\n\n# In[ ]:\n\n\nnp.mean(np.abs(preds - targets))\n\n\n# Here's our evaluation loop:\n\n# In[10]:\n\n\ndef evaluate_naive_method():\n    batch_maes = []\n    for step in range(val_steps):\n        samples, targets = next(val_gen)\n        preds = samples[:, -1, 1]\n        mae = np.mean(np.abs(preds - targets))\n        batch_maes.append(mae)\n    print(np.mean(batch_maes))\n    \nevaluate_naive_method()\n\n\n# It yields an MAE of 0.29. Since our temperature data has been normalized to be centered on 0 and have a standard deviation of one, this \n# number is not immediately interpretable. It translates to an average absolute error of `0.29 * temperature_std` degrees Celsius, i.e. \n# 2.57˚C. That's a fairly large average absolute error -- now the game is to leverage our knowledge of deep learning to do better. \n\n# ## A basic machine learning approach\n# \n# In the same way that it is useful to establish a common sense baseline before trying machine learning approaches, it is useful to try \n# simple and cheap machine learning models (such as small densely-connected networks) before looking into complicated and computationally \n# expensive models such as RNNs. This is the best way to make sure that any further complexity we throw at the problem later on is legitimate \n# and delivers real benefits.\n# \n# Here is a simple fully-connected model in which we start by flattening the data, then run it through two `Dense` layers. Note the lack of \n# activation function on the last `Dense` layer, which is typical for a regression problem. We use MAE as the loss. Since we are evaluating \n# on the exact same data and with the exact same metric as with our common sense approach, the results will be directly comparable.\n\n# In[17]:\n\n\nfrom keras.models import Sequential\nfrom keras import layers\nfrom keras.optimizers import RMSprop\n\nmodel = Sequential()\nmodel.add(layers.Flatten(input_shape=(lookback // step, float_data.shape[-1])))\nmodel.add(layers.Dense(32, activation='relu'))\nmodel.add(layers.Dense(1))\n\nmodel.compile(optimizer=RMSprop(), loss='mae')\nhistory = model.fit_generator(train_gen,\n                              steps_per_epoch=500,\n                              epochs=20,\n                              validation_data=val_gen,\n                              validation_steps=val_steps)\n\n\n# Let's display the loss curves for validation and training:\n\n# In[23]:\n\n\nimport matplotlib.pyplot as plt\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(loss))\n\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()\n\n\n# \n# Some of our validation losses get close to the no-learning baseline, but not very reliably. This goes to show the merit of having had this baseline in the first place: it turns out not to be so easy to outperform. Our \n# common sense already contains a lot of valuable information that a machine learning model does not have access to.\n# \n# You may ask, if there exists a simple, well-performing model to go from the data to the targets (our common sense baseline), why doesn't \n# the model we are training find it and improve on it? Simply put: because this simple solution is not what our training setup is looking \n# for. The space of models in which we are searching for a solution, i.e. our hypothesis space, is the space of all possible 2-layer networks \n# with the configuration that we defined. These networks are already fairly complicated. 
When looking for a solution within a space of \n# complicated models, the simple well-performing baseline might be unlearnable, even if it's technically part of the hypothesis space. That \n# is a pretty significant limitation of machine learning in general: unless the learning algorithm is hard-coded to look for a specific kind \n# of simple model, parameter learning can sometimes fail to find a simple solution to a simple problem.\n\n# ## A first recurrent baseline\n# \n# \n# Our first fully-connected approach didn't do so well, but that doesn't mean machine learning is not applicable to our problem. The approach \n# above consisted of first flattening the timeseries, which removed the notion of time from the input data. Let us instead look at our data \n# as what it is: a sequence, where causality and order matter. We will try a recurrent sequence processing model -- it should be the perfect \n# fit for such sequence data, precisely because it does exploit the temporal ordering of data points, unlike our first approach.\n# \n# Instead of the `LSTM` layer introduced in the previous section, we will use the `GRU` layer, developed by Cho et al. in 2014. `GRU` layers \n# (GRU stands for \"gated recurrent unit\") work by leveraging the same principle as LSTM, but they are somewhat streamlined and thus cheaper \n# to run, although they may not have quite as much representational power as LSTM. This trade-off between computational expensiveness and \n# representational power is seen everywhere in machine learning.\n\n# In[25]:\n\n\nfrom keras.models import Sequential\nfrom keras import layers\nfrom keras.optimizers import RMSprop\n\nmodel = Sequential()\nmodel.add(layers.GRU(32, input_shape=(None, float_data.shape[-1])))\nmodel.add(layers.Dense(1))\n\nmodel.compile(optimizer=RMSprop(), loss='mae')\nhistory = model.fit_generator(train_gen,\n                              steps_per_epoch=500,\n                              epochs=20,\n                              validation_data=val_gen,\n                              validation_steps=val_steps)\n\n\n# Let's look at our results:\n\n# In[26]:\n\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(loss))\n\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()\n\n\n# \n# Much better! We are able to significantly beat the common sense baseline, thus demonstrating the value of machine learning here, as well as \n# the superiority of recurrent networks compared to sequence-flattening dense networks on this type of task.\n# \n# Our new validation MAE of ~0.265 (before we start significantly overfitting) translates to a mean absolute error of 2.35˚C after \n# de-normalization. That's a solid gain on our initial error of 2.57˚C, but we probably still have a bit of margin for improvement.\n\n# ## Using recurrent dropout to fight overfitting\n# \n# \n# It is evident from our training and validation curves that our model is overfitting: the training and validation losses start diverging \n# considerably after a few epochs. You are already familiar with a classic technique for fighting this phenomenon: dropout, consisting of \n# randomly zeroing out input units of a layer in order to break happenstance correlations in the training data that the layer is exposed to. \n# How to correctly apply dropout in recurrent networks, however, is not a trivial question. It has long been known that applying dropout \n# before a recurrent layer hinders learning rather than helping with regularization. 
In 2015, Yarin Gal, as part of his Ph.D. thesis on \n# Bayesian deep learning, determined the proper way to use dropout with a recurrent network: the same dropout mask (the same pattern of \n# dropped units) should be applied at every timestep, instead of a dropout mask that would vary randomly from timestep to timestep. What's \n# more: in order to regularize the representations formed by the recurrent gates of layers such as GRU and LSTM, a temporally constant \n# dropout mask should be applied to the inner recurrent activations of the layer (a \"recurrent\" dropout mask). Using the same dropout mask at \n# every timestep allows the network to properly propagate its learning error through time; a temporally random dropout mask would instead \n# disrupt this error signal and be harmful to the learning process.\n# \n# Yarin Gal did his research using Keras and helped build this mechanism directly into Keras recurrent layers. Every recurrent layer in Keras \n# has two dropout-related arguments: `dropout`, a float specifying the dropout rate for input units of the layer, and `recurrent_dropout`, \n# specifying the dropout rate of the recurrent units. Let's add dropout and recurrent dropout to our GRU layer and see how it impacts \n# overfitting. Because networks being regularized with dropout always take longer to fully converge, we train our network for twice as many \n# epochs.\n\n# In[28]:\n\n\nfrom keras.models import Sequential\nfrom keras import layers\nfrom keras.optimizers import RMSprop\n\nmodel = Sequential()\nmodel.add(layers.GRU(32,\n                     dropout=0.2,\n                     recurrent_dropout=0.2,\n                     input_shape=(None, float_data.shape[-1])))\nmodel.add(layers.Dense(1))\n\nmodel.compile(optimizer=RMSprop(), loss='mae')\nhistory = model.fit_generator(train_gen,\n                              steps_per_epoch=500,\n                              epochs=40,\n                              validation_data=val_gen,\n                              validation_steps=val_steps)\n\n\n# In[33]:\n\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(loss))\n\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()\n\n\n# Great success; we are no longer overfitting during the first 30 epochs. However, while we have more stable evaluation scores, our best \n# scores are not much lower than they were previously.\n\n# ## Stacking recurrent layers\n# \n# Since we are no longer overfitting yet we seem to have hit a performance bottleneck, we should start considering increasing the capacity of \n# our network. If you remember our description of the \"universal machine learning workflow\": it is generally a good idea to increase the \n# capacity of your network until overfitting becomes your primary obstacle (assuming that you are already taking basic steps to mitigate \n# overfitting, such as using dropout). As long as you are not overfitting too badly, you are likely under-capacity.\n# \n# Increasing network capacity is typically done by increasing the number of units in the layers, or adding more layers. Recurrent layer \n# stacking is a classic way to build more powerful recurrent networks: for instance, what currently powers the Google Translate algorithm is \n# a stack of seven large LSTM layers -- that's huge.\n# \n# To stack recurrent layers on top of each other in Keras, all intermediate layers should return their full sequence of outputs (a 3D tensor) \n# rather than their output at the last timestep. 
This is done by specifying `return_sequences=True`: \n\n# In[36]:\n\n\nfrom keras.models import Sequential\nfrom keras import layers\nfrom keras.optimizers import RMSprop\n\nmodel = Sequential()\nmodel.add(layers.GRU(32,\n                     dropout=0.1,\n                     recurrent_dropout=0.5,\n                     return_sequences=True,\n                     input_shape=(None, float_data.shape[-1])))\nmodel.add(layers.GRU(64, activation='relu',\n                     dropout=0.1, \n                     recurrent_dropout=0.5))\nmodel.add(layers.Dense(1))\n\nmodel.compile(optimizer=RMSprop(), loss='mae')\nhistory = model.fit_generator(train_gen,\n                              steps_per_epoch=500,\n                              epochs=40,\n                              validation_data=val_gen,\n                              validation_steps=val_steps)\n\n\n# Let's take a look at our results:\n\n# In[37]:\n\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(loss))\n\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()\n\n\n# We can see that the added layer does improve our results a bit, albeit not very significantly. We can draw two conclusions:\n# \n# * Since we are still not overfitting too badly, we could safely increase the size of our layers, in a quest for a bit of validation loss \n# improvement. This does have a non-negligible computational cost, though. \n# * Since adding a layer did not help us by a significant factor, we may be seeing diminishing returns to increasing network capacity at this \n# point.\n\n# ## Using bidirectional RNNs\n# \n# \n# The last technique that we will introduce in this section is called \"bidirectional RNNs\". A bidirectional RNN is a common RNN variant that \n# can offer higher performance than a regular RNN on certain tasks. It is frequently used in natural language processing -- you could call it \n# the Swiss army knife of deep learning for NLP.\n# \n# RNNs are notably order-dependent, or time-dependent: they process the timesteps of their input sequences in order, and shuffling or \n# reversing the timesteps can completely change the representations that the RNN will extract from the sequence. This is precisely the reason \n# why they perform well on problems where order is meaningful, such as our temperature forecasting problem. A bidirectional RNN exploits \n# the order-sensitivity of RNNs: it simply consists of two regular RNNs, such as the GRU or LSTM layers that you are already familiar with, \n# each processing the input sequence in one direction (chronologically and antichronologically), then merging their representations. By \n# processing a sequence both ways, a bidirectional RNN is able to catch patterns that may have been overlooked by a one-direction RNN.\n# \n# Notably, the fact that the RNN layers in this section have so far processed sequences in chronological order (older timesteps first) may \n# have been an arbitrary decision. At least, it's a decision we made no attempt at questioning so far. Could it be that our RNNs could have \n# performed well enough if they were processing input sequences in antichronological order, for instance (newer timesteps first)? Let's try \n# this in practice and see what we get. All we need to do is write a variant of our data generator, where the input sequences get reversed \n# along the time dimension (replace the last line with `yield samples[:, ::-1, :], targets`). 
Training the same one-GRU-layer network as we \n# used in the first experiment in this section, we get the following results:\n\n# In[40]:\n\n\ndef reverse_order_generator(data, lookback, delay, min_index, max_index,\n                            shuffle=False, batch_size=128, step=6):\n    if max_index is None:\n        max_index = len(data) - delay - 1\n    i = min_index + lookback\n    while 1:\n        if shuffle:\n            rows = np.random.randint(\n                min_index + lookback, max_index, size=batch_size)\n        else:\n            if i + batch_size >= max_index:\n                i = min_index + lookback\n            rows = np.arange(i, min(i + batch_size, max_index))\n            i += len(rows)\n\n        samples = np.zeros((len(rows),\n                            lookback // step,\n                            data.shape[-1]))\n        targets = np.zeros((len(rows),))\n        for j, row in enumerate(rows):\n            indices = range(rows[j] - lookback, rows[j], step)\n            samples[j] = data[indices]\n            targets[j] = data[rows[j] + delay][1]\n        yield samples[:, ::-1, :], targets\n        \ntrain_gen_reverse = reverse_order_generator(\n    float_data,\n    lookback=lookback,\n    delay=delay,\n    min_index=0,\n    max_index=200000,\n    shuffle=True,\n    step=step, \n    batch_size=batch_size)\nval_gen_reverse = reverse_order_generator(\n    float_data,\n    lookback=lookback,\n    delay=delay,\n    min_index=200001,\n    max_index=300000,\n    step=step,\n    batch_size=batch_size)\n\n\n# In[44]:\n\n\nmodel = Sequential()\nmodel.add(layers.GRU(32, input_shape=(None, float_data.shape[-1])))\nmodel.add(layers.Dense(1))\n\nmodel.compile(optimizer=RMSprop(), loss='mae')\nhistory = model.fit_generator(train_gen_reverse,\n                              steps_per_epoch=500,\n                              epochs=20,\n                              validation_data=val_gen_reverse,\n                              validation_steps=val_steps)\n\n\n# In[45]:\n\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(loss))\n\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()\n\n\n# \n# So the reversed-order GRU strongly underperforms even the common-sense baseline, indicating that, in our case, chronological processing is very \n# important to the success of our approach. This makes perfect sense: the underlying GRU layer will typically be better at remembering the \n# recent past than the distant past, and naturally the more recent weather data points are more predictive than older data points in our \n# problem (that's precisely what makes the common-sense baseline a fairly strong baseline). Thus the chronological version of the layer is \n# bound to outperform the reversed-order version. Importantly, this is generally not true for many other problems, including natural \n# language: intuitively, the importance of a word in understanding a sentence is not usually dependent on its position in the sentence. 
Let's \n# try the same trick on the LSTM IMDB example from the previous section:\n\n# In[3]:\n\n\nfrom keras.datasets import imdb\nfrom keras.preprocessing import sequence\nfrom keras import layers\nfrom keras.models import Sequential\n\n# Number of words to consider as features\nmax_features = 10000\n# Cut texts after this number of words (among top max_features most common words)\nmaxlen = 500\n\n# Load data\n(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)\n\n# Reverse sequences\nx_train = [x[::-1] for x in x_train]\nx_test = [x[::-1] for x in x_test]\n\n# Pad sequences\nx_train = sequence.pad_sequences(x_train, maxlen=maxlen)\nx_test = sequence.pad_sequences(x_test, maxlen=maxlen)\n\nmodel = Sequential()\nmodel.add(layers.Embedding(max_features, 128))\nmodel.add(layers.LSTM(32))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.compile(optimizer='rmsprop',\n              loss='binary_crossentropy',\n              metrics=['acc'])\nhistory = model.fit(x_train, y_train,\n                    epochs=10,\n                    batch_size=128,\n                    validation_split=0.2)\n\n\n# \n# We get near-identical performance to that of the chronological-order LSTM we tried in the previous section.\n# \n# Thus, remarkably, on such a text dataset, reversed-order processing works just as well as chronological processing, confirming our \n# hypothesis that, although word order *does* matter in understanding language, *which* order you use isn't crucial. Importantly, an RNN trained \n# on reversed sequences will learn different representations than one trained on the original sequences, in much the same way that you would \n# have quite different mental models if time flowed backwards in the real world -- if you lived a life where you died on your first day and \n# you were born on your last day. In machine learning, representations that are *different* yet *useful* are always worth exploiting, and the \n# more they differ the better: they offer a new angle from which to look at your data, capturing aspects of the data that were missed by other \n# approaches, and thus they can help boost performance on a task. This is the intuition behind \"ensembling\", a concept that we will \n# introduce in the next chapter.\n# \n# A bidirectional RNN exploits this idea to improve upon the performance of chronological-order RNNs: it looks at its input sequence both \n# ways, obtaining potentially richer representations and capturing patterns that may have been missed by the chronological-order version alone.\n\n# ![bidirectional rnn](https://s3.amazonaws.com/book.keras.io/img/ch6/bidirectional_rnn.png)\n\n# To instantiate a bidirectional RNN in Keras, one would use the `Bidirectional` layer, which takes as its first argument a recurrent layer \n# instance. `Bidirectional` will create a second, separate instance of this recurrent layer, and will use one instance for processing the \n# input sequences in chronological order and the other instance for processing the input sequences in reversed order. 
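For intuition, here is a rough \n# hand-rolled sketch of the same idea, written with the functional API and the `go_backwards` argument of recurrent layers -- an \n# illustrative approximation only, not the actual `Bidirectional` implementation (which also takes care of weight creation and of \n# merging the two outputs in configurable ways):\n\n# In[ ]:\n\n\nfrom keras.models import Model\nfrom keras.layers import Input, concatenate\n\ninp = Input(shape=(maxlen,))\nembedded = layers.Embedding(max_features, 32)(inp)\n# Chronological pass over the sequence:\nforward = layers.LSTM(32)(embedded)\n# Antichronological pass: `go_backwards` makes the layer consume the timesteps in reversed order.\nbackward = layers.LSTM(32, go_backwards=True)(embedded)\n# Merge the two representations:\nmerged = concatenate([forward, backward])\nout = layers.Dense(1, activation='sigmoid')(merged)\nhand_rolled_bidirectional = Model(inp, out)\n\n\n# In practice we will just use the built-in wrapper. 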
Let's try it on the \n# IMDB sentiment analysis task:\n\n# In[4]:\n\n\nfrom keras import backend as K\nK.clear_session()\n\n\n# In[5]:\n\n\nmodel = Sequential()\nmodel.add(layers.Embedding(max_features, 32))\nmodel.add(layers.Bidirectional(layers.LSTM(32)))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])\nhistory = model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.2)\n\n\n# It performs slightly better than the regular LSTM we tried in the previous section, going above 88% validation accuracy. It also seems to \n# overfit faster, which is unsurprising since a bidirectional layer has twice as many parameters as a chronological LSTM. With some \n# regularization, the bidirectional approach would likely be a strong performer on this task.\n# \n# Now let's try the same approach on the weather prediction task:\n\n# In[15]:\n\n\nfrom keras.models import Sequential\nfrom keras import layers\nfrom keras.optimizers import RMSprop\n\nmodel = Sequential()\nmodel.add(layers.Bidirectional(\n    layers.GRU(32), input_shape=(None, float_data.shape[-1])))\nmodel.add(layers.Dense(1))\n\nmodel.compile(optimizer=RMSprop(), loss='mae')\nhistory = model.fit_generator(train_gen,\n                              steps_per_epoch=500,\n                              epochs=40,\n                              validation_data=val_gen,\n                              validation_steps=val_steps)\n\n\n# \n# It performs about as well as the regular GRU layer. It's easy to understand why: all of the predictive capacity must be coming from the \n# chronological half of the network, since the anti-chronological half is known to be severely underperforming on this task (again, because \n# the recent past matters much more than the distant past in this case).\n\n# \n# ## Going even further\n# \n# At this stage, there are still many other things you could try in order to improve performance on our weather forecasting problem:\n# \n# * Adjust the number of units in each recurrent layer in the stacked setup. Our current choices are largely arbitrary and thus likely \n# suboptimal.\n# * Adjust the learning rate used by our `RMSprop` optimizer.\n# * Try using `LSTM` layers instead of `GRU` layers.\n# * Try using a bigger densely-connected regressor on top of the recurrent layers, i.e. a bigger `Dense` layer or even a stack of `Dense` \n# layers.\n# * Don't forget to eventually run the best-performing models (in terms of validation MAE) on the test set! Lest you start developing \n# architectures that are overfitting to the validation set. \n# \n# As usual: deep learning is more an art than a science, and while we can provide guidelines as to what is likely to work or not work on a \n# given problem, ultimately every problem is unique and you will have to try and evaluate different strategies empirically. There is \n# currently no theory that will tell you in advance precisely what you should do to optimally solve a problem. You must try and iterate.\n# \n# \n# ## Wrapping up\n# \n# Here's what you should take away from this section:\n# \n# * As you first learned in Chapter 4, when approaching a new problem, \n# it is good to first establish common sense baselines for your metric of choice. If you don't have a \n# baseline to beat, you can't tell if you are making any real progress.\n# * Try simple models before expensive ones, to justify the additional expense. 
Sometimes a simple model will turn out to be your best option.\n# * On data where temporal ordering matters, recurrent networks are a great fit and easily outperform models that first flatten the temporal \n# data.\n# * To use dropout with recurrent networks, one should use a time-constant dropout mask and recurrent dropout mask. This is built into Keras \n# recurrent layers, so all you have to do is use the `dropout` and `recurrent_dropout` arguments of recurrent layers.\n# * Stacked RNNs provide more representational power than a single RNN layer. They are also much more expensive, and thus not always worth it. \n# While they offer clear gains on complex problems (e.g. machine translation), they might not always be relevant to smaller, simpler problems.\n# * Bidirectional RNNs, which look at a sequence both ways, are very useful on natural language processing problems. However, they will not \n# be strong performers on sequence data where the recent past is much more informative than the beginning of the sequence.\n# \n# Note there are two important concepts that we will not cover in detail here: recurrent \"attention\", and sequence masking. Both tend to be \n# especially relevant for natural language processing, and are not particularly applicable to our temperature forecasting problem. We will \n# leave them for future study outside of this book.\n","repo_name":"liuxinfengabc/cultivate","sub_path":"5.机器学习/src/deep-learning-with-pyton/6.3-advanced-usage-of-recurrent-neural-networks.py","file_name":"6.3-advanced-usage-of-recurrent-neural-networks.py","file_ext":"py","file_size_in_byte":36300,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"78"} +{"seq_id":"24277313854","text":"import sys\r\n\r\nfrom PyQt5.QtWidgets import QApplication, QPushButton, QMainWindow, QLineEdit, QCheckBox\r\n\r\n\r\nclass MainWindow(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n self.setGeometry(400, 200, 400, 200)\r\n self.setWindowTitle('Прятки для виджетов')\r\n self.checkmark1 = QCheckBox(self)\r\n self.checkmark1.setText('edit1')\r\n self.checkmark1.move(20, 0)\r\n self.checkmark1.clicked.connect(self.click)\r\n self.wndw1 = QLineEdit(self)\r\n self.wndw1.setText('Поле edit1')\r\n self.wndw1.move(70, 0)\r\n self.checkmark2 = QCheckBox(self)\r\n self.checkmark2.setText('edit2')\r\n self.checkmark2.move(20, 40)\r\n self.wndw2 = QLineEdit(self)\r\n self.wndw2.setText('Поле edit2')\r\n self.wndw2.move(70, 40)\r\n self.checkmark3 = QCheckBox(self)\r\n self.checkmark3.setText('edit3')\r\n self.checkmark3.move(20, 80)\r\n self.wndw3 = QLineEdit(self)\r\n self.wndw3.setText('Поле edit3')\r\n self.wndw3.move(70, 80)\r\n self.checkmark4 = QCheckBox(self)\r\n self.checkmark4.setText('edit4')\r\n self.checkmark4.move(20, 120)\r\n self.checkmark2.clicked.connect(self.click)\r\n self.checkmark3.clicked.connect(self.click)\r\n self.checkmark4.clicked.connect(self.click)\r\n\r\n self.wndw4 = QLineEdit(self)\r\n self.wndw4.setText('Поле edit4')\r\n self.wndw4.move(70, 120)\r\n\r\n def click(self):\r\n if self.checkmark1.isChecked():\r\n self.wndw1.show()\r\n else:\r\n self.wndw1.hide()\r\n if self.checkmark2.isChecked():\r\n self.wndw2.show()\r\n else:\r\n self.wndw2.hide()\r\n if self.checkmark3.isChecked():\r\n self.wndw3.show()\r\n else:\r\n self.wndw3.hide()\r\n if self.checkmark4.isChecked():\r\n self.wndw4.show()\r\n else:\r\n self.wndw4.hide()\r\n\r\n\r\nsys._excepthook = sys.excepthook\r\n\r\n\r\ndef 
exception_hook(exctype, value, traceback):\r\n    sys._excepthook(exctype, value, traceback)\r\n    sys.exit(1)\r\n\r\n\r\nsys.excepthook = exception_hook\r\n\r\nif __name__ == '__main__':\r\n    app = QApplication(sys.argv)\r\n    ex = MainWindow()\r\n    ex.show()\r\n    sys.exit(app.exec())","repo_name":"DanilaCrazy0/homework","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22741783969","text":"#! /usr/bin/env python\n\n## Map level\n### * 0: free\n### * 1: occupied (wall/obstacle)\n### * 2: visited\n### * 3: start\n### * 4: goal\n### * 5: path\n\n## Graph level\n### * -2: starting node parentId\n### * -1: goal node temp parentId\n\nimport argparse\nfrom os import system\nimport numpy as np\nnp.random.seed(0)\nfrom queue import PriorityQueue\n\nimport time\nimport yaml\nimport math\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--map', type=str, default=None, help='Map to test')\nparser.add_argument('--root_path', type=str, default=None, help='Path to the project root')\nparser.add_argument('--route', type=str, default='/usr/local/share/master-ipr/map1/map1.csv', help='Route to the desired map')\nparser.add_argument('--start', nargs=2, type=int, default=(2,2), help='Starting XY coords')\nparser.add_argument('--end', nargs=2, type=int, default=(7,2), help='Ending XY coords')\nargs = parser.parse_args()\n\n# Node class\nclass Node:\n    def __init__(self, x, y, myId, parentId, c):\n        self.x = x  # X coord\n        self.y = y  # Y coord\n        self.myId = myId  # Node ID\n        self.parentId = parentId  # Parent ID\n        self.c = c  # Cost\n\n    # Override <\n    def __lt__(self, node):\n        if self.myId < node.myId:\n            return True\n        else:\n            return False\n    # Override <=\n    def __le__(self, node):\n        if self.myId <= node.myId:\n            return True\n        else:\n            return False\n\n    def dump(self):\n        print(\"---------- x \"+str(self.x)+\\\n              \" | y \"+str(self.y)+\\\n              \" | id \"+str(self.myId)+\\\n              \" | parentId \"+str(self.parentId)+\\\n              \" | Cost \"+str(self.c))\n\n\n# Read conf map\nif args.map is not None:\n    with open(args.root_path + '/src/map_cfgs.yaml', 'r') as ymlfile:\n        cfg = yaml.safe_load(ymlfile)\n\n    map = args.root_path + cfg[args.map]['route']\n    start_x = cfg[args.map]['coords']['start_x']\n    start_y = cfg[args.map]['coords']['start_y']\n    end_x = cfg[args.map]['coords']['end_x']\n    end_y = cfg[args.map]['coords']['end_y']\n\nelse:\n    map = args.route\n    start_x = args.start[0]\n    start_y = args.start[1]\n    end_x = args.end[0]\n    end_y = args.end[1]\n\n# List of Nodes\npath = []\n\n# Starting position Node\ninit = Node(start_x, start_y, 0, -2, 0)\npath.append((init.c, init))\n\n# Map 2D matrix\ncharMap = []\n# Cost 2D matrix\ncostMap = []\n\n# Print color-coded map\ndef dumpMap():\n    system('clear')\n    for line in charMap:\n        for i in range(len(line)):\n            # Highlight path\n            if line[i] == '5':\n                print('\\033[1;31m{}\\033[0m'.format(line[i]), end=' ')  # Red: path\n            elif line[i] == '3' or line[i] == '4':\n                print('\\033[1;34m{}\\033[0m'.format(line[i]), end=' ')  # Blue: start and goal\n            elif line[i] == '1':\n                print('\\033[1;40m{}\\033[0m'.format(line[i]), end=' ')  # Black: obstacles\n            elif line[i] == '2':\n                print('\\033[1;32m{}\\033[0m'.format(line[i]), end=' ')  # Green: evaluated nodes\n            else:\n                print('\\033[1;33m{}\\033[0m'.format(line[i]), end=' ')  # Yellow: non-evaluated nodes\n        print('\\n')\n\n# Print cost matrix\ndef dumpCost():\n    for line in costMap:\n        for i in range(len(line)):\n            
print('\\033[1;37m{}\\033[0m'.format(line[i]), end=' ')\n        print('\\n')\n\n# Load map from file and generate random cost data\nwith open(map) as f:\n    line = f.readline()\n    while line:\n        charLine = line.strip().split(',')\n        charMap.append(charLine)\n        costMap.append(np.random.randint(7,size=len(charLine)))\n        line = f.readline()\n\n# Load start and end positions\ncharMap[start_x][start_y] = '3'  # 3: start\ncharMap[end_x][end_y] = '4'  # 4: goal\n\ndone = False  # Exit loop when done\ngoalParentId = -1  # -1: goal node temp parentId\n\n# Allowed grid moves\nmoves = {'up': (-1,0),\n         'upright': (-1,1),\n         'right': (0,1),\n         'downright': (1,1),\n         'down': (1,0),\n         'downleft': (1,-1),\n         'left': (0,-1),\n         'upleft': (-1,-1),}\n# List to move through the dict\nids = ['up', 'upright', 'right', 'downright', 'down', 'downleft', 'left', 'upleft']\n\n# PriorityQueue to store nodes in cost order\nfrontier = PriorityQueue()\nid_nodes = 0\ngoal_cost = 0\n\n# Main algorithm\nend = 0\nstart = time.time()\nwhile not done:\n    print(\"--------------------- number of nodes: \"+str(id_nodes+1))\n    p = path[-1][1]\n    p.dump()\n\n    # Cycle through allowed moves\n    for id in ids:\n        tmpX = p.x + moves[id][0]\n        tmpY = p.y + moves[id][1]\n\n        if charMap[tmpX][tmpY] == '4':\n            end = time.time() - start\n            print(\"GOALLLL!!!\")\n            goalParentId = p.myId\n            goal_cost = costMap[tmpX][tmpY]\n            done = True\n            break\n        elif charMap[tmpX][tmpY] == '0':\n            id_nodes = id_nodes+1\n            print(\"Mark visited\")\n            # Calculate cost\n            c = costMap[tmpX][tmpY]\n            newNode = Node(tmpX, tmpY, id_nodes, p.myId, c)\n            charMap[tmpX][tmpY] = '2'\n            frontier.put((c, newNode))\n        else:\n            print(\"Obstacle\")\n\n    # Get the node with the lowest cost\n    path.append(frontier.get())\n\nprint(\"%%%%%%%%%%%%%%%%%%%\")\nprint(f\"Time until finding the goal: {end*1000} ms\")\nok = False\ndist = 0\nwhile not ok:\n    for p in path:\n        node = p[1]\n        if( node.myId == goalParentId ):\n            if charMap[node.x][node.y] != '3':\n                charMap[node.x][node.y] = '5'\n                node.dump()\n                dist += node.c\n            goalParentId = node.parentId\n            if( goalParentId == -2):\n                print(\"%%%%%%%%%%%%%%%%%2\")\n                ok = True\n\ndumpMap()\nprint('\\n')\ndumpCost()\n\nprint('\\033[1;34m-----------------\\033[0m')\nprint('\\033[1;34mPath distance: {}\\033[0m'.format(dist+goal_cost))\nprint('\\033[1;34mEvaluated nodes: {}\\033[0m'.format(id_nodes+1))\nprint('\\033[1;34mTime until finding the goal: {} ms\\033[0m'.format(end*1000))\nprint('\\033[1;34m-----------------\\033[0m')\n","repo_name":"darjwx/master-ipr","sub_path":"src/python/algorithms/best-first-search/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34315993041","text":"import logging\r\nimport datetime\r\n\r\nclass Classroom:\r\n    def __init__(self, name):\r\n        self.name = name\r\n        self.students = []\r\n        self.assignments = []\r\n\r\n    def add_student(self, student):\r\n        self.students.append(student)\r\n\r\n    def list_students(self):\r\n        return [student.name for student in self.students]\r\n\r\n    def schedule_assignment(self, assignment):\r\n        self.assignments.append(assignment)\r\n\r\n    def list_assignments(self):\r\n        return [assignment.details for assignment in self.assignments]\r\n\r\nclass Student:\r\n    def __init__(self, student_id, name):\r\n        self.student_id = student_id\r\n        self.name = name\r\n\r\nclass Assignment:\r\n    def __init__(self, details, deadline):\r\n        self.details = details\r\n        self.deadline = deadline\r\n        self.submissions = []\r\n\r\n    def submit(self, student):\r\n        
self.submissions.append(student)\r\n\r\nclass VirtualClassroomManager:\r\n    def __init__(self):\r\n        self.classrooms = {}\r\n\r\n    def add_classroom(self, name):\r\n        if name in self.classrooms:\r\n            logging.error(f\"Classroom '{name}' already exists.\")\r\n        else:\r\n            self.classrooms[name] = Classroom(name)\r\n            logging.info(f\"Classroom '{name}' has been created.\")\r\n\r\n    def list_classrooms(self):\r\n        return list(self.classrooms.keys())\r\n\r\n    def remove_classroom(self, name):\r\n        if name in self.classrooms:\r\n            del self.classrooms[name]\r\n            logging.info(f\"Classroom '{name}' has been removed.\")\r\n        else:\r\n            logging.error(f\"Classroom '{name}' not found.\")\r\n\r\n    def add_student(self, student_id, class_name, student_name):\r\n        if class_name in self.classrooms:\r\n            student = Student(student_id, student_name)\r\n            self.classrooms[class_name].add_student(student)\r\n            logging.info(f\"Student {student_id} has been enrolled in {class_name}.\")\r\n        else:\r\n            logging.error(f\"Classroom '{class_name}' not found.\")\r\n\r\n    def list_students(self, class_name):\r\n        if class_name in self.classrooms:\r\n            return self.classrooms[class_name].list_students()\r\n        else:\r\n            logging.error(f\"Classroom '{class_name}' not found.\")\r\n            return []\r\n\r\n    def schedule_assignment(self, class_name, assignment_details, deadline):\r\n        if class_name in self.classrooms:\r\n            assignment = Assignment(assignment_details, deadline)\r\n            self.classrooms[class_name].schedule_assignment(assignment)\r\n            logging.info(f\"Assignment for {class_name} has been scheduled.\")\r\n        else:\r\n            logging.error(f\"Classroom '{class_name}' not found.\")\r\n\r\n    def list_assignments(self, class_name):\r\n        if class_name in self.classrooms:\r\n            return self.classrooms[class_name].list_assignments()\r\n        else:\r\n            logging.error(f\"Classroom '{class_name}' not found.\")\r\n            return []\r\n\r\n    def submit_assignment(self, student_id, class_name, assignment_details):\r\n        if class_name in self.classrooms:\r\n            classroom = self.classrooms[class_name]\r\n            for assignment in classroom.assignments:\r\n                if assignment.details == assignment_details:\r\n                    # for/else: the else branch runs only if the loop finishes without a break.\r\n                    for student in classroom.students:\r\n                        if student.student_id == student_id:\r\n                            assignment.submit(student)\r\n                            logging.info(f\"Assignment submitted by Student {student_id} in {class_name}.\")\r\n                            break\r\n                    else:\r\n                        logging.error(f\"Student {student_id} is not enrolled in {class_name}.\")\r\n                    break\r\n            else:\r\n                logging.error(f\"Assignment '{assignment_details}' not found in {class_name}.\")\r\n        else:\r\n            logging.error(f\"Classroom '{class_name}' not found.\")\r\n\r\ndef main():\r\n    logging.basicConfig(filename='virtual_classroom.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\r\n\r\n    classroom_manager = VirtualClassroomManager()\r\n\r\n    while True:\r\n        print(\"\\nVirtual Classroom Manager\")\r\n        print(\"1. Add Classroom\")\r\n        print(\"2. List Classrooms\")\r\n        print(\"3. Remove Classroom\")\r\n        print(\"4. Add Student\")\r\n        print(\"5. List Students\")\r\n        print(\"6. Schedule Assignment\")\r\n        print(\"7. List Assignments\")\r\n        print(\"8. Submit Assignment\")\r\n        print(\"9. 
Exit\")\r\n\r\n choice = input(\"Enter your choice: \")\r\n\r\n if choice == \"1\":\r\n name = input(\"Enter classroom name: \")\r\n classroom_manager.add_classroom(name)\r\n elif choice == \"2\":\r\n classrooms = classroom_manager.list_classrooms()\r\n print(\"Classrooms:\", classrooms)\r\n elif choice == \"3\":\r\n name = input(\"Enter classroom name to remove: \")\r\n classroom_manager.remove_classroom(name)\r\n elif choice == \"4\":\r\n student_id = input(\"Enter student ID: \")\r\n class_name = input(\"Enter classroom name to enroll in: \")\r\n student_name = input(\"Enter student name: \")\r\n classroom_manager.add_student(student_id, class_name, student_name)\r\n elif choice == \"5\":\r\n class_name = input(\"Enter classroom name to list students: \")\r\n students = classroom_manager.list_students(class_name)\r\n print(\"Students:\", students)\r\n elif choice == \"6\":\r\n class_name = input(\"Enter classroom name to schedule assignment: \")\r\n assignment_details = input(\"Enter assignment details: \")\r\n deadline = input(\"Enter assignment deadline (YYYY-MM-DD): \")\r\n classroom_manager.schedule_assignment(class_name, assignment_details, deadline)\r\n elif choice == \"7\":\r\n class_name = input(\"Enter classroom name to list assignments: \")\r\n assignments = classroom_manager.list_assignments(class_name)\r\n print(\"Assignments:\", assignments)\r\n elif choice == \"8\":\r\n student_id = input(\"Enter student ID: \")\r\n class_name = input(\"Enter classroom name: \")\r\n assignment_details = input(\"Enter assignment details: \")\r\n classroom_manager.submit_assignment(student_id, class_name, assignment_details)\r\n elif choice == \"9\":\r\n print(\"Exiting Virtual Classroom Manager.\")\r\n break\r\n else:\r\n print(\"Invalid choice. Please try again.\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Reddythedeveloper/chat_room","sub_path":"py/chat_room.py","file_name":"chat_room.py","file_ext":"py","file_size_in_byte":6659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39990179643","text":"import os, re, sys, win32serviceutil\r\n\r\n# settings\r\nappWorkPath = os.environ['FORESTMANPATH']\r\nwebwarePath = os.environ['WEBWAREPATH']\r\nserviceName = 'ForestMan'\r\nserviceDisplayName = 'ForestMan App Server'\r\n\r\n# ensure Webware is on sys.path\r\nsys.path.insert(0, webwarePath)\r\n\r\n# Construct customized version of ThreadedAppServerService that uses our\r\n# specified service name, service display name, and working dir\r\nfrom WebKit.ThreadedAppServerService import ThreadedAppServerService\r\nclass NTService(ThreadedAppServerService):\r\n\t_svc_name_ = serviceName\r\n\t_svc_display_name_ = serviceDisplayName\r\n\tdef workDir(self):\r\n\t\treturn appWorkPath\r\n\r\n# Handle the command-line args\r\nif __name__=='__main__':\r\n\twin32serviceutil.HandleCommandLine(NTService)\r\n","repo_name":"BackupTheBerlios/forestman-svn","sub_path":"NTService.py","file_name":"NTService.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20686540516","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import absolute_import, division, print_function, unicode_literals # isort:skip\r\n\r\n# Bibliotecas de terceiros\r\nfrom django.conf.urls import url\r\n\r\n# Modulos locais\r\nfrom . 
import views\r\n\r\n\r\nurlpatterns = [\r\n url(r'^avaliar/$', views.avaliar, name='assistido_avaliar'),\r\n url(r'^buscar/$', views.buscar, name='assistido_buscar'),\r\n url(r'^comunidade/listar/$', views.listar_comunidade, name='assistido_listar_comunidade'),\r\n url(r'^cpf/existe/$', views.cpf_existe, name='assistido_cpf_existe'),\r\n url(r'^cadastrar/$', views.editar, name='assistido_editar'),\r\n url(r'^cadastrar/(?P[0-9]+)/(?P[0-9]+)/$', views.editar, name='assistido_editar'),\r\n url(r'^editar/(\\d+)/$', views.editar, name='assistido_editar'),\r\n url(r'^editar/(?P[0-9]+)/(?P[0-9]+)/$', views.editar, name='assistido_editar'),\r\n url(r'^excluir/(\\d+)/$', views.excluir, name='assistido_excluir'),\r\n url(r'^filiacao/excluir/$', views.excluir_filiacao, name='assistido_excluir_filiacao'),\r\n url(r'^endereco/excluir/$', views.excluir_endereco, name='assistido_excluir_endereco'),\r\n url(r'^foto/salvar/$', views.salvar_foto, name='assistido_salvar_foto'),\r\n url(r'^json/get/$', views.get_json, name='assistido_json_get'),\r\n url(r'^(\\d+)/endereco/json/get/$', views.get_json_enderecos_pessoa_assistida, name='assistido_endereco_json_get'),\r\n url(r'^(?P[0-9]+)/documento/(?P[0-9]+)/excluir/$',\r\n views.excluir_documento,\r\n name='assistido_excluir_documento'),\r\n url(r'^(?P[0-9]+)/documento/listar/$', views.listar_documento, name='assistido_documento_listar'),\r\n url(r'^(?P[0-9]+)/documento/adicionar/$', views.salvar_documento, name='assistido_documento_adicionar'),\r\n url(r'^(?P[0-9]+)/foto/salvar-agora/$', views.salvar_foto_agora, name='assistido_salvar_foto_agora'),\r\n url(\r\n r'^(\\d+)/endereco/historico/json/get/$',\r\n views.get_json_enderecos_historico_pessoa_assistida,\r\n name='assistido_endereco_historico_json_get'\r\n ),\r\n url(r'^(\\d+)/json/get/$', views.get_json, name='assistido_json_get'),\r\n url(r'^cpf/(\\d+)/json/get/$', views.get_json_by_cpf, name='assistido_json_get_by_cpf'),\r\n url(r'^salvar/$', views.salvar, name='assistido_salvar'),\r\n url(r'^profissao/listar/$', views.listar_profissao, name='assistido_listar_profissao'),\r\n url(r'^(\\d+)/telefone/excluir/$', views.excluir_telefone, name='assistido_excluir_telefone'),\r\n url(r'^campos-obrigatorios/$',\r\n views.index_campos_obrigatorios,\r\n name='campos_obrigatorios_index'),\r\n url(r'^campos-obrigatorios/perfil/(?P[0-9]+)/$',\r\n views.configurar_campos_obrigatorios,\r\n name='campos_obrigatorios_configurar_perfil'),\r\n url(r'^campos-obrigatorios/perfil/(?P[0-9]+)/salvar/$',\r\n views.salvar_campos_obrigatorios,\r\n name='campos_obrigatorios_salvar_perfil'),\r\n url(r'^unificar/$', views.unificar, name='assistido_unificar'),\r\n url(r'^patrimonio/(?P[0-9]+)/$', views.excluir_patrimonio_assistido_por_id),\r\n url(r'^editar/([0-9.-]+)/acesso/$', views.listar_acesso, name='assistido_acesso_listar'),\r\n url(r'^editar/([0-9.-]+)/acesso/conceder/$', views.conceder_acesso, name='assistido_acesso_conceder'),\r\n url(r'^editar/([0-9.-]+)/acesso/revogar/$', views.revogar_acesso, name='assistido_acesso_revogar'),\r\n url(r'^editar/([0-9.-]+)/acesso/solicitar/$', views.solicitar_acesso, name='assistido_acesso_solicitar'),\r\n]\r\n","repo_name":"SegurancaDPDF/SOLAR-Backend","sub_path":"assistido/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16347005683","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 9 22:50:38 2018\r\n\r\n@author: 
nit_n\r\n\"\"\"\r\n\r\n\r\nfrom numpy import array, arange\r\nfrom pylab import figure, clf, plot, xlabel, legend, show, ylim, title\r\n\r\n\r\nalpha = 1\r\nbeta = 0.5\r\ngamma = 0.5\r\ndelta = 2\r\n\r\ntstart = 0.0\r\ntstop = 30.0\r\nN = 1000\r\n\r\n# Lotka-Volterra derivatives: rabbit (prey) and fox (predator) populations\r\ndef f(r, t):\r\n    x = r[0]\r\n    y = r[1]\r\n    rabbit = alpha*x - beta*x*y\r\n    fox = gamma*x*y - delta*y\r\n    return array([rabbit, fox], float)\r\n    \r\n\r\n# set up the time grid, plus empty lists to append values to\r\nh = (tstop - tstart)/N\r\ntpoints = arange(tstart, tstop, h)\r\n\r\nxpoints = []\r\nypoints = []\r\n\r\n# initial population conditions\r\nr = array([2, 2], float)\r\n\r\nfor t in tpoints:\r\n    xpoints.append(r[0])\r\n    ypoints.append(r[1])\r\n    \r\n    # now do some runge-kutta (p336)\r\n    k1 = h*f(r, t)\r\n    k2 = h*f(r + 0.5*k1, t + 0.5*h)\r\n    k3 = h*f(r + 0.5*k2, t + 0.5*h)\r\n    k4 = h*f(r + k3, t + h)\r\n    \r\n    r += (k1 + 2*k2 + 2*k3 + k4)/6\r\n    \r\nfigure(1)\r\nclf()\r\nplot(tpoints, xpoints, label='rabbit population')\r\nplot(tpoints, ypoints, label='fox population')\r\nxlabel(\"time\")\r\nylim(0, 8.5)\r\nlegend(loc='upper left')\r\ntitle(\"Lotka-Volterra equations\")\r\nshow()\r\n\r\nprint(\"Figure 1 shows the population changes of rabbits and foxes in a system. We can see that when the fox population is low, about t=1.5, the rabbit population has a huge increase in size.\")\r\nprint(\"Likewise, we see that the fox population begins to increase when the rabbit population is at its peak, about t=2.7.\")","repo_name":"nrwade0/computational-physics","sub_path":"8/8_2_n_w.py","file_name":"8_2_n_w.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72238966652","text":"from setuptools import setup, find_packages\n\nwith open(\"requirements.txt\", \"r\") as fr:\n    REQUIRES = fr.read()\n\n\nsetup(name=\"tesqt\",\nversion='0.1',\nscripts=['testqt.py'],\ninclude_package_data=True,\npython_requires='>=3.7',\ninstall_requires=REQUIRES,\nentry_points={\n    'console_scripts':[\n        'testqtqt=testqt:window']}\n)\n","repo_name":"Hari102119/python-qt5-snapcraft","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18582078194","text":"import pandas as pd\nimport numpy as np\n\n# Read clipboard\npdf = pd.read_clipboard(sep='\\t', skiprows=range(1, 10000, 2))\n\n# Keep only nights with more than 1GB of data\npdf = pdf[pdf['Size'].apply(lambda x: 'G' in x)]\n\n# Number of TeraBytes\nn_tera = pdf['Size'].apply(lambda x: float(x[:-1])).sum() / 1024\nprint('Data volume: {} TB'.format(n_tera))\n\n# Format URL\npdf['Name'].apply(lambda x: 'https://ztf.uw.edu/alerts/public/' + x)\\\n    .to_csv('uri-ztf.csv', header=None, index=False)\n\n# Number of nights per call\nn_file_per_split = 5\n\n# Split\npdf = pd.read_csv('uri-ztf.csv', header=None)\nindices = np.arange(0, len(pdf), n_file_per_split).tolist() + [-1]\nfor left, right in zip(indices[0:-1], indices[1:]):\n    subpdf = pd.DataFrame(pdf[0].values[left: right])\n    subpdf.to_csv('uris/{:03d}.txt'.format(left), header=None, index=False)\n","repo_name":"astrolabsoftware/fink_ztf_recovery","sub_path":"generate_uri.py","file_name":"generate_uri.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4097695190","text":"# array and Kadane's algorithm\n# time O(n)\n# space 
O(n)\n\nclass Solution:\n    def maxSubarraySumCircular(self, nums: List[int]) -> int:\n        n = len(nums)\n        right_max = [0] * n\n        right_max[n-1] = nums[n-1]\n        suffix_sum = nums[n-1]\n\n        for i in range(n-2, -1, -1):\n            suffix_sum += nums[i]\n            right_max[i] = max(right_max[i+1], suffix_sum)\n\n        max_sum = special_sum = nums[0]\n        suffix_sum = cur_max = 0\n\n        for i in range(n):\n            cur_max = max(cur_max, 0) + nums[i]\n            # This is Kadane's algorithm.\n            max_sum = max(max_sum, cur_max)\n            suffix_sum += nums[i]\n            if i+1 < n:\n                special_sum = max(special_sum, suffix_sum+right_max[i+1])\n\n        return max(max_sum, special_sum)\n","repo_name":"boknowswiki/mytraning","sub_path":"lc/python/0918_maximum_sum_circular_subarray.py","file_name":"0918_maximum_sum_circular_subarray.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"71691343291","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.model_selection import train_test_split\n\nPROBLEM_TYPE = pd.read_json(\"options/build_options.json\")[\"problem_types\"][\"default\"]\nCOL_FREQUENCY_CONTINUOUS = 10\n\nclass preprocessor():\n\n    def __init__(self, X, y):\n\n        self.X = pd.DataFrame(X)\n        self.y = pd.DataFrame(y)\n        self.__num_cols = list(self.X.columns[[i>COL_FREQUENCY_CONTINUOUS for i in self.X.nunique().tolist()]])\n        self.__cat_cols = list(set(self.X.columns).difference(set(self.__num_cols)))\n\n    def split(\n        self,\n        test_pct: float = 0.2,\n        problem_type: str = PROBLEM_TYPE):\n\n        if problem_type == \"regression\":\n            self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=test_pct, shuffle=True, random_state=42)\n\n        elif problem_type == \"classification\":\n            self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=test_pct, shuffle=True, random_state=42, stratify=self.y)\n\n    def fit(\n        self,\n        numeric_imputer,\n        categorical_imputer,\n        numeric_scaler,\n        categorical_encoder):\n\n        self.split()\n\n        self.numeric_pipeline = Pipeline(\n            [\n                ('imputer_num', numeric_imputer),\n                ('scaler', numeric_scaler)\n            ]\n        )\n\n        self.categorical_pipeline = Pipeline(\n            [\n                ('imputer_cat', categorical_imputer),\n                ('onehot', categorical_encoder)\n            ]\n        )\n        self.preprocessor_pipe = ColumnTransformer(\n            [\n                ('categoricals', self.categorical_pipeline, self.__cat_cols),\n                ('numericals', self.numeric_pipeline, self.__num_cols)\n            ],\n        )\n        self.preprocessor_pipe.fit(self.X_train)\n\n    def fit_transform(\n        self,\n        numeric_imputer = None,\n        categorical_imputer = None,\n        numeric_scaler = None,\n        categorical_encoder = None):\n\n        if numeric_imputer is None or categorical_imputer is None or numeric_scaler is None or categorical_encoder is None:\n\n            if hasattr(self, 'preprocessor_pipe'):\n                self.preprocessor_pipe.transform(self.X_train)\n\n            else:\n                raise ValueError(\"Please fit the pipeline first or pass the necessary values.\")\n        \n        else:\n        \n            self.fit(numeric_imputer, categorical_imputer, numeric_scaler, categorical_encoder)\n            self.preprocessor_pipe.transform(self.X_train)\n\n    def transform(self):\n\n        if hasattr(self, 'preprocessor_pipe'):\n            return self.preprocessor_pipe.transform(self.X_test)\n        \n        else:\n            raise ValueError(\"Please fit the pipeline first.\")\n            
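\n\nif __name__ == \"__main__\":\n    # Minimal usage sketch (illustrative only): it assumes scikit-learn's standard transformers and that the\n    # options/build_options.json file read at import time is present. Column names and data below are made up.\n    from sklearn.impute import SimpleImputer\n    from sklearn.preprocessing import StandardScaler, OneHotEncoder\n\n    X = pd.DataFrame({\n        \"age\": np.random.randint(18, 90, 100),      # treated as continuous (more than 10 unique values)\n        \"city\": np.random.choice([\"a\", \"b\"], 100),  # treated as categorical (few unique values)\n    })\n    y = pd.Series(np.random.rand(100))\n\n    prep = preprocessor(X, y)\n    prep.fit(\n        numeric_imputer=SimpleImputer(strategy=\"median\"),\n        categorical_imputer=SimpleImputer(strategy=\"most_frequent\"),\n        numeric_scaler=StandardScaler(),\n        categorical_encoder=OneHotEncoder(handle_unknown=\"ignore\"),\n    )\n    # Transform the held-out test split produced by split():\n    print(prep.transform().shape)\n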
\n\n\n\n","repo_name":"coderkol95/ML_App","sub_path":"src/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20147001905","text":"from ._base import BaseProgressor\n\nimport warnings\nimport numpy as np\ntry:\n shell = get_ipython().__class__.__name__\n if shell == 'ZMQInteractiveShell':\n from tqdm.notebook import tqdm_notebook as tqdm\n elif shell == 'TerminalInteractiveShell':\n from tqdm import tqdm\n else:\n from tqdm import tqdm\nexcept NameError:\n from tqdm.tqdm import tqdm\n\n\nclass SGDProgressor(BaseProgressor):\n\n def __init__(self, clf):\n super().__init__(clf)\n self.max_iter = self.clf.max_iter\n\n def fit(self, X, y=None, sample_weight=None):\n self.classes = np.unique(y)\n for _ in tqdm(range(self.max_iter)):\n r = self.clf.partial_fit(X, y, self.classes, sample_weight)\n return r\n\n\nclass GLMProgressor(BaseProgressor):\n\n def __init__(self, clf):\n super().__init__(clf)\n self.max_iter = self.clf.max_iter\n\n def fit(self, X, y=None):\n self.clf.set_params(warm_start=True)\n with tqdm(range(self.max_iter)) as pbar:\n for i in range(self.max_iter):\n self.clf.set_params(max_iter=1)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n r = self.clf.fit(X, y)\n pbar.update(1)\n self.clf.set_params(warm_start=False, max_iter=self.max_iter)\n return r\n","repo_name":"tsterbak/scikit-progress","sub_path":"skprog/wrappers/_linear.py","file_name":"_linear.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"70322334011","text":"from triangle_class.decorated_triangle import *\nfrom helper_functions.add_new_triangle_functions import *\n\nt = 1\ne01 = 2\ne02 = 3\ne10 = 4\ne12 = 5\ne20 = 6\ne21 = 7\n\nc0 = [1, 0, 0]\nc1 = [0, t, 0]\nc2 = [0, 0, 1]\nr0 = [0, e01 / t, e02]\nr1 = [e10, 0, e12]\nr2 = [e20, e21 / t, 0]\nc0_clover = [1, 0, 0]\nc1_clover = [0, 1, 0]\nc2_clover = [0, 0, 1]\nx_coord_t = compute_t(e01, e12, e20, e10, e21, e02)\ncube_root_x_coord_t = np.power(x_coord_t, 1 / 3)\nr0_clover = [0, cube_root_x_coord_t, 1]\nr1_clover = [1, 0, cube_root_x_coord_t]\nr2_clover = [cube_root_x_coord_t, 1, 0]\n\ne03 = 8\ne30 = 9\ne23 = 10\ne32 = 11\nA023 = 12\n\ntest_surface = Surface(c0, c1, c2, r0, r1, r2, c0_clover, c1_clover, c2_clover, r0_clover, r1_clover, r2_clover)\n\n\n#\nfor triangle in test_surface.triangles:\n for edge in triangle.edges:\n print(edge.v0.c_clover)\n\nprint('e03: ', e03)\nprint('e30: ', e30)\nprint('e23: ', e23)\nprint('e32: ', e32)\n\nr3,c3 = compute_all_until_r3c3(r0,r2,c0,c2,e03,e23,e30,e32,A023)\nr3_clover,c3_clover = compute_all_until_r3c3(r0_clover,r2_clover,c0_clover,c2_clover,e03,e23,e30,e32,A023)\n\n\nconnecting_edge = test_surface.triangles[0].edges[0]\n\ntest_surface.add_triangle(connecting_edge, Vertex(c3,r3,c3_clover, r3_clover))\n\nprint(test_surface.triangles[1])","repo_name":"sepehrsaryazdi/cpsvis0.1","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31090171944","text":"# ============================ Dictionary and Methods =============================\r\n# 1- Select the correct way to access the value of a history subject.\r\n\r\nsampleDict = { \r\n \"class\":{ \r\n \"student\":{ \r\n \"name\":\"Mike\",\r\n \"marks\":{ \r\n 
\"physics\":70,\r\n \"history\":80\r\n }\r\n }\r\n }\r\n}\r\n\r\nprint(sampleDict[\"class\"][\"student\"][\"marks\"][\"history\"])\r\n\r\nprint(\"============================================================\")\r\n\r\n# 2- Select the all correct way to remove the key marks from a dictionary\r\nstudent = { \r\n \"name\": \"Emma\", \r\n \"class\": 9, \r\n \"marks\": 75 \r\n}\r\n\r\n# student.pop(\"marks\") # True\r\n# del student[\"marks\"] # True\r\n# student.remove(\"marks\") # False\r\n# student.popitem(\"marks\") # False\r\nstudent.popitem() # True\r\n\r\nprint(student)\r\nprint(\"============================================================\")\r\n\r\n# 3- Select the correct way to print Emma’s age.\r\nstudent = {1: {'name': 'Emma', 'age': '27', 'sex': 'Female'},\r\n 2: {'name': 'Mike', 'age': '22', 'sex': 'Male'}}\r\n\r\n\r\n\r\n# print( student[0][1] ) # False\r\nprint( student[1][\"age\"] ) # True\r\n# print(student[0][\"age\"]) # False\r\nprint(\"============================================================\")\r\n\r\n# 4- Please select all correct ways to empty the following dictionary\r\nstudent = { \r\n \"name\": \"Emma\", \r\n \"class\": 9, \r\n \"marks\": 75 \r\n}\r\n\r\n# del student # False\r\n# del student[0:2] # False\r\nstudent.clear() # True\r\n\r\nprint( student )\r\nprint(\"============================================================\")\r\n\r\n\r\n# 5- What is the output of the following dictionary operation\r\n# dict1 = {\"name\": \"Mike\", \"salary\": 8000}\r\n# temp = dict1.pop(\"age\")\r\n\r\n# print(temp)\r\n\r\n# KeyError: ‘age’\r\n# None\r\nprint(\"============================================================\")\r\n\r\n# 6- Select the correct ways to get the value of marks key.\r\nstudent = {\r\n \"name\": \"Emma\",\r\n \"class\": 9,\r\n \"marks\": 75\r\n}\r\n\r\n# m = student.get(2) # False\r\nm = student.get('marks') # True\r\n# m = student[2]\r\nm = student['marks'] # True\r\n\r\nprint(m)\r\n# ===================================== End ========================================","repo_name":"yahyaaly151989/Learn_Python_in_2023","sub_path":"#018_exercises_about_dicts.py","file_name":"#018_exercises_about_dicts.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"40302386361","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/8/31 13:52\n# @File : p9_group.py\n\nimport pandas as pd\nimport numpy as np\n\n# 聚合\nsales = [{'account': 'Jones LLC', 'type': 'a', 'Jan': 150, 'Feb': 200, 'Mar': 140},\n {'account': 'Alpha Co', 'type': 'b', 'Jan': 200, 'Feb': 210, 'Mar': 215},\n {'account': 'Blue Inc', 'type': 'a', 'Jan': 50, 'Feb': 90, 'Mar': 95}]\n\ndf2 = pd.DataFrame(sales)\nprint(df2)\nprint(df2.groupby('type'))\nprint(df2.groupby('type').groups)\n\nfor a, b in df2.groupby('type'):\n print(a)\n print(b)\n\n# 聚合后再计算\nprint(df2.groupby('type').count())\nprint(df2.groupby('Jan').sum())\n# 各类型产品的销售数量和销售总额\nprint(df2.groupby('type').aggregate({'type': 'count', 'Feb': 'sum'}))\n\ngroup = ['x', 'y', 'z']\ndata = pd.DataFrame({\n \"group\": [group[x] for x in np.random.randint(0, len(group), 10)],\n \"salary\": np.random.randint(5, 50, 10),\n \"age\": np.random.randint(15, 50, 10)\n})\n\nprint(data)\nprint(data.groupby('group').agg('mean'))\nprint(data.groupby('group').mean().to_dict())\nprint(data.groupby('group').transform('mean'))\n\n# 数据透视表\npd.pivot_table(data,\n values='salary',\n columns='group',\n index='age',\n aggfunc='count',\n margins=True\n 
).reset_index()\n","repo_name":"xk-coco/geekbangtrain","sub_path":"fourWeek/1pandas/p9_group.py","file_name":"p9_group.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9868344995","text":"import os\nimport platform\nimport shutil\nfrom zipfile import ZipFile\n\nBASE_DIR:str = os.path.dirname(__file__)\nBIN_NAME = \"blynk.exe\" if str(\n platform.system()).lower() == \"windows\" else \"blynk\"\nBIN_PATH: str = os.path.join(BASE_DIR, \"target\", \"release\", BIN_NAME)\nLICENSE_FILE:str = \"LICENSE.md\"\n\nprint(\"BASE DIR: \", BASE_DIR )\nprint(\"BINARY PATH: \", BIN_PATH)\n\nprint(\"moving release binary...\")\nshutil.copyfile(BIN_PATH, os.path.join(BASE_DIR, BIN_NAME))\nprint(\"done moving!\")\n\nprint(\"zipping...\")\nwith ZipFile(f'blynk_latest_{platform.system().lower()}.zip', 'w') as zip:\n zip.write(\"blynk\")\n zip.write(\"LICENSE.md\")\nprint(\"Done zipping!\")\n","repo_name":"BirnadinErick/blynk","sub_path":".github/dump/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"4348411255","text":"import glob\nfrom functools import partial\nfrom os import listdir\nfrom os.path import isfile, join\nimport sys\n\nimport numpy as np\nimport star_description\nfrom star_description import StarDescription, StarMetaData\nfrom typing import List, Dict, Tuple\nimport multiprocessing as mp\nfrom multiprocessing import cpu_count\nimport re\nimport logging\nfrom astropy.coordinates import SkyCoord\nfrom star_metadata import CatalogData\nfrom collections import namedtuple\nfrom pandas import DataFrame\n\n# all info needed for ui purposes\nStarUI = namedtuple(\n \"StarInfo\",\n \"catalog_name, separation, extradata, filename_raw_no_ext, filename_no_ext, \"\n \"filename_raw_no_suff_no_ext, filename_no_suff_no_ext\",\n)\nStarDict = Dict[int, StarDescription]\n\n\n# Select files conforming to the match_pattern using percentage which is between 0 and 1\ndef file_selector(the_dir, match_pattern, percentage=1) -> List[str]:\n matched_files = glob.glob(the_dir + match_pattern)\n desired_length = max(1, int(len(matched_files) * float(percentage)))\n logging.debug(\n f\"Reading.file_selector: {the_dir + match_pattern}, \"\n f\"total:{len(matched_files)}, desired:{desired_length}\"\n )\n np.random.seed(42) # for the same percentage, we always get the same selection\n selected_files = np.random.choice(\n matched_files, size=desired_length, replace=False\n ).tolist()\n return selected_files\n\n\ndef add_trailing_slash(the_path):\n return join(the_path, \"\")\n\n\ndef get_files_in_dir(mypath):\n return [f for f in listdir(mypath) if isfile(join(mypath, f))]\n\n\n# out012345.dat -> 12345\ndef get_starid_from_outfile(outfile) -> int:\n m = re.search(\"out(.*).dat\", outfile)\n try:\n result = int(m.group(1).lstrip(\"0\"))\n except:\n logging.error(f\"Could not extract the starid from file: {outfile}\")\n return result\n\n\n# returns a dict with the local_id as key\ndef get_localid_to_sd_dict(stars: List[StarDescription]) -> Dict[int, StarDescription]:\n cachedict = {}\n for sd in stars:\n cachedict[sd.local_id] = sd\n return cachedict\n\n\n# filter a list of star descriptions on the presence of a catalog\ndef catalog_filter(star: StarDescription, catalog_name):\n return star.has_metadata(catalog_name)\n\n\n# filters a DataFrame with a floatJD column according to julian 
dates\ndef jd_filter_df(df: DataFrame, jdfilter: List[float]):\n \"\"\" takes a list of 2 julian dates and uses these so the region between them is not used. The DataFrame needs a column named 'floatJD' \"\"\"\n if jdfilter is not None:\n logging.debug(\n f\"Before jd_filter_df(): jdfilter is: {jdfilter}, len is {len(df)}\"\n )\n df = df[(df.floatJD <= jdfilter[0]) | (df.floatJD >= jdfilter[1])]\n logging.debug(f\"After jd_filter_df(): len is {len(df)}\")\n if len(df) < 2:\n logging.warning(\n f\"Applying the jdfilter caused the lightcurve to contain less than 2 points! \"\n f\"Everything between {jdfilter[0]} and {jdfilter[1]} is thrown away\"\n )\n return df\n\n\ndef jd_filter_array(jds, values, jdfilter: List[float]):\n \"\"\" takes a JD array and a value array (mags) together with a min/max jdfilter list \"\"\"\n if jdfilter is not None:\n logging.debug(\n f\"jd_filter_array(): jdfilter is: {jdfilter}, of type {type(jdfilter)}\"\n )\n return zip(\n *filter(\n lambda jdsvaluezip: jdsvaluezip[0] <= jdfilter[0]\n or jdsvaluezip[0] >= jdfilter[1],\n zip(jds, values),\n )\n )\n else:\n return jds, values\n\n\ndef get_hms_dms(coord: SkyCoord):\n return \"{:2.0f}h {:02.0f}m {:02.2f}s {:2.0f}d {:02.0f}' {:02.2f}\\\"\".format(\n coord.ra.hms.h,\n abs(coord.ra.hms.m),\n abs(coord.ra.hms.s),\n coord.dec.dms.d,\n abs(coord.dec.dms.m),\n abs(coord.dec.dms.s),\n )\n\n\ndef get_hms_dms_sober(coord: SkyCoord):\n return \"{:2.0f} {:02.0f} {:02.2f} {:2.0f} {:02.0f} {:02.2f}\".format(\n coord.ra.hms.h,\n abs(coord.ra.hms.m),\n abs(coord.ra.hms.s),\n coord.dec.dms.d,\n abs(coord.dec.dms.m),\n abs(coord.dec.dms.s),\n )\n\n\ndef get_hms_dms_matplotlib(coord: SkyCoord):\n return r\"{:2.0f}$^h$ {:02.0f}$^m$ {:02.2f}$^s$ | {:2.0f}$\\degree$ {:02.0f}$'$ {:02.2f}$''$\".format(\n coord.ra.hms.h,\n abs(coord.ra.hms.m),\n abs(coord.ra.hms.s),\n coord.dec.dms.d,\n abs(coord.dec.dms.m),\n abs(coord.dec.dms.s),\n )\n\n\ndef get_lesve_coords(coord: SkyCoord):\n return \"{:2.0f} {:02.0f} {:02.2f} {:2.0f} {:02.0f} {:02.2f}\".format(\n coord.ra.hms.h,\n abs(coord.ra.hms.m),\n abs(coord.ra.hms.s),\n coord.dec.dms.d,\n abs(coord.dec.dms.m),\n abs(coord.dec.dms.s),\n )\n\n\ndef get_pool(processes=cpu_count() - 1, maxtasksperchild=10):\n return mp.Pool(processes, maxtasksperchild=maxtasksperchild)\n\n\ndef add_metadata(stars: List[star_description.StarDescription], metadata: StarMetaData):\n \"\"\"\n Add a static StarMetaData (or children) object to all stars in the list\n :param stars:\n :param metadata:\n :return:\n \"\"\"\n for star in stars:\n star.metadata = metadata\n\n\ndef get_stars_with_metadata(\n stars: List[star_description.StarMetaData],\n catalog_name: str,\n exclude: List[str] = [],\n) -> List[star_description.StarDescription]:\n # gets all stars which have a catalog of name 'catalog_name'\n assert isinstance(exclude, list) and isinstance(stars, list)\n return list(\n filter(\n partial(metadata_filter, catalog_name=catalog_name, exclude=exclude), stars\n )\n )\n\n\ndef concat_sd_lists(*star_descriptions):\n result = []\n id_set = set()\n for sd_list in star_descriptions:\n assert isinstance(sd_list, list)\n for sd in sd_list:\n if sd.local_id not in id_set:\n result.append(sd)\n id_set.add(sd.local_id)\n return result\n\n\n# Does this star have a catalog with catalog_name? 
Used in combination with filter()\ndef metadata_filter(star: StarDescription, catalog_name, exclude=[]):\n catalogs = star.get_metadata_list()\n return catalog_name in catalogs and len([x for x in exclude if x in catalogs]) == 0\n\n\nclass MetadataSorter:\n pattern = re.compile(r\".*?(\\d+)$\") # finding the number in our name\n\n def get_mixed_sort_value(\n self, startuple: Tuple[int, StarDescription], names: List[str]\n ):\n \"\"\" gets the value to sort, works with mixed int/str types \"\"\"\n idx, _ = startuple\n name = names[idx]\n if name is None:\n result = -1, -1\n elif isinstance(name, int) or isinstance(name, np.int64) or name.isdigit():\n result = 0, int(name)\n else:\n result = 1, self.get_string_number_part_or_default(name)\n return result\n\n def get_string_number_part_or_default(\n self, star_name: str, default_value: int = sys.maxsize\n ):\n match = re.match(self.pattern, star_name)\n return int(match.group(1)) if match is not None else default_value\n\n @staticmethod\n def get_metadata_from_star(\n star: StarDescription, metadata_id: str, warnings: bool = False\n ):\n result = star.get_metadata(metadata_id)\n if result is None and warnings:\n logging.warning(\n f\"The metadata {metadata_id} for star {star.local_id} does not exist\"\n )\n return result\n\n @staticmethod\n def get_name_from_metadata(obj, name_var: str, warning: bool = False):\n try:\n return getattr(obj, name_var)\n except AttributeError:\n if warning:\n logging.warning(\n f\"The metadata {obj} does not have a name variable called {name_var}\"\n )\n return None\n\n def __call__(\n self,\n stars: List[StarDescription],\n metadata_id=\"SITE\",\n name_variable=\"name\",\n warnings=True,\n ):\n metadata = [\n MetadataSorter.get_metadata_from_star(x, metadata_id, warnings)\n for x in stars\n ]\n names = [\n MetadataSorter.get_name_from_metadata(x, name_variable, warnings)\n for x in metadata\n ]\n sorted_stars = [\n x[1]\n for x in sorted(\n enumerate(stars), key=partial(self.get_mixed_sort_value, names=names)\n )\n ]\n return sorted_stars\n\n\nmetadata_sorter = MetadataSorter()\n\n\ndef sort_selected(stars: List[StarDescription]) -> List[StarDescription]:\n non_vsx = get_stars_with_metadata(stars, \"SITE\", exclude=[\"VSX\"])\n # logging.info(f\"Non vsx stars: {[x for x in non_vsx]}\")\n vsx = get_stars_with_metadata(stars, \"VSX\")\n # logging.info(f\"Vsx stars: {[x for x in non_vsx]}\")\n assert len(stars) == len(non_vsx) + len(vsx)\n non_vsx_sorted_stars = metadata_sorter(\n non_vsx, metadata_id=\"SITE\", name_variable=\"our_name\"\n )\n # logging.info(f\"Non vsx stars sorted: {[x for x in non_vsx_sorted_stars]}\")\n vsx_sorted_stars = metadata_sorter(\n vsx, metadata_id=\"SITE\", name_variable=\"our_name\"\n )\n # logging.info(f\"Vsx stars sorted: {[x for x in vsx_sorted_stars]}\")\n return non_vsx_sorted_stars + vsx_sorted_stars\n\n\ndef add_star_lists(list1: List[StarDescription], list2: List[StarDescription]):\n ids = [x.local_id for x in list1]\n list2_filtered = [x for x in list2 if x.local_id not in ids]\n return list1 + list2_filtered\n\n\ndef reject_outliers_iqr(df, column, cut=5):\n q1, q3 = np.percentile(df[column], [cut, 100 - cut])\n iqr = q3 - q1\n lower_bound = q1 - (iqr * 1.5)\n upper_bound = q3 + (iqr * 1.5)\n logging.debug(f\"q1 {q1} q3 {q3} iqr {iqr} lower {lower_bound} upper {upper_bound}\")\n return df[(df[column] < upper_bound) & (df[column] > lower_bound)]\n\n\ndef get_star_or_catalog_name(star: StarDescription, suffix: str = \"\") -> StarUI:\n extradata = None\n if 
star.has_metadata(\"VSX\"):\n catalog = star.get_metadata(\"VSX\")\n catalog_name, separation = catalog.name, catalog.separation\n extradata = catalog.extradata\n elif star.has_metadata(\"SITE\"):\n catalog = star.get_metadata(\"SITE\")\n catalog_name, separation = catalog.our_name, catalog.separation\n else:\n catalog_name, separation = star.local_id, None\n filename_no_suff_no_ext = (\n f\"{int(catalog_name):05}\"\n if isinstance(catalog_name, int) or isinstance(catalog_name, np.int64) or catalog_name.isdigit()\n else f\"{catalog_name}\"\n )\n filename_no_ext = f\"{filename_no_suff_no_ext}{suffix}\"\n\n filename_raw_no_ext = filename_no_ext\n filename_no_ext = replace_spaces(replace_dots(filename_raw_no_ext))\n\n filename_raw_no_suff_no_ext = filename_no_suff_no_ext\n filename_no_suff_no_ext = replace_spaces(replace_dots(filename_raw_no_suff_no_ext))\n return StarUI(\n catalog_name,\n separation,\n extradata,\n filename_raw_no_ext,\n filename_no_ext,\n filename_raw_no_suff_no_ext,\n filename_no_suff_no_ext,\n )\n\n\ndef get_star_names(star: StarDescription) -> List[str]:\n def unique_append(alist, new):\n if new not in alist:\n alist.append(new)\n\n names = []\n if star.has_metadata(\"VSX\"):\n unique_append(names, star.get_metadata(\"VSX\").name)\n if star.has_metadata(\"SITE\"):\n unique_append(names, star.get_metadata(\"SITE\").our_name)\n return names if len(names) > 0 else None\n\n\ndef get_pretty_ucac4_of_sd(star: StarDescription):\n catdata: CatalogData = get_ucac4_of_sd(star)\n return catdata.catalog_id if catdata is not None else \"Unknown\"\n\n\ndef get_ucac4_of_sd(star: StarDescription) -> CatalogData:\n catdata: CatalogData = star.get_metadata(\"UCAC4\")\n return catdata\n\n\ndef get_full_ucac4_id(ucac4_input: str) -> str:\n \"\"\" Takes a partial ucac4 id and makes it complete. 
E.g.: '233-155284' ==> 'UCAC4 233-155284'\"\"\"\n # UCAC4 233-155284\n if len(ucac4_input) >= 10:\n return f\"UCAC4 {ucac4_input[-10:]}\"\n return None\n\n\n# replace dots with dashes\ndef replace_dots(a_string: str):\n return a_string.replace(\".\", \"-\")\n\n\n# replace spaces with underscores\ndef replace_spaces(a_string: str):\n return a_string.replace(\" \", \"_\")\n\n\n# replace underscores with spaces\ndef replace_underscores(a_string: str):\n return a_string.replace(\"_\", \" \")\n\n# if var type is L or period is -1 then it's not periodic\ndef is_var_type_aperiodic(var_type, period: float):\n if var_type in ['L', 'None'] or period == -1:\n return True\n return False\n\ndef is_check(var_type):\n return var_type == 'Check'\n\n\n# not used\n\n\ndef find_index_of_file(the_dir, the_file, the_filter=\"*\"):\n the_dir = glob.glob(the_dir + \"*\" + the_filter)\n the_dir.sort()\n indices = [i for i, elem in enumerate(the_dir) if the_file in elem]\n return indices[0]\n\n\n# not used\n\n\ndef find_file_for_index(the_dir, index, the_filter=\"*\"):\n the_dir = glob.glob(the_dir + the_filter)\n the_dir.sort()\n return the_dir[index]\n","repo_name":"mrosseel/vast-automation","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13061,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"24118361819","text":"from models.bytebank import Employee \nimport pytest\n\nclass TestBytebank:\n def test_age_when_input_is_valid(self):\n employee = Employee('John', '11/11/2000', 1000)\n assert employee.age() == 23\n \n def test_get_full_name(self):\n name = 'Jhon Doe Serafim'\n result = 'Jhon Doe Serafim'\n employee = Employee(name, '11/11/2000', 1000)\n assert employee.full_name == result\n \n # def test_print_instance(self):\n # employee = Employee('Jhon Doe', '11/11/2000', 1000)\n # assert str(employee) == 'Funcionario(Jhon Doe, 11/11/2000, 1000)' \n\n @pytest.mark.calculate_credits \n def test_calculate_credits(self):\n employee = Employee('John', '11/11/2000', 1000)\n assert employee.calculate_credits() == 100\n\n @pytest.mark.calculate_credits \n def test_calculate_credits_if_salary_is_20000_should_return_exception(self):\n with pytest.raises(Exception):\n salary = 20000\n employee = Employee('John', '11/11/2000', salary)\n assert employee.calculate_credits()\n\n def test_get_last_name(self):\n employee = Employee('John Doe', '11/11/2000', 1000)\n assert employee.last_name == \"Doe\"\n\n # test if the employee is a partner and salary >= 100000 decrease 10%\n def test_partner_salary_decrease_when_receive_100000_return_90000(self):\n # arrange\n input_salary = 100000\n input_name = 'Ciro Moura'\n result = 90000\n\n partner = Employee(input_name, '11/11/2000', input_salary)\n partner.decrease_salary() # act\n\n assert partner.salary == result # assert\n \n def test_member_salary_decrease_when_receive_100000_return_100000(self):\n input_salary = 100000\n input_name = 'Jhon Doe'\n result = 100000\n\n partner = Employee(input_name, '11/11/2000', input_salary)\n partner.decrease_salary() # act\n\n assert partner.salary == result # assert\n ","repo_name":"Jackson-Vieira/tdd-python-alura","sub_path":"tests/test_bytebank.py","file_name":"test_bytebank.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8301955369","text":"from typing import List\n\n\nclass Solution:\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n 
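# first approach: snapshot nums1's first m values, then two-pointer merge back into nums1\n 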
\"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n ls = [x for x in nums1[:m]]\n i, j = 0, 0\n while i None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n while n > 0: \n if m <= 0 or nums1[m-1] <= nums2[n-1]:\n nums1[m+n-1] = nums2[n-1]\n n -= 1\n else:\n nums1[m+n-1] = nums1[m-1]\n m -= 1\n \n ","repo_name":"yeonpark/K_Coders_HKU","sub_path":"Team_B/Solutions/Algorithm/1. Searching and Sorting/LST003_Dongun.py","file_name":"LST003_Dongun.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"17292255415","text":"import argparse\n\nimport h5py\n\nfrom keras.layers import Activation, BatchNormalization, Conv2D, Dense, Flatten, Input\nfrom keras.models import Model\nfrom dlgo import zero\nfrom dlgo.goboard_fast import GameState, Player, Point\n\n\ndef main():\n board_size = 9\n encoder = zero.ZeroEncoder(board_size)\n\n board_input = Input(shape=encoder.shape(), name='board_input')\n\n pb = board_input\n\n for i in range(4):\n pb = Conv2D(64, (3, 3),\n padding='same',\n data_format='channels_first')(pb)\n pb = BatchNormalization(axis=1)(pb)\n pb = Activation('relu')(pb)\n\n # Policy output\n policy_conv = Conv2D(2, (1, 1), data_format='channels_first')(pb)\n policy_batch = BatchNormalization(axis=1)(policy_conv)\n policy_relu = Activation('relu')(policy_batch)\n policy_flat = Flatten()(policy_relu)\n policy_output = Dense(encoder.num_moves(), activation='softmax')(\n policy_flat)\n\n # Value output\n value_conv = Conv2D(1, (1, 1), data_format='channels_first')(pb)\n value_batch = BatchNormalization(axis=1)(value_conv)\n value_relu = Activation('relu')(value_batch)\n value_flat = Flatten()(value_relu)\n value_hidden = Dense(256, activation='relu')(value_flat)\n value_output = Dense(1, activation='tanh')(value_hidden)\n\n model = Model(\n inputs=[board_input],\n outputs=[policy_output, value_output])\n\n c1 = zero.ZeroExperienceCollector()\n c2 = zero.ZeroExperienceCollector()\n black_agent = zero.ZeroAgent(model, encoder, rounds_per_move=10, c=2.0)\n white_agent = zero.ZeroAgent(model, encoder, rounds_per_move=10, c=2.0)\n black_agent.set_collector(c1)\n white_agent.set_collector(c2)\n\n print('Starting the game!')\n game = GameState.new_game(board_size)\n\n c1.begin_episode()\n c2.begin_episode()\n black_move = black_agent.select_move(game)\n print('B', black_move)\n game = game.apply_move(black_move)\n white_move = white_agent.select_move(game)\n print('W', white_move)\n black_move = black_agent.select_move(game)\n print('B', black_move)\n\n c1.complete_episode(1)\n c2.complete_episode(-1)\n exp = zero.combine_experience([c1, c2])\n black_agent.train(exp, 0.01, 2048)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"maxpumperla/deep_learning_and_the_game_of_go","sub_path":"code/zero_test.py","file_name":"zero_test.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":906,"dataset":"github-code","pt":"78"} +{"seq_id":"23960090222","text":"from typing import Any\n\nimport markdown\nfrom markdown import Markdown\nfrom markdown.extensions.toc import TocExtension\nfrom markdown_link_attr_modifier import LinkAttrModifierExtension\nfrom slugify import slugify\n\n\ndef _slugify(value: str, *args: Any, **kwargs: Any) -> str:\n return slugify(value, max_length=64)\n\n\ndef get_markdown() -> Markdown:\n md = markdown.Markdown(\n extensions=[\n \"fenced_code\",\n \"codehilite\",\n 
TocExtension(slugify=_slugify, permalink=\"\", toc_depth=3),\n \"markdown_captions\",\n \"attr_list\",\n LinkAttrModifierExtension(\n new_tab=\"external_only\", no_referrer=\"external_only\", auto_title=\"on\"\n ),\n \"pymdownx.tilde\",\n \"pymdownx.highlight\",\n ]\n )\n return md\n","repo_name":"Chaoyingz/chaoying.dev","sub_path":"app/service/markdown.py","file_name":"markdown.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"15278602109","text":"from ctypes import ArgumentError\nimport re\nimport sys\nimport unittest\n\nfrom PIL import Image\nfrom common import file_path\n\nfrom openslide import (\n OpenSlide,\n OpenSlideCache,\n OpenSlideError,\n OpenSlideUnsupportedFormatError,\n lowlevel,\n)\n\n\nclass TestCache(unittest.TestCase):\n @unittest.skipUnless(lowlevel.cache_create.available, \"requires OpenSlide 4.0.0\")\n def test_create_cache(self):\n OpenSlideCache(0)\n OpenSlideCache(1)\n OpenSlideCache(4 << 20)\n self.assertRaises(ArgumentError, lambda: OpenSlideCache(-1))\n self.assertRaises(ArgumentError, lambda: OpenSlideCache(1.3))\n\n\nclass TestSlideWithoutOpening(unittest.TestCase):\n def test_detect_format(self):\n self.assertTrue(OpenSlide.detect_format(file_path('__missing_file')) is None)\n self.assertTrue(OpenSlide.detect_format(file_path('../setup.py')) is None)\n self.assertEqual(\n OpenSlide.detect_format(file_path('boxes.tiff')), 'generic-tiff'\n )\n\n def test_open(self):\n self.assertRaises(\n OpenSlideUnsupportedFormatError, lambda: OpenSlide('__does_not_exist')\n )\n self.assertRaises(\n OpenSlideUnsupportedFormatError, lambda: OpenSlide('setup.py')\n )\n self.assertRaises(OpenSlideUnsupportedFormatError, lambda: OpenSlide(None))\n self.assertRaises(OpenSlideUnsupportedFormatError, lambda: OpenSlide(3))\n self.assertRaises(\n OpenSlideUnsupportedFormatError, lambda: OpenSlide('unopenable.tiff')\n )\n\n def test_operations_on_closed_handle(self):\n osr = OpenSlide(file_path('boxes.tiff'))\n props = osr.properties\n associated = osr.associated_images\n osr.close()\n self.assertRaises(ArgumentError, lambda: osr.read_region((0, 0), 0, (100, 100)))\n self.assertRaises(ArgumentError, lambda: osr.close())\n self.assertRaises(ArgumentError, lambda: props['openslide.vendor'])\n self.assertRaises(ArgumentError, lambda: associated['label'])\n\n def test_context_manager(self):\n osr = OpenSlide(file_path('boxes.tiff'))\n with osr:\n self.assertEqual(osr.level_count, 4)\n self.assertRaises(ArgumentError, lambda: osr.level_count)\n\n\nclass _SlideTest:\n def setUp(self):\n self.osr = OpenSlide(file_path(self.FILENAME))\n\n def tearDown(self):\n self.osr.close()\n\n\nclass TestSlide(_SlideTest, unittest.TestCase):\n FILENAME = 'boxes.tiff'\n\n def test_repr(self):\n self.assertEqual(repr(self.osr), 'OpenSlide(%r)' % file_path('boxes.tiff'))\n\n def test_basic_metadata(self):\n self.assertEqual(self.osr.level_count, 4)\n self.assertEqual(\n self.osr.level_dimensions, ((300, 250), (150, 125), (75, 62), (37, 31))\n )\n self.assertEqual(self.osr.dimensions, (300, 250))\n\n self.assertEqual(len(self.osr.level_downsamples), self.osr.level_count)\n self.assertEqual(self.osr.level_downsamples[0:2], (1, 2))\n self.assertAlmostEqual(self.osr.level_downsamples[2], 4, places=0)\n self.assertAlmostEqual(self.osr.level_downsamples[3], 8, places=0)\n\n self.assertEqual(self.osr.get_best_level_for_downsample(0.5), 0)\n self.assertEqual(self.osr.get_best_level_for_downsample(3), 1)\n 
self.assertEqual(self.osr.get_best_level_for_downsample(37), 3)\n\n def test_properties(self):\n self.assertEqual(self.osr.properties['openslide.vendor'], 'generic-tiff')\n self.assertRaises(KeyError, lambda: self.osr.properties['__does_not_exist'])\n # test __len__ and __iter__\n self.assertEqual(\n len([v for v in self.osr.properties]), len(self.osr.properties)\n )\n self.assertEqual(\n repr(self.osr.properties), '<_PropertyMap %r>' % dict(self.osr.properties)\n )\n\n @unittest.skipUnless(\n lowlevel.read_icc_profile.available, \"requires OpenSlide 4.0.0\"\n )\n def test_color_profile(self):\n self.assertEqual(self.osr.color_profile.profile.device_class, 'mntr')\n self.assertEqual(\n len(self.osr.read_region((0, 0), 0, (100, 100)).info['icc_profile']), 588\n )\n self.assertEqual(\n len(self.osr.get_thumbnail((100, 100)).info['icc_profile']), 588\n )\n\n def test_read_region(self):\n self.assertEqual(\n self.osr.read_region((-10, -10), 1, (400, 400)).size, (400, 400)\n )\n\n def test_read_region_size_dimension_zero(self):\n self.assertEqual(self.osr.read_region((0, 0), 1, (400, 0)).size, (400, 0))\n\n def test_read_region_bad_level(self):\n self.assertEqual(self.osr.read_region((0, 0), 4, (100, 100)).size, (100, 100))\n\n def test_read_region_bad_size(self):\n self.assertRaises(\n OpenSlideError, lambda: self.osr.read_region((0, 0), 1, (400, -5))\n )\n\n @unittest.skipIf(sys.maxsize < 1 << 32, '32-bit Python')\n # Broken on Pillow < 6.2.0.\n # https://github.com/python-pillow/Pillow/issues/3963\n @unittest.skipIf(\n [int(i) for i in getattr(Image, '__version__', '0').split('.')] < [6, 2, 0],\n 'broken on Pillow < 6.2.0',\n )\n # Disabled to avoid OOM killer on small systems, since the stdlib\n # doesn't provide a way to find out how much RAM we have\n def _test_read_region_2GB(self):\n self.assertEqual(\n self.osr.read_region((1000, 1000), 0, (32768, 16384)).size, (32768, 16384)\n )\n\n def test_thumbnail(self):\n self.assertEqual(self.osr.get_thumbnail((100, 100)).size, (100, 83))\n\n @unittest.skipUnless(lowlevel.cache_create.available, \"requires OpenSlide 4.0.0\")\n def test_set_cache(self):\n self.osr.set_cache(OpenSlideCache(64 << 10))\n self.assertEqual(self.osr.read_region((0, 0), 0, (400, 400)).size, (400, 400))\n self.assertRaises(TypeError, lambda: self.osr.set_cache(None))\n self.assertRaises(TypeError, lambda: self.osr.set_cache(3))\n\n\nclass TestAperioSlide(_SlideTest, unittest.TestCase):\n FILENAME = 'small.svs'\n\n def test_associated_images(self):\n self.assertEqual(self.osr.associated_images['thumbnail'].size, (16, 16))\n self.assertRaises(KeyError, lambda: self.osr.associated_images['__missing'])\n # test __len__ and __iter__\n self.assertEqual(\n len([v for v in self.osr.associated_images]),\n len(self.osr.associated_images),\n )\n\n def mangle_repr(o):\n return re.sub('0x[0-9a-fA-F]+', '(mangled)', repr(o))\n\n self.assertEqual(\n mangle_repr(self.osr.associated_images),\n '<_AssociatedImageMap %s>' % mangle_repr(dict(self.osr.associated_images)),\n )\n\n def test_color_profile(self):\n self.assertIsNone(self.osr.color_profile)\n self.assertNotIn(\n 'icc_profile', self.osr.read_region((0, 0), 0, (100, 100)).info\n )\n self.assertNotIn('icc_profile', self.osr.associated_images['thumbnail'].info)\n self.assertNotIn('icc_profile', self.osr.get_thumbnail((100, 100)).info)\n\n\n# Requires DICOM support in OpenSlide. 
Use associated image ICC support as\n# a proxy.\n@unittest.skipUnless(\n lowlevel.read_associated_image_icc_profile.available, \"requires OpenSlide 4.0.0\"\n)\nclass TestDicomSlide(_SlideTest, unittest.TestCase):\n FILENAME = 'boxes_0.dcm'\n\n def test_color_profile(self):\n self.assertEqual(self.osr.color_profile.profile.device_class, 'mntr')\n main_profile = self.osr.read_region((0, 0), 0, (100, 100)).info['icc_profile']\n associated_profile = self.osr.associated_images['thumbnail'].info['icc_profile']\n self.assertEqual(len(main_profile), 456)\n self.assertEqual(main_profile, associated_profile)\n self.assertIs(main_profile, associated_profile)\n\n\nclass TestUnreadableSlide(_SlideTest, unittest.TestCase):\n FILENAME = 'unreadable.svs'\n\n def test_read_bad_region(self):\n self.assertEqual(self.osr.properties['openslide.vendor'], 'aperio')\n self.assertRaises(\n OpenSlideError, lambda: self.osr.read_region((0, 0), 0, (16, 16))\n )\n # verify that errors are sticky\n self.assertRaises(\n OpenSlideError, lambda: self.osr.properties['openslide.vendor']\n )\n\n def test_read_bad_associated_image(self):\n self.assertEqual(self.osr.properties['openslide.vendor'], 'aperio')\n # Prints \"JPEGLib: Bogus marker length.\" to stderr due to\n # https://github.com/openslide/openslide/issues/36\n self.assertRaises(\n OpenSlideError, lambda: self.osr.associated_images['thumbnail']\n )\n # verify that errors are sticky\n self.assertRaises(\n OpenSlideError, lambda: self.osr.properties['openslide.vendor']\n )\n","repo_name":"openslide/openslide-python","sub_path":"tests/test_openslide.py","file_name":"test_openslide.py","file_ext":"py","file_size_in_byte":8773,"program_lang":"python","lang":"en","doc_type":"code","stars":340,"dataset":"github-code","pt":"78"} +{"seq_id":"6396657954","text":"# Check whether a given directed graph with 𝑛 vertices and 𝑚 edges contains a cycle\n\nfrom collections import defaultdict\n\nclass graph:\n def __init__(self):\n self.adj = defaultdict(list)\n\n def addEdge(self, p,q):\n self.adj[p].append(q)\n \n def dfs(self,ver):\n self.visited = defaultdict(bool)\n self.rec = defaultdict(bool)\n for i in range(1, ver+1):\n if(self.visited[i]==False):\n if(self._dfs(i)):\n return(1)\n return(0)\n \n def _dfs(self, u):\n self.visited[u] = True\n self.rec[u] = True\n for i in self.adj[u]:\n if(self.visited[i]==False):\n if(self._dfs(i)):\n return(True)\n elif(self.rec[i]):\n return(True)\n self.rec[u] = False\n return(False)\n\n\nif __name__ == \"__main__\":\n ver,e = map(int, input().split())\n g = graph()\n for i in range(e):\n u,v = map(int,input().split())\n g.addEdge(u,v) \n print(g.dfs(ver)) #pass random vertex and examine acyclicity","repo_name":"UjwalAgrawal/Data-Structure-Algorithms","sub_path":"Graphs/Acyclicity.py","file_name":"Acyclicity.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"38900855939","text":"# Author: Daniel Rodriguez Amezaga\n\nclass MiException(Exception):\n def __init__(self, params):\n self.params = params\n \ntry:\n numero = int(input(\"Enter a value: \"))\n raise MiException(numero) \n \nexcept ValueError:\n print(\"The value entered is not an integer\")\n \nexcept MiException as e:\n if e.params > 20:\n print(\"The value entered is greater than 20\")\n else:\n 
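# values of 20 or below are simply echoed back\n 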
print(e.params)\n\n\n\n\n","repo_name":"danielrodriguezamezaga/good_programming_practicies_DRA","sub_path":"Lesson-1/ejercicio1_1.py","file_name":"ejercicio1_1.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22149873683","text":"from urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\ndf = pd.DataFrame(columns=[\"total\", \"ja\", \"en\", \"url\"])\npage = 55\nwhile True:\n url = \"https://tabelog.com/tw/tokyo/rstLst/\" + str(page) +\"/?SrtT=rt\"\n print(\"Crawling page \" + str(page) + \" url:\", url)\n try:\n response = urlopen(url)\n except HTTPError:\n print(\"Last page reached\")\n break\n html = BeautifulSoup(response, \"html.parser\")\n # print(html)\n\n res = html.find_all(\"li\", class_=\"list-rst\")\n for r in res:\n ja = r.find(\"small\", class_=\"list-rst__name-ja\")\n en = r.find(\"a\", class_=\"list-rst__name-main\")\n ratings = r.find_all(\"b\", class_=\"c-rating__val\")\n # print(en)\n print(ratings[0].text,\n ja.text, en.text, en[\"href\"])\n\n s = pd.Series([ratings[0].text, ja.text,\n en.text, en[\"href\"]],\n index=[\"total\", \"ja\", \"en\", \"url\"])\n # print(s)\n df = pd.concat([df, s.to_frame().T], ignore_index=True)\n page = page + 1\ndf.to_csv(\"tabelog.csv\", encoding=\"utf-8\", index=False)\n","repo_name":"kd110544/Python_Demo1","sub_path":"14-2.py","file_name":"14-2.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70508030651","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nimport math\r\nfrom numpy import unravel_index\r\n\r\ndemand = pd.read_csv('demand.csv')\r\nstops = pd.read_csv('stops.csv')\r\n\r\nx_origin = np.array(demand['OriginX'])\r\ny_origin = np.array(demand['OriginY'])\r\n\r\nx_dest = np.array(demand['DestinationX'])\r\ny_dest = np.array(demand['DestinationY'])\r\n\r\nx_stops = np.array(stops['X'])\r\ny_stops = np.array(stops['Y'])\r\n\r\nplt.plot(x_origin,y_origin,'g.')\r\nplt.plot(x_dest,y_dest,'r.')\r\nplt.plot(x_stops,y_stops,'b.')\r\n\r\nplt.savefig('points.png',dpi=300)\r\n# plt.show()\r\n\r\n\r\n# Find the density matrix of origin coordinates,\r\n# D[i][j]= the number of passengers in [i-1,i]x[j-1,j] \r\ns = (int(math.ceil(np.max(x_origin))), int(math.ceil(np.max(y_origin))))\r\nDensity_origin = np.zeros(s) \r\nDict_dest = {}\r\nfor i in range(len(x_origin)):\r\n x = math.floor(x_origin[i])\r\n y = math.floor(y_origin[i])\r\n Dict_dest[(x,y)] = [] #corresponding to origin density (x,y) gives destination locations\r\n\r\nfor i in range(len(x_origin)):\r\n x = int(math.floor(x_origin[i]))\r\n y = int(math.floor(y_origin[i]))\r\n Density_origin[x][y] += 1 \r\n Dict_dest[(x,y)].append((x_dest[i], y_dest[i]))\r\n \r\n\r\n# Find the density matrix of bus stations\r\n# D[i,j] = the number of passengers who can use the bus station at (i,j) \r\ns = (np.max(x_stops), np.max(y_stops))\r\nDensity_bus_stops = np.zeros(s) \r\nfor a in range(len(x_stops)):\r\n i = x_stops[a]\r\n j = y_stops[a]\r\n if i != len(Density_origin) and j != len(Density_origin[0]):\r\n Density_bus_stops[i][j] = Density_origin[i-1][j-1]\\\r\n +Density_origin[i-1][j] \\\r\n +Density_origin[i][j-1] \\\r\n +Density_origin[i][j]\r\n\r\n# average distance map from (x,y) in Density_origin to (s1,s2) that preserves the density in destination\r\nDict_dest_avg = {}\r\nfor k,v in Dict_dest.items():\r\n 
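# sum each origin cell's destination coords, then integer-divide for a grid-aligned average\r\n 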
s = [0,0]\r\n for it in v:\r\n s[0] += it[0]\r\n s[1] += it[1]\r\n s[0] //= len(v)\r\n s[1] //= len(v)\r\n Dict_dest_avg[k] = tuple(s)\r\n \r\n \r\n# find the proper map from choosing S_O to the corresponding destination, S_D.\r\nDict_O2D = {}\r\nfor k,v in Dict_dest_avg.items(): \r\n a = k[0]+1\r\n b = k[1]+1\r\n Dict_O2D[(a,b)] = v \r\n \r\n \r\n# dictionary with key : bus IDs and values: [bus stations, start pnt, endpnt, length of route] \r\nL_M = 18 \r\nM = 15 \r\nRout_Dict = {}\r\nfor i in range (M): \r\n (a,b) = unravel_index(Density_bus_stops.argmax(), Density_bus_stops.shape) \r\n if (a,b) in Dict_O2D:\r\n Rout_Dict[i+1] = [[Dict_O2D[(a,b)], (a,b)], (a,b), Dict_O2D[(a,b)],0]\r\n \r\n Density_bus_stops[a][b] = 0\r\nDone = True\r\nwhile Done:\r\n (c,d) = unravel_index(Density_bus_stops.argmax(), Density_bus_stops.shape) \r\n \r\n if (c,d) in Dict_O2D:\r\n (x,y) = Dict_O2D[(c,d)]\r\n for i in range (M): \r\n (a,b) = Rout_Dict[i+1][1]\r\n (u,v) = Rout_Dict[i+1][2]\r\n if (abs(x-a) + abs(y-b)) >(abs(x-c) + abs(y-d)):\r\n if (Rout_Dict[i+1][3] + (abs(c-a) + abs(d-b) ) + (abs(c-x) + abs(d-y) ) ) <= L_M:\r\n Rout_Dict[i+1][0].append((c,d))\r\n Rout_Dict[i+1][1] = (c,d)\r\n Rout_Dict[i+1][2] = (x,y)\r\n Rout_Dict[i+1][3] += (abs(c-a) + abs(d-b) ) #distance from (a,b) to (c,d)\r\n Density_bus_stops[c][d] = 0\r\n break\r\n \r\n Density_bus_stops[c][d] = 0\r\n if np.max(Density_bus_stops) == 0:\r\n Done = False \r\n\r\n\r\n## plot routes\r\n#colors = cm.jet(np.linspace(0, 1, len(Rout_Dict)))\r\n#counter = 0\r\n#for k,v in Rout_Dict.items():\r\n# x = [i[0] for i in v[0]]\r\n# y = [i[1] for i in v[0]]\r\n# x.append(x[0])\r\n# y.append(y[0])\r\n# x = x[1:]\r\n# y = y[1:]\r\n# plt.plot(x,y,color=colors[counter])\r\n# plt.plot(x,y,'o',color=colors[counter])\r\n# counter +=1\r\n#plt.savefig('routes.png',dpi=600)\r\n \r\n\r\n# dictionary with key : passenger location and values: stop ID \r\nstopXY2stopID = {}\r\nfor idx in range(len(stops['StopId'])):\r\n stopXY2stopID[(stops['X'][idx],stops['Y'][idx])] = stops['StopId'][idx]\r\n \r\nstopID2stopXY = {v:k for k,v in stopXY2stopID.items()}\r\n\r\n\r\n# dictionary with key : bus IDs and values: bus stations \r\noutput_dict = {}\r\nfor k,v in Rout_Dict.items():\r\n seq = [stopXY2stopID[t] for t in v[0]]\r\n seq.append(seq[0])\r\n seq = seq[1:]\r\n output_dict[k] = seq\r\n\r\n\r\n# remove the repeated bus stops \r\nChec_list = []\r\nfor i in range(M):\r\n Chec_list += output_dict[M-i]\r\nfor i in range(M):\r\n l = len(output_dict[M-i])\r\n ID = output_dict[M-i][-1]\r\n cnt = Chec_list.count(ID)\r\n if cnt > 1: \r\n item_index = Chec_list.index(ID)\r\n output_dict[M-i].pop()\r\n\r\n\r\n # write in a text file \r\nwith open('output.txt','w') as f:\r\n f.write('100\\n')\r\n for v in output_dict.values():\r\n for i in v[:-1]:\r\n f.write('%d,'%i)\r\n f.write('%d'%v[-1])\r\n f.write('\\n')\r\n \r\n# find the distance\r\ndef dist_manhattan(origin_tuple,dest_list_tuples):\r\n d_manh = []\r\n for dest_tuple in dest_list_tuples:\r\n d_manh.append(abs(origin_tuple[0]-dest_tuple[0])+abs(origin_tuple[1]-dest_tuple[1]))\r\n return d_manh\r\n\r\n# compute optimal routes and their probabilities\r\np_i = []\r\nfor i in range(demand.shape[0]):\r\n pi_route_j = []\r\n for j in output_dict:\r\n stopXY_tuples = [stopID2stopXY[st] for st in output_dict[j]]\r\n # compute the probability of customer i taking route j\r\n # step 1: find closest point on route to origin\r\n dist_origin2allStops = dist_manhattan( (demand['OriginX'][i],demand['OriginY'][i]) , 
stopXY_tuples )\r\n d_o = sorted(dist_origin2allStops)[0]\r\n\r\n # step 2: find closest point on route to destination\r\n dist_dest2allStops = dist_manhattan( (demand['DestinationX'][i],demand['DestinationY'][i]) , stopXY_tuples )\r\n d_d = sorted(dist_dest2allStops)[0]\r\n\r\n # step 3: compute the probability\r\n pi_route_j.append(max( 0 , 1 - (d_o+d_d)/2. ))\r\n\r\n # compute probability of customer take bus\r\n # this is the maximum route-wise probability\r\n p_i.append(sorted(pi_route_j)[-1])\r\n\r\n# compute profit from customers\r\nK = 4\r\nprofit_customers = 0\r\nfor i in range(demand.shape[0]):\r\n # use probability from previous step\r\n profit_customers += K * p_i[i]\r\n\r\n# compute costs\r\nCf = 100\r\nCv = 5\r\ncosts = 0\r\nfor j in Rout_Dict:\r\n # compute cost per route\r\n costs += Cf + Cv * Rout_Dict[j][3]\r\n\r\n# compute total profit\r\nprofit_total = profit_customers - costs\r\n\r\nprint('Total profit is %f' % profit_total)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n \r\n \r\n","repo_name":"ezahedi/bus-routing-problem","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"70617852732","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\n\n# Create sample data\nt = np.linspace(0, 2*np.pi, 100)\nx = np.sin(t)\ny = np.cos(t)\nX, Y = np.meshgrid(x, y)\nZ = np.sqrt(X**2 + Y**2)\n\n# Calculate angles for coloring\nangles = np.arctan2(Y, X) # Calculate angles in radians\n\n# Create custom colormap with specified colors\ncolors = [(1, 0, 0), (1,0.5,0), (1, 1, 0),(0,1,0), (0, 0, 1), (0,0.5,1), (0.5, 0, 0.5), (1,0,0)] \ncmap_name = 'custom_color_map'\ncm = LinearSegmentedColormap.from_list(cmap_name, colors, N=256)\n\n# Normalize angles to [0, 1] range for colormap mapping\nangle_normalized = (angles + np.pi) / (2 * np.pi)\n\n# Create a contour plot with colored lines\ncontour = plt.contourf(X, Y, Z, levels=100, cmap='viridis') # Grayscale background\nplt.colorbar(contour, label='Radius')\n\n# Draw colored lines connecting the center to the edge\nfor i in range(len(x)):\n for j in range(len(y)):\n color = cm(angle_normalized[j, i]) # Get color from colormap\n plt.plot([0, X[j, i]], [0, Y[j, i]], color=color)\n\nplt.gca().set_aspect('equal', adjustable='box') # Make the plot square\nplt.title('Colored Lines from Center to Edge')\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.show()","repo_name":"PNUVV/ML-Study-Minjun","sub_path":"mid-project/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43133560252","text":"import traceback\nimport functools\nimport logging\n\nfrom .time_trace import TimeTrace\nfrom .transaction import current_transaction\nfrom ..core.database_node import DatabaseNode\nfrom ..common.object_wrapper import FunctionWrapper, wrap_object\n\n_logger = logging.getLogger(__name__)\n\ndef register_database_client(dbapi2_module, database_name,\n quoting_style='single', explain_query=None, explain_stmts=[],\n instance_name=None):\n\n _logger.debug('Registering database client module %r where database '\n 'is %r, quoting style is %r, explain query statement is %r and '\n 'the SQL statements on which explain plans can be run are %r.',\n dbapi2_module, database_name, quoting_style, explain_query,\n explain_stmts)\n\n dbapi2_module._nr_database_name 
= database_name\n dbapi2_module._nr_quoting_style = quoting_style\n dbapi2_module._nr_explain_query = explain_query\n dbapi2_module._nr_explain_stmts = explain_stmts\n dbapi2_module._nr_instance_name = instance_name\n\nclass DatabaseTrace(TimeTrace):\n\n def __init__(self, transaction, sql, dbapi2_module=None,\n connect_params=None, cursor_params=None,\n sql_parameters=None, execute_params=None):\n\n super(DatabaseTrace, self).__init__(transaction)\n\n if transaction:\n self.sql = transaction._intern_string(sql)\n else:\n self.sql = sql\n\n self.dbapi2_module = dbapi2_module\n\n self.connect_params = connect_params\n self.cursor_params = cursor_params\n self.sql_parameters = sql_parameters\n self.execute_params = execute_params\n\n def __repr__(self):\n return '<%s %s>' % (self.__class__.__name__, dict(\n sql=self.sql, dbapi2_module=self.dbapi2_module))\n\n def finalize_data(self, transaction, exc=None, value=None, tb=None):\n self.stack_trace = None\n\n connect_params = None\n cursor_params = None\n sql_parameters = None\n execute_params = None\n\n settings = transaction.settings\n transaction_tracer = settings.transaction_tracer\n agent_limits = settings.agent_limits\n\n if (transaction_tracer.enabled and settings.collect_traces and\n transaction_tracer.record_sql != 'off'):\n if self.duration >= transaction_tracer.stack_trace_threshold:\n if (transaction._stack_trace_count <\n agent_limits.slow_sql_stack_trace):\n self.stack_trace = [transaction._intern_string(x) for\n x in traceback.format_stack()]\n transaction._stack_trace_count += 1\n\n\n # Only remember all the params for the calls if know\n # there is a chance we will need to do an explain\n # plan. We never allow an explain plan to be done if\n # an exception occurred in doing the query in case\n # doing the explain plan with the same inputs could\n # cause further problems.\n\n if (exc is None and transaction_tracer.explain_enabled and\n self.duration >= transaction_tracer.explain_threshold and\n self.connect_params is not None):\n if (transaction._explain_plan_count <\n agent_limits.sql_explain_plans):\n connect_params = self.connect_params\n cursor_params = self.cursor_params\n sql_parameters = self.sql_parameters\n execute_params = self.execute_params\n transaction._explain_plan_count += 1\n\n self.sql_format = transaction_tracer.record_sql\n\n self.connect_params = connect_params\n self.cursor_params = cursor_params\n self.sql_parameters = sql_parameters\n self.execute_params = execute_params\n\n def create_node(self):\n return DatabaseNode(dbapi2_module=self.dbapi2_module, sql=self.sql,\n children=self.children, start_time=self.start_time,\n end_time=self.end_time, duration=self.duration,\n exclusive=self.exclusive, stack_trace=self.stack_trace,\n sql_format=self.sql_format, connect_params=self.connect_params,\n cursor_params=self.cursor_params,\n sql_parameters=self.sql_parameters,\n execute_params=self.execute_params)\n\n def terminal_node(self):\n return True\n\ndef DatabaseTraceWrapper(wrapped, sql, dbapi2_module=None):\n\n def _nr_database_trace_wrapper_(wrapped, instance, args, kwargs):\n transaction = current_transaction()\n\n if transaction is None:\n return wrapped(*args, **kwargs)\n\n if callable(sql):\n if instance is not None:\n _sql = sql(instance, *args, **kwargs)\n else:\n _sql = sql(*args, **kwargs)\n else:\n _sql = sql\n\n with DatabaseTrace(transaction, _sql, dbapi2_module):\n return wrapped(*args, **kwargs)\n\n return FunctionWrapper(wrapped, _nr_database_trace_wrapper_)\n\ndef database_trace(sql, 
dbapi2_module=None):\n return functools.partial(DatabaseTraceWrapper, sql=sql,\n dbapi2_module=dbapi2_module)\n\ndef wrap_database_trace(module, object_path, sql, dbapi2_module=None):\n wrap_object(module, object_path, DatabaseTraceWrapper,\n (sql, dbapi2_module))\n","repo_name":"devs1991/test_edx_docmode","sub_path":"venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/api/database_trace.py","file_name":"database_trace.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"42517100949","text":"from __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n \"metadata_version\": \"1.1\",\n \"status\": [\"preview\"],\n \"supported_by\": \"community\",\n}\n\nDOCUMENTATION = \"\"\"\n---\nmodule: oci_network_drg_route_distribution_statements_actions\nshort_description: Perform actions on a DrgRouteDistributionStatements resource in Oracle Cloud Infrastructure\ndescription:\n - Perform actions on a DrgRouteDistributionStatements resource in Oracle Cloud Infrastructure\n - For I(action=add), adds one or more route distribution statements to the specified route distribution.\n - For I(action=remove), removes one or more route distribution statements from the specified route distribution's map.\n - For I(action=update), updates one or more route distribution statements in the specified route distribution.\nversion_added: \"2.9.0\"\nauthor: Oracle (@oracle)\noptions:\n statement_ids:\n description:\n - The Oracle-assigned ID of each route distribution to remove.\n - Applicable only for I(action=remove).\n type: list\n elements: str\n drg_route_distribution_id:\n description:\n - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the route distribution.\n type: str\n aliases: [\"id\"]\n required: true\n statements:\n description:\n - The collection of route distribution statements to insert into the route distribution.\n - Required for I(action=add), I(action=update).\n type: list\n elements: dict\n suboptions:\n action:\n description:\n - \"Accept: import/export the route \\\\\"as is\\\\\"\"\n type: str\n choices:\n - \"ACCEPT\"\n id:\n description:\n - The Oracle-assigned ID of each route distribution statement to be updated.\n type: str\n match_criteria:\n description:\n - The action is applied only if all of the match criteria is met.\n type: list\n elements: dict\n suboptions:\n drg_attachment_id:\n description:\n - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the DRG attachment.\n - Required when match_type is 'DRG_ATTACHMENT_ID'\n type: str\n attachment_type:\n description:\n - The type of the network resource to be included in this match. A match for a network type implies that all\n DRG attachments of that type insert routes into the table.\n - Required when match_type is 'DRG_ATTACHMENT_TYPE'\n type: str\n choices:\n - \"VCN\"\n - \"VIRTUAL_CIRCUIT\"\n - \"REMOTE_PEERING_CONNECTION\"\n - \"IPSEC_TUNNEL\"\n match_type:\n description:\n - The type of the match criteria for a route distribution statement.\n type: str\n choices:\n - \"DRG_ATTACHMENT_ID\"\n - \"DRG_ATTACHMENT_TYPE\"\n - \"MATCH_ALL\"\n required: true\n priority:\n description:\n - This field is used to specify the priority of each statement in a route distribution.\n The priority will be represented as a number between 0 and 65535 where a lower number\n indicates a higher priority. 
When a route is processed, statements are applied in the order\n defined by their priority. The first matching rule dictates the action that will be taken\n on the route.\n type: int\n action:\n description:\n - The action to perform on the DrgRouteDistributionStatements.\n type: str\n required: true\n choices:\n - \"add\"\n - \"remove\"\n - \"update\"\nextends_documentation_fragment: [ oracle.oci.oracle ]\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: Perform action add on drg_route_distribution_statements\n oci_network_drg_route_distribution_statements_actions:\n # required\n drg_route_distribution_id: \"ocid1.drgroutedistribution.oc1..xxxxxxEXAMPLExxxxxx\"\n statements:\n - # optional\n action: ACCEPT\n id: \"ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx\"\n match_criteria:\n - # required\n drg_attachment_id: \"ocid1.drgattachment.oc1..xxxxxxEXAMPLExxxxxx\"\n match_type: DRG_ATTACHMENT_ID\n priority: 56\n action: add\n\n- name: Perform action remove on drg_route_distribution_statements\n oci_network_drg_route_distribution_statements_actions:\n # required\n drg_route_distribution_id: \"ocid1.drgroutedistribution.oc1..xxxxxxEXAMPLExxxxxx\"\n action: remove\n\n # optional\n statement_ids: [ \"statement_ids_example\" ]\n\n- name: Perform action update on drg_route_distribution_statements\n oci_network_drg_route_distribution_statements_actions:\n # required\n drg_route_distribution_id: \"ocid1.drgroutedistribution.oc1..xxxxxxEXAMPLExxxxxx\"\n statements:\n - # optional\n action: ACCEPT\n id: \"ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx\"\n match_criteria:\n - # required\n drg_attachment_id: \"ocid1.drgattachment.oc1..xxxxxxEXAMPLExxxxxx\"\n match_type: DRG_ATTACHMENT_ID\n priority: 56\n action: update\n\n\"\"\"\n\nRETURN = \"\"\"\ndrg_route_distribution_statements:\n description:\n - Details of the DrgRouteDistributionStatements resource acted upon by the current operation\n returned: on success\n type: complex\n contains:\n match_criteria:\n description:\n - The action is applied only if all of the match criteria is met.\n If there are no match criteria in a statement, any input is considered a match and the action is applied.\n returned: on success\n type: complex\n contains:\n drg_attachment_id:\n description:\n - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the DRG attachment.\n returned: on success\n type: str\n sample: \"ocid1.drgattachment.oc1..xxxxxxEXAMPLExxxxxx\"\n attachment_type:\n description:\n - The type of the network resource to be included in this match. A match for a network type implies that all\n DRG attachments of that type insert routes into the table.\n returned: on success\n type: str\n sample: VCN\n match_type:\n description:\n - The type of the match criteria for a route distribution statement.\n returned: on success\n type: str\n sample: DRG_ATTACHMENT_TYPE\n action:\n description:\n - \"`ACCEPT` indicates the route should be imported or exported as-is.\"\n returned: on success\n type: str\n sample: ACCEPT\n priority:\n description:\n - This field specifies the priority of each statement in a route distribution.\n Priorities must be unique within a particular route distribution.\n The priority will be represented as a number between 0 and 65535 where a lower number\n indicates a higher priority. When a route is processed, statements are applied in the order\n defined by their priority. 
The first matching rule dictates the action that will be taken\n on the route.\n returned: on success\n type: int\n sample: 56\n id:\n description:\n - The Oracle-assigned ID of the route distribution statement.\n returned: on success\n type: str\n sample: \"ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx\"\n sample: {\n \"match_criteria\": [{\n \"drg_attachment_id\": \"ocid1.drgattachment.oc1..xxxxxxEXAMPLExxxxxx\",\n \"attachment_type\": \"VCN\",\n \"match_type\": \"DRG_ATTACHMENT_TYPE\"\n }],\n \"action\": \"ACCEPT\",\n \"priority\": 56,\n \"id\": \"ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx\"\n }\n\"\"\"\n\nfrom ansible_collections.oracle.oci.plugins.module_utils import (\n oci_common_utils,\n oci_wait_utils,\n)\nfrom ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (\n OCIActionsHelperBase,\n OCIAnsibleModule,\n get_custom_class,\n)\n\ntry:\n from oci.core import VirtualNetworkClient\n from oci.core.models import AddDrgRouteDistributionStatementsDetails\n from oci.core.models import RemoveDrgRouteDistributionStatementsDetails\n from oci.core.models import UpdateDrgRouteDistributionStatementsDetails\n\n HAS_OCI_PY_SDK = True\nexcept ImportError:\n HAS_OCI_PY_SDK = False\n\n\nclass DrgRouteDistributionStatementsActionsHelperGen(OCIActionsHelperBase):\n \"\"\"\n Supported actions:\n add\n remove\n update\n \"\"\"\n\n @staticmethod\n def get_module_resource_id_param():\n return \"drg_route_distribution_id\"\n\n def get_module_resource_id(self):\n return self.module.params.get(\"drg_route_distribution_id\")\n\n def add(self):\n action_details = oci_common_utils.convert_input_data_to_model_class(\n self.module.params, AddDrgRouteDistributionStatementsDetails\n )\n return oci_wait_utils.call_and_wait(\n call_fn=self.client.add_drg_route_distribution_statements,\n call_fn_args=(),\n call_fn_kwargs=dict(\n drg_route_distribution_id=self.module.params.get(\n \"drg_route_distribution_id\"\n ),\n add_drg_route_distribution_statements_details=action_details,\n ),\n waiter_type=oci_wait_utils.NONE_WAITER_KEY,\n operation=\"{0}_{1}\".format(\n self.module.params.get(\"action\").upper(),\n oci_common_utils.ACTION_OPERATION_KEY,\n ),\n waiter_client=self.get_waiter_client(),\n resource_helper=self,\n wait_for_states=self.get_action_desired_states(\n self.module.params.get(\"action\")\n ),\n )\n\n def remove(self):\n action_details = oci_common_utils.convert_input_data_to_model_class(\n self.module.params, RemoveDrgRouteDistributionStatementsDetails\n )\n return oci_wait_utils.call_and_wait(\n call_fn=self.client.remove_drg_route_distribution_statements,\n call_fn_args=(),\n call_fn_kwargs=dict(\n drg_route_distribution_id=self.module.params.get(\n \"drg_route_distribution_id\"\n ),\n remove_drg_route_distribution_statements_details=action_details,\n ),\n waiter_type=oci_wait_utils.NONE_WAITER_KEY,\n operation=\"{0}_{1}\".format(\n self.module.params.get(\"action\").upper(),\n oci_common_utils.ACTION_OPERATION_KEY,\n ),\n waiter_client=self.get_waiter_client(),\n resource_helper=self,\n wait_for_states=self.get_action_desired_states(\n self.module.params.get(\"action\")\n ),\n )\n\n def update(self):\n action_details = oci_common_utils.convert_input_data_to_model_class(\n self.module.params, UpdateDrgRouteDistributionStatementsDetails\n )\n return oci_wait_utils.call_and_wait(\n call_fn=self.client.update_drg_route_distribution_statements,\n call_fn_args=(),\n call_fn_kwargs=dict(\n drg_route_distribution_id=self.module.params.get(\n \"drg_route_distribution_id\"\n ),\n 
update_drg_route_distribution_statements_details=action_details,\n ),\n waiter_type=oci_wait_utils.NONE_WAITER_KEY,\n operation=\"{0}_{1}\".format(\n self.module.params.get(\"action\").upper(),\n oci_common_utils.ACTION_OPERATION_KEY,\n ),\n waiter_client=self.get_waiter_client(),\n resource_helper=self,\n wait_for_states=self.get_action_desired_states(\n self.module.params.get(\"action\")\n ),\n )\n\n\nDrgRouteDistributionStatementsActionsHelperCustom = get_custom_class(\n \"DrgRouteDistributionStatementsActionsHelperCustom\"\n)\n\n\nclass ResourceHelper(\n DrgRouteDistributionStatementsActionsHelperCustom,\n DrgRouteDistributionStatementsActionsHelperGen,\n):\n pass\n\n\ndef main():\n module_args = oci_common_utils.get_common_arg_spec(\n supports_create=False, supports_wait=False\n )\n module_args.update(\n dict(\n statement_ids=dict(type=\"list\", elements=\"str\"),\n drg_route_distribution_id=dict(aliases=[\"id\"], type=\"str\", required=True),\n statements=dict(\n type=\"list\",\n elements=\"dict\",\n options=dict(\n action=dict(type=\"str\", choices=[\"ACCEPT\"]),\n id=dict(type=\"str\"),\n match_criteria=dict(\n type=\"list\",\n elements=\"dict\",\n options=dict(\n drg_attachment_id=dict(type=\"str\"),\n attachment_type=dict(\n type=\"str\",\n choices=[\n \"VCN\",\n \"VIRTUAL_CIRCUIT\",\n \"REMOTE_PEERING_CONNECTION\",\n \"IPSEC_TUNNEL\",\n ],\n ),\n match_type=dict(\n type=\"str\",\n required=True,\n choices=[\n \"DRG_ATTACHMENT_ID\",\n \"DRG_ATTACHMENT_TYPE\",\n \"MATCH_ALL\",\n ],\n ),\n ),\n ),\n priority=dict(type=\"int\"),\n ),\n ),\n action=dict(type=\"str\", required=True, choices=[\"add\", \"remove\", \"update\"]),\n )\n )\n\n module = OCIAnsibleModule(argument_spec=module_args, supports_check_mode=True)\n\n if not HAS_OCI_PY_SDK:\n module.fail_json(msg=\"oci python sdk required for this module.\")\n\n resource_helper = ResourceHelper(\n module=module,\n resource_type=\"drg_route_distribution_statements\",\n service_client_class=VirtualNetworkClient,\n namespace=\"core\",\n )\n\n result = resource_helper.perform_action(module.params.get(\"action\"))\n\n module.exit_json(**result)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"oracle/oci-ansible-collection","sub_path":"plugins/modules/oci_network_drg_route_distribution_statements_actions.py","file_name":"oci_network_drg_route_distribution_statements_actions.py","file_ext":"py","file_size_in_byte":15546,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"78"} +{"seq_id":"40399207593","text":"import os\nimport pickle\nimport numpy as np\n\ndef insert_zeros(path, num, save_path):\n files = os.listdir(path)\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n for file in files:\n with open(os.path.join(path, file), \"rb\") as f:\n data = pickle.load(f)\n data = sorted(data, key=lambda x: x[0])\n tmp = []\n last_id = (0, 0)\n last_conn = (data[0][0], data[0][1], 0, data[0][3])\n for i, d in enumerate(data):\n if d[0] != last_id[0]:\n cnt = i - last_id[1]\n if cnt < num:\n for _ in range(num - cnt):\n tmp.append(last_conn)\n last_id = (d[0], i)\n last_conn = (d[0], d[1], 0, d[3])\n tmp.append(d)\n with open(os.path.join(save_path, file), \"wb\") as f:\n pickle.dump(np.array(tmp), f)\n \n\n\n\n","repo_name":"SonpKing/CNN2SNN","sub_path":"hardware/insert_zeros.py","file_name":"insert_zeros.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"15194991529","text":"import 
json\nimport redis\nimport gevent\nfrom flask import Blueprint\n\n\nws = Blueprint('ws', __name__, url_prefix='/ws')\nredis = redis.from_url('redis://127.0.0.1:6379')\n\n\nclass Chatroom(object):\n\n def __init__(self):\n self.clients = []\n self.pubsub = redis.pubsub()\n self.pubsub.subscribe('chat')\n\n def register(self, client):\n self.clients.append(client)\n\n def send(self, client, data):\n try:\n client.send(data.decode('utf-8'))\n except:\n self.clients.remove(client)\n\n def run(self):\n for message in self.pubsub.listen():\n if message['type'] == 'message':\n data = message.get('data')\n for client in self.clients:\n gevent.spawn(self.send, client, data)\n\n def start(self):\n gevent.spawn(self.run)\n\n\nchat = Chatroom()\nchat.start()\n\n\n@ws.route('/send')\ndef inbox(ws):\n while not ws.closed:\n gevent.sleep(0.1)\n message = ws.receive()\n\n if message:\n redis.publish('chat', message)\n\n\n@ws.route('/recv')\ndef outbox(ws):\n chat.register(ws)\n redis.publish('chat', json.dumps(dict(\n username='New user come in, people count',\n text=len(chat.clients)\n )))\n while not ws.closed:\n gevent.sleep(0.1)\n","repo_name":"shiyanlou/louplus-python","sub_path":"Python 进阶挑战(旧)/26-send-notification-when-new-user-come-in-living-room/simpledu/simpledu/handlers/ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":182,"dataset":"github-code","pt":"78"} +{"seq_id":"20581278065","text":"from quantopian.algorithm import attach_pipeline, pipeline_output\nfrom quantopian.pipeline import Pipeline\nfrom quantopian.pipeline.factors import CustomFactor, SimpleMovingAverage, AverageDollarVolume, Latest, RSI\nfrom quantopian.pipeline.data.builtin import USEquityPricing\nfrom quantopian.pipeline.data import morningstar as mstar\nfrom quantopian.pipeline.filters.morningstar import IsPrimaryShare\nfrom quantopian.pipeline.data.psychsignal import stocktwits\nfrom quantopian.pipeline.classifiers.morningstar import Sector\nfrom quantopian.pipeline.data import morningstar\n\nimport numpy as np\nimport pandas as pd\n\nclass Value(CustomFactor):\n \n inputs = [morningstar.valuation_ratios.book_value_yield,\n morningstar.valuation_ratios.sales_yield,\n morningstar.valuation_ratios.fcf_yield] \n \n window_length = 1\n \n def compute(self, today, assets, out, book_value, sales, fcf):\n value_table = pd.DataFrame(index=assets)\n value_table[\"book_value\"] = book_value[-1]\n value_table[\"sales\"] = sales[-1]\n value_table[\"fcf\"] = fcf[-1]\n out[:] = value_table.rank().mean(axis=1)\n\nclass Momentum(CustomFactor):\n \n inputs = [USEquityPricing.close]\n window_length = 252\n \n def compute(self, today, assets, out, close): \n out[:] = close[-20] / close[0]\n\nclass MessageVolume(CustomFactor):\n inputs = [stocktwits.total_scanned_messages]\n window_length = 21\n def compute(self, today, assets, out, msgs):\n out[:] = -np.nansum(msgs, axis=0)\n \ndef make_pipeline():\n \"\"\"\n Create and return our pipeline.\n \n We break this piece of logic out into its own function to make it easier to\n test and modify in isolation.\n \n In particular, this function can be copy/pasted into research and run by itself.\n \"\"\"\n pipe = Pipeline()\n \n initial_screen = filter_universe()\n\n factors = {\n \"Message\": MessageVolume(mask=initial_screen),\n \"Momentum\": Momentum(mask=initial_screen),\n \"Value\": Value(mask=initial_screen),\n }\n \n clean_factors = None\n for name, factor in factors.items():\n if not clean_factors:\n clean_factors = 
factor.isfinite()\n else:\n clean_factors = clean_factors & factor.isfinite() \n \n combined_rank = None\n for name, factor in factors.items():\n if not combined_rank:\n combined_rank = factor.rank(mask=clean_factors)\n else:\n combined_rank += factor.rank(mask=clean_factors)\n pipe.add(combined_rank, 'factor')\n\n # Build Filters representing the top and bottom 200 stocks by our combined ranking system.\n # We'll use these as our tradeable universe each day.\n longs = combined_rank.percentile_between(80, 90)\n shorts = combined_rank.percentile_between(10, 20)\n \n pipe.set_screen(longs | shorts)\n \n pipe.add(longs, 'longs')\n pipe.add(shorts, 'shorts')\n return pipe\n\n\ndef initialize(context):\n context.long_leverage = 1.0\n context.short_leverage = -1.0\n context.spy = sid(8554)\n \n attach_pipeline(make_pipeline(), 'ranking_example')\n \n # Used to avoid purchasing any leveraged ETFs \n context.dont_buys = security_lists.leveraged_etf_list.current_securities(get_datetime())\n \n # Schedule my rebalance function\n schedule_function(func=rebalance, \n date_rule=date_rules.month_start (days_offset=0), \n time_rule=time_rules.market_open(hours=0,minutes=30), \n half_days=True)\n \n # Schedule a function to plot leverage and position count\n schedule_function(func=record_vars, \n date_rule=date_rules.every_day(), \n time_rule=time_rules.market_close(), \n half_days=True)\n\ndef before_trading_start(context, data):\n # Call pipeline_output to get the output\n # Note this is a dataframe where the index is the SIDs for all \n # securities to pass my screen and the columns are the factors which\n output = pipeline_output('ranking_example')\n ranks = output['factor']\n \n long_ranks = ranks[output['longs']].rank()\n short_ranks = ranks[output['shorts']].rank()\n\n context.long_weights = (long_ranks / long_ranks.sum())\n log.info(\"Long Weights:\")\n log.info(context.long_weights)\n \n context.short_weights = (short_ranks / short_ranks.sum())\n log.info(\"Short Weights:\")\n log.info(context.short_weights)\n \n context.active_portfolio = context.long_weights.index.union(context.short_weights.index)\n\n\ndef record_vars(context, data): \n \n # Record and plot the leverage, number of positions, and expsoure of our portfolio over time. \n record(num_positions=len(context.portfolio.positions),\n exposure=context.account.net_leverage, \n leverage=context.account.leverage)\n \n\n# This function is scheduled to run at the start of each month.\ndef rebalance(context, data):\n \"\"\"\n Allocate our long/short portfolio based on the weights supplied by\n context.long_weights and context.short_weights.\n \"\"\"\n # Order our longs.\n log.info(\"ordering longs\")\n for long_stock, long_weight in context.long_weights.iterkv():\n if data.can_trade(long_stock):\n if long_stock in context.dont_buys:\n continue\n order_target_percent(long_stock, context.long_leverage * long_weight)\n \n # Order our shorts.\n log.info(\"ordering shorts\")\n for short_stock, short_weight in context.short_weights.iterkv():\n if data.can_trade(short_stock):\n if short_stock in context.dont_buys:\n continue\n order_target_percent(short_stock, context.short_leverage * short_weight)\n \n # Sell any positions in assets that are no longer in our target portfolio.\n for security in context.portfolio.positions:\n if data.can_trade(security): # Work around inability to sell de-listed stocks.\n if security not in context.active_portfolio:\n order_target_percent(security, 0)\n \ndef filter_universe(): \n \"\"\"\n 9 filters:\n 1. 
common stock\n 2 & 3. not limited partnership - name and database check\n 4. database has fundamental data\n 5. not over the counter\n 6. not when issued\n 7. not depository receipts\n 8. primary share\n 9. high dollar volume\n Check Scott's notebook for more details.\n \"\"\"\n common_stock = mstar.share_class_reference.security_type.latest.eq('ST00000001')\n not_lp_name = ~mstar.company_reference.standard_name.latest.matches('.* L[\\\\. ]?P\\.?$')\n not_lp_balance_sheet = mstar.balance_sheet.limited_partnership.latest.isnull()\n have_data = mstar.valuation.market_cap.latest.notnull()\n not_otc = ~mstar.share_class_reference.exchange_id.latest.startswith('OTC')\n not_wi = ~mstar.share_class_reference.symbol.latest.endswith('.WI')\n not_depository = ~mstar.share_class_reference.is_depositary_receipt.latest\n primary_share = IsPrimaryShare()\n \n # Combine the above filters.\n tradable_filter = (common_stock & not_lp_name & not_lp_balance_sheet &\n have_data & not_otc & not_wi & not_depository & primary_share)\n \n high_volume_tradable = AverageDollarVolume(\n window_length=21,\n mask=tradable_filter\n ).rank(ascending=False) < 500\n\n mask = high_volume_tradable\n \n return mask","repo_name":"FrankPSch/quantopian_algos","sub_path":"algorithms/FrankPSch/PsychSignal Sample Algorithm 2.py","file_name":"PsychSignal Sample Algorithm 2.py","file_ext":"py","file_size_in_byte":7477,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"78"} +{"seq_id":"74344307132","text":"import cv2\nimport numpy as np\n#IrisFeatureExtraction.py: filtering the iris and extracting features;\ndef IrisFeatureExtraction(enhanced_iris):\n # Compute the kernel of the defined spatial filter\n def kernel(x, y, f, sigma_x, sigma_y):\n M = np.cos(2 * np.pi * f * np.sqrt(x**2 + y**2))\n G = M * (1 / (2 * np.pi * sigma_x * sigma_y)) * np.exp(- (x**2 / (2 * sigma_x**2) + y**2 / (2 * sigma_y**2)))\n return G\n # Filter the iris image with the given spatial filter\n def filter_iris(ROI, f, sigma_x, sigma_y):\n height, width = ROI.shape\n # Calculate the spatial filter\n spatial_filter = np.array([[kernel(x - width // 2, y - height // 2, f, sigma_x, sigma_y) \n for x in range(width)] \n for y in range(height)])\n \n return cv2.filter2D(ROI, -1, spatial_filter)\n # Extract statistical features from 8x8 blocks\n def extract_features(filtered_irises):\n features = []\n for filtered_iris in filtered_irises:\n height, width = filtered_iris.shape\n block_size = 8\n for y in range(0, height, block_size):\n for x in range(0, width, block_size):\n block = filtered_iris[y:y+block_size, x:x+block_size]\n mean = np.mean(block)\n var = np.var(block)\n features.extend([mean, var])\n return np.array(features)\n # Scale the iris that contains useful information\n ROI = enhanced_iris[:48, :]\n # Set number of the frequency\n f = 3\n # Set number of sigma_x and sigma_y for the two channels\n sigmas = [(5, 7), (8, 6)]\n # Filter the ROI using both channels\n filtered_irises = [filter_iris(ROI, f, sigma_x, sigma_y) for sigma_x, sigma_y in sigmas]\n # Extract and return features\n return extract_features(filtered_irises)","repo_name":"wowNorth0516/Iris_recogonition","sub_path":"IrisFeatureExtraction.py","file_name":"IrisFeatureExtraction.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33081355737","text":"\"\"\"\nServices to do with the necessities of life.\n\"\"\"\n\nfrom dexter.service import Service, 
Handler, Result\nfrom dexter.core.log import LOG\nfrom dexter.core.util import (fuzzy_list_range,\n parse_number,\n to_alphanumeric)\nfrom fuzzywuzzy.process import fuzz\n\nimport json\nimport os\n\n# ----------------------------------------------------------------------\n\nclass _ListAddHandler(Handler):\n \"\"\"\n Add something to the shopping list.\n \"\"\"\n def __init__(self, service, tokens, what, belief):\n \"\"\"\n @see Handler.__init__()\n \"\"\"\n super().__init__(service, tokens, belief, True)\n self._what = what\n\n\n def handle(self):\n \"\"\"\n @see Handler.handle()\n \"\"\"\n try:\n self.service.add(self._what)\n return Result(\n self,\n \"Okay, I added %s to your shopping list\" % (\n ' '.join(self._what)\n ),\n True,\n False\n )\n except Exception as e:\n LOG.error(\"Could not add %s to the shopping list: %s\",\n ' '.join(self._what),\n e)\n return Result(\n self,\n \"Sorry, there was a problem adding %s to your shopping list\" % (\n ' '.join(self._what)\n ),\n False,\n False\n )\n\n\nclass _ListRemoveHandler(Handler):\n \"\"\"\n Remove something from the shopping list.\n \"\"\"\n def __init__(self, service, tokens, what, belief):\n \"\"\"\n @see Handler.__init__()\n \"\"\"\n super().__init__(service, tokens, belief, True)\n self._what = what\n\n\n def handle(self):\n \"\"\"\n @see Handler.handle()\n \"\"\"\n try:\n self.service.remove(self._what)\n return Result(\n self,\n \"Okay, I removed %s from your shopping list\" % (\n ' '.join(self._what)\n ),\n True,\n False\n )\n except Exception as e:\n LOG.error(\"Could not add %s to the shopping list: %s\",\n ' '.join(self._what),\n e)\n return Result(\n self,\n \"Sorry, there was a problem adding %s to your shopping list\" % (\n ' '.join(self._what)\n ),\n False,\n False\n )\n\n\nclass _ListClearHandler(Handler):\n \"\"\"\n Clear the shopping list.\n \"\"\"\n def __init__(self, service, tokens, what, belief):\n \"\"\"\n @see Handler.__init__()\n \"\"\"\n super().__init__(service, tokens, belief, True)\n\n\n def handle(self):\n \"\"\"\n @see Handler.handle()\n \"\"\"\n self.service.clear()\n return Result(\n self,\n \"Okay, I cleared your shopping list\",\n True,\n False\n )\n\n\nclass _GetListHandler(Handler):\n \"\"\"\n Get the shopping list.\n \"\"\"\n def __init__(self, service, tokens, belief):\n \"\"\"\n @see Handler.__init__()\n \"\"\"\n super().__init__(service, tokens, belief, True)\n\n\n def handle(self):\n \"\"\"\n @see Handler.handle()\n \"\"\"\n lst = self.service.get()\n if lst:\n # We have stuff!\n response = \\\n \"Your shopping list contains:\\n%s.\" % (\n ',\\n'.join(\n \"%d %s\" % (\n v,\n (self.service.singularise(k) if v == 1 else\n self.service.pluralise (k))\n ) for (k, v) in lst.items()\n )\n )\n else:\n # Nothing to see here, move along please...\n response = \"Your shopping list is empty\"\n\n # And give it back\n return Result(self, response, True, False)\n\n\nclass ShoppingListService(Service):\n \"\"\"\n A service which simply parrots back what was given to it.\n \"\"\"\n # Some wordlists which we will want to match\n _MY_LIST = 'my shopping list' .split()\n _ON_TO = 'on to my shopping list' .split()\n _TO = 'to my shopping list' .split()\n _ON = 'on my shopping list' .split()\n _OFF = 'off my shopping list' .split()\n _FROM = 'from my shopping list' .split()\n _WHATS_ON = 'whats on my shopping list'.split()\n\n # Things which might be pluralised\n _PLURALS_OF = (\n ('packets of ', 'packet of '),\n ('bags of ', 'bag of '),\n ('boxes of ', 'box of '),\n ('cans of ', 'can of '),\n ('tins of ', 'tin 
of '),\n ('pints of ', 'pint of '),\n ('gallons of ', 'gallon of '),\n )\n\n def __init__(self, state, filename=None):\n \"\"\"\n @see Service.__init__()\n \"\"\"\n super().__init__(\"ShoppingList\", state)\n\n self._filename = str(filename) if filename else None\n self._list = dict()\n\n\n def add(self, what):\n \"\"\"\n Add items to the shopping list.\n \"\"\"\n (item, count) = self._normalise(what)\n if count is None:\n count = 1\n count = self._list.get(item, 0) + count\n self._list[item] = count\n self._save()\n\n\n def remove(self, what):\n \"\"\"\n Remove items from the shopping list.\n \"\"\"\n (item, count) = self._normalise(what)\n if item in self._list:\n if count is None:\n del self._list[item]\n else:\n count = self._list.get(item, 0) - count\n if count > 0:\n self._list[item] = count\n else:\n del self._list[item]\n\n self._save()\n\n\n def clear(self):\n \"\"\"\n Empty the shopping list.\n \"\"\"\n self._list = dict()\n self._save()\n\n\n def get(self):\n \"\"\"\n Get the shopping list. Do not mutate this.\n \"\"\"\n return self._list\n\n\n def evaluate(self, tokens):\n \"\"\"\n @see Service.evaluate()\n \"\"\"\n # A reasonable matching threshold\n threshold = 75\n\n # Render to lower-case, for matching purposes.\n words = self._words(tokens)\n\n # Look for \"Add blah to my shopping list\" or \"Put blah on [to] my shopping\n # list\"\n match = None\n for (action, phrases, handler) in (\n (\"add\", (self._ON_TO, self._TO, self._ON), _ListAddHandler),\n (\"put\", (self._ON_TO, self._TO, self._ON), _ListAddHandler),\n (\"take\", (self._OFF, self._FROM ), _ListRemoveHandler),\n (\"remove\", (self._OFF, self._FROM ), _ListRemoveHandler),\n (\"delete\", (self._OFF, self._FROM ), _ListRemoveHandler),\n (\"clear\", (self._MY_LIST, ), _ListClearHandler),\n (\"reset\", (self._MY_LIST, ), _ListClearHandler),\n ):\n if len(words) >= 4 and words[0] == action:\n # Look to match the phrase\n for phrase in phrases:\n try:\n # Look for the prefix in the words\n (start, end, score) = fuzzy_list_range(words, phrase)\n LOG.debug(\"%s matches %s with from %d to %d with score %d\",\n phrase, words, start, end, score)\n if score >= threshold and \\\n (match is None or match[2] < score):\n LOG.debug(\"Matched '%s' with score %d for '%s'\",\n ' '.join(phrase),\n score,\n ' '.join(words))\n match = (handler, words[1:start], score)\n\n except ValueError:\n pass\n\n # Did we get something?\n if match:\n (handler, what, score) = match\n return handler(self, tokens, what, score/100)\n\n # Now look for a direct query\n for phrase in (['whats', 'on'], ['tell', 'me']):\n phrase = phrase + self._MY_LIST\n try:\n # Look for the prefix in the words\n (start, end, score) = fuzzy_list_range(words, phrase)\n LOG.debug(\"%s matches %s with from %d to %d with score %d\",\n phrase, words, start, end, score)\n if score >= threshold and \\\n start == 0 and end == len(words):\n LOG.debug(\"Matched '%s' with score %d for '%s'\",\n ' '.join(phrase),\n score,\n ' '.join(words))\n return _GetListHandler(self, tokens, score/100)\n\n except ValueError:\n pass\n\n # No match\n return None\n\n\n def pluralise(self, phrase_):\n \"\"\"\n Take the string phrase and ensure that it is plural.\n \"\"\"\n # Null breeds null\n if not phrase_:\n return phrase_\n\n # Normalise\n words = self._listify(phrase_)\n phrase = ' '.join(words)\n\n # Handle 'blah of ...'\n for (plural, singular) in self._PLURALS_OF:\n if phrase.startswith(singular):\n return phrase.replace(singular, plural)\n\n # Just look for an 's' at the end and assume that 
it's fine\n if words[-1].endswith('s'):\n return phrase\n\n # Else we need to put an 's' on the end\n word = words[-1]\n\n # Special plural forms\n if word.endswith('tch') or word.endswith('ss'):\n # 'watch' -> 'watches'\n # 'baroness' -> 'baronesses'\n words[-1] = word + 'es'\n elif word.endswith('y'):\n word = word[:-1] + 'ies'\n else:\n # Just add the 's'\n words[-1] = word + 's'\n\n # Rebuild the phrase and give it back\n return ' '.join(words)\n\n\n def singularise(self, phrase_):\n \"\"\"\n Take the phrase and ensure that it is singularise.\n \"\"\"\n # Null breeds null\n if not phrase_:\n return phrase_\n\n # Normalise\n words = self._listify(phrase_)\n phrase = ' '.join(words)\n\n # Handle 'blahs of ...'\n for (plural, singular) in self._PLURALS_OF:\n if phrase.startswith(plural):\n return phrase.replace(plural, singular)\n\n # Plurals end with 's' most of the time; we currently don't handle\n # things like 'funga -> fungi'\n if words[-1].endswith('s'):\n word = words[-1]\n # Watch out for things like \"needless\" becoming \"needles\"\n if word.endswith('ss'):\n # You can't touch this (dah dah-dah dum, tsch-tsch, dah-daaah\n # dum)\n pass\n\n elif word.endswith('tches') or word.endswith('sses'):\n # 'watches' -> 'watch'\n # 'baronesses' -> 'baroness'\n words[-1] = word[:-2]\n\n elif word.endswith('ies'):\n if word in ('cookies',):\n # Just strip the 's' from these\n words[-1] = word[:-1]\n else:\n words[-1] = word[:-3] + 'y'\n else:\n # Just strip the 's'\n words[-1] = word[:-1]\n\n # Rebuild the phrase and give it back\n return ' '.join(words)\n\n # Probably fine then, give back the original\n return phrase_\n\n\n def _start(self):\n \"\"\"\n See `Component._start()`.\n \"\"\"\n self._load()\n\n\n def _stop(self):\n \"\"\"\n See `Component._stop()`.\n \"\"\"\n self._saveload()\n\n\n def _listify(self, phrase):\n \"\"\"\n Take a pharse and ensure it's a list of words.\n \"\"\"\n if isinstance(phrase, str):\n return phrase.split()\n elif not isinstance(phrase, list):\n return list(phrase)\n else:\n return phrase\n\n\n def _normalise(self, phrase):\n \"\"\"\n Take a phrase and ensure it's something which can be parsed and used as a\n key in our shopping list dictionary. This means turning articles like\n 'a' into '1', number words into their numeric form, and ensuring\n everything is singular.\n\n We then break these up into an amount and the item name and give them\n back.\n \"\"\"\n # Null breeds null\n if not phrase:\n return phrase\n\n # Render it into a list of words, and as a string\n words = self._listify(phrase)\n phrase = ' '.join(words)\n\n # Process the phrase into normlised words\n words_ = []\n for word in words:\n # To lower case\n word = word.lower()\n\n # Get rid of punctuation etc.\n word = to_alphanumeric(word)\n\n # Turn things into numbers\n if word in ('a', 'an'):\n word = '1'\n else:\n value = parse_number(word)\n if value is not None:\n word = str(value)\n\n # Safe to append\n words_.append(word)\n words = words_\n\n # Preprocess for groups of words which have meaning. 
Note that any 'a'\n # will have been turned into a '1' above.\n and_a_half = (('and', '1', 'half'), '.5')\n and_a_quarter = (('and', '1', 'quarter'), '.25')\n for (group, frac) in (and_a_half, and_a_quarter):\n try:\n # See if we have it\n (start, end, score) = fuzzy_list_range(words, group)\n if start > 0 and end < (len(words)-1) and score > 75:\n # Splice in the fraction\n words = (words[:start-1] +\n [words[start-1] + frac] +\n words[end:])\n except ValueError:\n pass\n\n # And '1 dozen' becomes 12 etc.\n for i in range(1, 13):\n try:\n # See if we have it\n (start, end, score) = fuzzy_list_range(words, (str(i), 'dozen'))\n if end < len(words) and score > 75:\n # Replace it\n words = (words[:start] + [str(12 * i)] + words[end:])\n except ValueError:\n pass\n\n # Handle things like \"packets of crisps\" or \"bags of chips\"\n try:\n # Look for 'of'\n idx = words.index('of')\n if idx > 0 and idx < len(words-1):\n # Make sure we have \"bag of chips\" not \"bags of chips\"\n words[idx-1] = self.singularise(words[idx-1])\n except ValueError:\n # No \"of\" in there, we'll assume that the last entry is what needs\n # to be singularised\n words[-1] = self.singularise(words[-1])\n\n # We should have a normalised set of words now. Now we check to see if\n # the first word was a number.\n try:\n # Get it as a number, else throw an exception. Likely we want it to\n # be an int if we can.\n count = float(words[0])\n if int(count) == count:\n count = int(count)\n\n # And the rest of the words make up the key\n what = ' '.join(words[1:])\n\n except ValueError:\n # Nope, so no number that we know of\n count = None\n what = ' '.join(words)\n\n # Okay, we now have an item and its amount, so give them back\n return (what, count)\n\n\n def _save(self):\n \"\"\"\n Save our current list to disk, if we have a filename.\n \"\"\"\n if self._filename:\n try:\n with open(self._filename, 'w') as fh:\n json.dump(self._list, fh)\n LOG.info(\"Saved to %s\", self._filename)\n except Exception as e:\n LOG.error(\"Error saving to %s: %s\", self._filename, e)\n\n\n def _load(self):\n \"\"\"\n Load our list from disk, if we have a filename.\n \"\"\"\n if self._filename and os.path.exists(self._filename):\n try:\n with open(self._filename, 'r') as fh:\n self._list = json.load(fh)\n LOG.info(\"Loaded from %s\", self._filename)\n except Exception as e:\n LOG.error(\"Error loading from %s: %s\", self._filename, e)\n","repo_name":"iamsrp/dexter","sub_path":"service/life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":16746,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"78"} +{"seq_id":"450711479","text":"# from celery.decorators import task\nfrom celery import shared_task\nfrom django.conf import settings\nimport requests\nimport grequests\nimport celery\nimport datetime\nfrom celery.task.base import periodic_task\nimport datetime\nfrom bs4 import BeautifulSoup\nfrom . 
import models\n\n\n@shared_task()\ndef fetch_and_insert_in_db(x):\n if x is None:\n return None\n soup = BeautifulSoup(x.text, \"html.parser\")\n try:\n title = soup.select_one('head > title').text.replace(\"Poorly Drawn Lines – \", \"\")\n image = soup.select_one('.post > p > img').attrs['src']\n description = soup.select_one('.post > p > img').attrs['alt']\n except AttributeError as e:\n print(x)\n\n return None\n\n if not models.Comic.objects.filter(title=title).exists():\n comic = models.Comic()\n comic.description = description\n comic.title = title\n comic.image = image\n comic.link = x.url\n\n return comic\n else:\n\n return None\n\n\n@periodic_task(run_every=datetime.timedelta(hours=5))\ndef fetch_comics():\n print(\"fething ...............\")\n response = requests.get(settings.ARCHIVE_URL)\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n all_comics = soup.select('.content > ul > li > a')\n\n # to_fetch = grequests.map((grequests.get(x.attrs['href']) for x in all_comics))\n # TODO: 502/503 while running asyncronoously use syncronous requets?\n\n print(\"fetched now saving....\")\n\n for index, x in enumerate(all_comics):\n res = requests.get(x.attrs['href'])\n comic = fetch_and_insert_in_db(res)\n\n if comic:\n comic.save()\n\n print(index)\n\n\n# @periodic_task(run_every=datetime.timedelta(hours=5))\n# def fetch_comics():\n# print(\"fething ...............\")\n# response = requests.get(settings.ARCHIVE_URL)\n#\n# soup = BeautifulSoup(response.text, \"html.parser\")\n#\n# all_comics = soup.select('.content > ul > li > a')\n#\n# # to_fetch = grequests.map((grequests.get(x.attrs['href']) for x in all_comics))\n# # TODO: 502/503 while running asyncronoously use syncronous requets?\n# to_fetch = [requests.get(x.attrs['href']) for x in all_comics]\n# print(\"fetched now saving....\")\n#\n# comiccs = [fetch_and_insert_in_db(x) for x in to_fetch]\n# comicsave = [x for x in comiccs if x is not None]\n# print(len(comicsave))\n#\n# models.Comic.objects.bulk_create(comicsave)\n\n\ndef f():\n print(\"fething ...............\")\n response = requests.get(settings.ARCHIVE_URL)\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n all_comics = soup.select('.content > ul > li > a')\n\n # to_fetch = grequests.map((grequests.get(x.attrs['href']) for x in all_comics))\n # TODO: 502/503 while running asyncronoously use syncronous requets?\n\n print(\"fetched now saving....\")\n\n for index, x in enumerate(all_comics):\n res = requests.get(x.attrs['href'])\n comic = fetch_and_insert_in_db(res)\n\n if comic:\n comic.save()\n\n print(index)\n","repo_name":"othreecodes/Poorly-Drawn-API","sub_path":"poorlydrawn/api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"41020175126","text":"#!/usr/bin/env python3 \n\nimport socket\nimport sys\nimport time\n\nHOST = '127.0.0.1'\nPORT = 65432\n\nOC = (input('Enter an operator (+, -, *, /): ')).encode()\nfirst_num = (input('Enter first number: ')).encode()\nsecond_num = (input('Enter second number: ')).encode()\n\n# Creates socket\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# Sends data stream\ns.sendto(OC, (HOST, PORT))\ns.sendto(first_num, (HOST, PORT))\ns.sendto(second_num, (HOST, PORT))\n\n# Receives status code, result\n\nd = 0.1\nwhile d < 2:\n status, addr = s.recvfrom(1024)\n result, addr = s.recvfrom(1024)\n\n # Timer\n time.sleep(d)\n if status:\n # Successful \n print('Status: ' + 
status.decode())\n print('Result: ' + result.decode())\n break\n\n # Timer expires\n d = 2*d\n\nif d >= 2:\n raise Exception(\"Dropped packet!\")\n\ns.close()\n \n","repo_name":"thomasbui1997/tcp_udp","sub_path":"udp2_client.py","file_name":"udp2_client.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71050115451","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n\n# For example, here's several helpful packages to load in \n\n\n\nimport numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier, ExtraTreesClassifier,BaggingClassifier\n\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom lightgbm import LGBMClassifier\n\n# Input data files are available in the \"../input/\" directory.\n\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport seaborn as sns\n\nimport matplotlib.pyplot as plt\n\n\nimport warnings\n\nwarnings.simplefilter(\"ignore\")\n\nimport os\n\nprint(os.listdir(\"../input\"))\n\n\n\n# Any results you write to the current directory are saved as output.\nX_train = pd.read_csv(\"../input/X_train.csv\")\n\nX_test = pd.read_csv(\"../input/X_test.csv\")\n\ny_train = pd.read_csv(\"../input/y_train.csv\")\nX_train.head()\ny_train.head()\nfrom scipy.stats import kurtosis\n\nfrom scipy.stats import skew\n\ndef quaternion_to_euler(x, y, z, w):\n\n import math\n\n t0 = +2.0 * (w * x + y * z)\n\n t1 = +1.0 - 2.0 * (x * x + y * y)\n\n X = math.atan2(t0, t1)\n\n\n\n t2 = +2.0 * (w * y - z * x)\n\n t2 = +1.0 if t2 > +1.0 else t2\n\n t2 = -1.0 if t2 < -1.0 else t2\n\n Y = math.asin(t2)\n\n\n\n t3 = +2.0 * (w * z + x * y)\n\n t4 = +1.0 - 2.0 * (y * y + z * z)\n\n Z = math.atan2(t3, t4)\n\n\n\n return X, Y, Z\n\n\n\ndef generate_features(data):\n\n new_data = pd.DataFrame()\n\n data['total_angular_velocity'] = (data['angular_velocity_X'] ** 2 + data['angular_velocity_Y'] ** 2 + data['angular_velocity_Z'] ** 2) ** 0.5\n\n data['total_linear_acceleration'] = (data['linear_acceleration_X'] ** 2 + data['linear_acceleration_Y'] ** 2 + data['linear_acceleration_Z'] ** 2) ** 0.5\n\n \n\n data['acc_vs_vel'] = data['total_linear_acceleration'] / data['total_angular_velocity']\n\n \n\n x, y, z, w = data['orientation_X'].tolist(), data['orientation_Y'].tolist(), data['orientation_Z'].tolist(), data['orientation_W'].tolist()\n\n nx, ny, nz = [], [], []\n\n for i in range(len(x)):\n\n xx, yy, zz = quaternion_to_euler(x[i], y[i], z[i], w[i])\n\n nx.append(xx)\n\n ny.append(yy)\n\n nz.append(zz)\n\n \n\n data['euler_x'] = nx\n\n data['euler_y'] = ny\n\n data['euler_z'] = nz\n\n \n\n data['total_angle'] = (data['euler_x'] ** 2 + data['euler_y'] ** 2 + data['euler_z'] ** 2) ** 5\n\n data['angle_vs_acc'] = data['total_angle'] / data['total_linear_acceleration']\n\n data['angle_vs_vel'] = data['total_angle'] / data['total_angular_velocity']\n\n \n\n def mean_change_of_abs_change(x):\n\n return np.mean(np.diff(np.abs(np.diff(x))))\n\n\n\n def mean_abs_change(x):\n\n return np.mean(np.abs(np.diff(x)))\n\n \n\n for col in data.columns:\n\n if col in ['row_id', 'series_id', 
'measurement_number']:\n\n continue\n\n new_data[col + '_mean'] = data.groupby(['series_id'])[col].mean()\n\n new_data[col + '_min'] = data.groupby(['series_id'])[col].min()\n\n new_data[col + '_max'] = data.groupby(['series_id'])[col].max()\n\n new_data[col + '_std'] = data.groupby(['series_id'])[col].std()\n\n new_data[col + '_max_to_min'] = new_data[col + '_max'] / new_data[col + '_min']\n\n new_data[col + '_kurtosis'] = data.groupby('series_id')[col].apply(lambda x: kurtosis(x))\n\n new_data[col + '_skew'] = data.groupby('series_id')[col].apply(lambda x: skew(x))\n\n \n\n # 1st order derivative\n\n new_data[col + '_mean_abs_change'] = data.groupby('series_id')[col].apply(mean_abs_change)\n\n \n\n # 2nd order derivative\n\n new_data[col + '_mean_change_of_abs_change'] = data.groupby('series_id')[col].apply(mean_change_of_abs_change)\n\n \n\n new_data[col + '_abs_max'] = data.groupby('series_id')[col].apply(lambda x: np.max(np.abs(x)))\n\n new_data[col + '_abs_min'] = data.groupby('series_id')[col].apply(lambda x: np.min(np.abs(x)))\n\n\n\n return new_data\nX_train = generate_features(X_train)\n\nX_test = generate_features(X_test)\nlabel_encoder = LabelEncoder()\n\ny_train['surface'] = label_encoder.fit_transform(y_train['surface'])\nfolds = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)\n# https://www.kaggle.com/emanueleamcappella/random-forest-hyperparameters-tuning\n\nclassifier = [RandomForestClassifier,AdaBoostClassifier, ExtraTreesClassifier,BaggingClassifier, DecisionTreeClassifier]\n\nclassifier_avg = []\n\nfor model in classifier:\n\n print(\"Model : {}\". format(model))\n\n submission_predictions = np.zeros((X_test.shape[0], 9))\n\n oof_predictions = np.zeros((X_train.shape[0]))\n\n score = 0\n\n for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train['surface'])):\n\n clf = model()\n\n clf.fit(X_train.iloc[trn_idx], y_train['surface'][trn_idx])\n\n oof_predictions[val_idx] = clf.predict(X_train.iloc[val_idx])\n\n submission_predictions += clf.predict_proba(X_test) / folds.n_splits\n\n score += clf.score(X_train.iloc[val_idx], y_train['surface'][val_idx])\n\n print('Fold: {} score: {}'.format(fold_,clf.score(X_train.iloc[val_idx], y_train['surface'][val_idx])))\n\n print('Avg Accuracy', score / folds.n_splits)\n\n classifier_avg.append(score / folds.n_splits)\ntemp = pd.DataFrame()\n\ntemp[\"Classifier\"] = classifier\n\ntemp[\"Average\"] = classifier_avg\nplt.figure(figsize = (20,8))\n\nsns.barplot(y = temp[\"Classifier\"], x = temp[\"Average\"], orient='h')\nsubmission_predictions = np.zeros((X_test.shape[0], 9))\n\noof_predictions = np.zeros((X_train.shape[0]))\n\nscore = 0\n\nfor fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train['surface'])):\n\n clf = ExtraTreesClassifier(n_estimators=2000, n_jobs=-1)\n\n clf.fit(X_train.iloc[trn_idx], y_train['surface'][trn_idx])\n\n oof_predictions[val_idx] = clf.predict(X_train.iloc[val_idx])\n\n submission_predictions += clf.predict_proba(X_test) / folds.n_splits\n\n score += clf.score(X_train.iloc[val_idx], y_train['surface'][val_idx])\n\n print('Fold: {} score: {}'.format(fold_,clf.score(X_train.iloc[val_idx], y_train['surface'][val_idx])))\n\nprint('Avg Accuracy', score / folds.n_splits)\nsubmission = pd.read_csv('../input/sample_submission.csv')\n\nsubmission['surface'] = label_encoder.inverse_transform(submission_predictions.argmax(axis=1))\n\nsubmission.to_csv('submission.csv', 
index=False)\n\nsubmission.head()","repo_name":"aorursy/new-nb-1","sub_path":"ashishpatel26_best-model-checking-extratree-classifier.py","file_name":"ashishpatel26_best-model-checking-extratree-classifier.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21610789616","text":"import os\nimport random\nimport tarfile\n\nimport pandas as pd\nimport torch\nfrom pygod.generator import gen_contextual_outliers, gen_structural_outliers\nfrom torch_geometric.data import Dataset, Data\nfrom torch_geometric.data.data import BaseData\n\n\nclass _StreamSpotEdge:\n def __init__(self, departure_node, departure_node_attr, destination_node, destination_node_attr, edge_attr,\n graph_num):\n self.departure_node = departure_node\n self.departure_node_attr = departure_node_attr\n self.destination_node = destination_node\n self.destination_node_attr = destination_node_attr\n self.edge_attr = edge_attr\n self.graph_num = graph_num\n\n\nedge_type = list(range(0, 29))\n\n\ndef expand_tensor(t: torch.Tensor, edge_cnt: int) -> torch.Tensor:\n assert t.dim() == 2 and t.shape[1] == 1, \"t must be a 2D tensor with only 1 column\"\n assert edge_cnt >= t.shape[0], \"E must be greater than or equal to the number of rows in t\"\n padding = torch.tensor([[random.choice(edge_type)] for _ in range(edge_cnt - t.shape[0])])\n return torch.cat((t, padding), dim=0)\n\n\n# Node Type 5 Edge Type 29\ndef convert_to_type(char):\n if 'a' <= char <= 'e':\n return ord(char) - ord('a')\n elif 'f' <= char <= 'z':\n return ord(char) - ord('f')\n elif 'A' <= char <= 'H':\n return ord(char) - ord('A') + 21\n else:\n return None\n\n\n# 0~299 400~599 为正常数据集\n# 300~399 为异常数据集\n# 该数据集为正常数据集\nclass OriginalStreamSpotDataset(Dataset):\n def __init__(self, root, transform=None, pre_transform=None, pre_filter=None, database_tar=\"all.tar.gz\"):\n self.database_tar = database_tar\n super().__init__(root, transform, pre_transform, pre_filter)\n\n @property\n def raw_dir(self) -> str:\n return self.root\n\n @property\n def raw_file_names(self):\n return [self.database_tar]\n\n @property\n def processed_file_names(self):\n return [f'streamspot_RGAT_{idx}.pt' for idx in range(0, 600)]\n\n def _tsv_reader(self):\n with tarfile.open(os.path.join(self.root, self.database_tar)) as tar:\n with tar.extractfile(\"all.tsv\") as f:\n tsv_iter = pd.read_csv(f, sep='\\t', chunksize=500000, header=None)\n for chunk in tsv_iter:\n for row in chunk.itertuples():\n yield _StreamSpotEdge(int(row[1]), convert_to_type(str(row[2])),\n int(row[3]), convert_to_type(str(row[4])),\n convert_to_type(str(row[5])), int(row[6]))\n\n def _create_tensor(self, node_attr, edge_list, edge_attr, current_map):\n tensor_x = torch.tensor(node_attr, dtype=torch.long)\n tensor_edge_index = torch.tensor(edge_list, dtype=torch.long).t()\n tensor_edge_attr = torch.tensor(edge_attr, dtype=torch.long)\n if current_map // 100 == 3: # 300-399 为Attack数据集\n tensor_y = torch.ones([len(node_attr), 1])\n else:\n tensor_y = torch.zeros([len(node_attr), 1])\n data = Data(x=tensor_x, edge_index=tensor_edge_index, edge_attr=tensor_edge_attr, y=tensor_y)\n\n if self.pre_filter is not None and not self.pre_filter(data):\n return\n\n if self.pre_transform is not None:\n data = self.pre_transform(data)\n\n torch.save(data, os.path.join(self.processed_dir, f'streamspot_RGAT_{current_map}.pt'))\n print(\"Generate original graph %d\" % current_map)\n\n def process(self):\n node_id_convert_dict = {} # 
将传入GNN网络的网络节点ID转化为节点ID\n node_attr = [] # 节点性质,为ASCII码\n edge_list = [] # data.edge_index 的转置列表\n edge_attr = [] # 边性质,为ASCII码\n\n current_map = 0\n node_cnt = 0\n\n for edge in self._tsv_reader():\n if current_map != edge.graph_num:\n self._create_tensor(node_attr, edge_list, edge_attr, current_map)\n # 清理工作\n current_map = edge.graph_num\n node_id_convert_dict.clear()\n node_attr.clear()\n edge_list.clear()\n edge_attr.clear()\n node_cnt = 0\n\n # 检查起始节点是否在记录中\n if edge.departure_node not in node_id_convert_dict:\n node_id_convert_dict[edge.departure_node] = node_cnt\n node_attr.append([edge.departure_node_attr])\n depart_id = node_cnt\n node_cnt = node_cnt + 1\n else:\n depart_id = node_id_convert_dict[edge.departure_node]\n # 检查终止节点是否在记录中\n if edge.destination_node not in node_id_convert_dict:\n node_id_convert_dict[edge.destination_node] = node_cnt\n node_attr.append([edge.destination_node_attr])\n dest_id = node_cnt\n node_cnt = node_cnt + 1\n else:\n dest_id = node_id_convert_dict[edge.destination_node]\n edge_list.append((depart_id, dest_id))\n edge_attr.append([edge.edge_attr])\n\n self._create_tensor(node_attr, edge_list, edge_attr, current_map)\n\n def len(self):\n return 600\n\n def get(self, idx):\n return torch.load(os.path.join(self.processed_dir, f'streamspot_RGAT_{idx}.pt'))\n\n\n# StreamSpot 正常数据集\nclass NormalStreamSpotDataset(OriginalStreamSpotDataset):\n def __init__(self, root, transform=None, pre_transform=None, pre_filter=None, database_tar=\"all.tar.gz\"):\n self.database_tar = database_tar\n super().__init__(root, transform, pre_transform, pre_filter, database_tar=database_tar)\n\n def len(self):\n return 500\n\n def get(self, idx):\n if idx >= 300:\n idx = idx + 100\n return torch.load(os.path.join(self.processed_dir, f'streamspot_RGAT_{idx}.pt'))\n\n\n# StreamSpot 异常数据集\nclass MaliciousStreamSpotDataset(OriginalStreamSpotDataset):\n def __init__(self, root, transform=None, pre_transform=None, pre_filter=None, database_tar=\"all.tar.gz\"):\n self.database_tar = database_tar\n super().__init__(root, transform, pre_transform, pre_filter, database_tar=database_tar)\n\n def len(self):\n return 100\n\n def get(self, idx):\n return torch.load(os.path.join(self.processed_dir, f'streamspot_RGAT_{idx + 300}.pt'))\n\n\n# 生成的异常数据集\nclass GeneratedStreamSpotDataset(Dataset):\n def __init__(self, root, transform=None, pre_transform=None, pre_filter=None, database_tar=\"all.tar.gz\"):\n self.database_tar = database_tar\n super().__init__(root, transform, pre_transform, pre_filter)\n\n @property\n def raw_dir(self) -> str:\n return self.root\n\n @property\n def raw_file_names(self):\n return [f'streamspot_RGAT_{idx}.pt' for idx in range(0, 500)]\n\n def download(self):\n NormalStreamSpotDataset(root=self.root, database_tar=self.database_tar)\n\n @property\n def processed_file_names(self):\n return [f'streamspot_gen_context_RGAT_{idx}.pt' for idx in range(0, 500)] + [\n f'streamspot_gen_structural_RGAT_{idx}.pt' for idx in range(0, 500)]\n\n def process(self):\n # 构造异常图 id:600~1199 语义异常图 id:1200~1799 结构异常图\n for idx in list(range(0, 500)):\n # 构造异常图\n graph = torch.load(os.path.join(self.processed_dir, f'streamspot_RGAT_{idx}.pt'))\n graph.x = graph.x.float()\n graph.y = torch.zeros([graph.x.size(dim=0), 1]) # Create Fake y\n graph, _ = gen_contextual_outliers(graph, n=100, k=50)\n graph.x = graph.x.long()\n graph.y = torch.ones([graph.x.size(dim=0), 1])\n torch.save(graph, os.path.join(self.processed_dir, f'streamspot_gen_context_RGAT_{idx}.pt'))\n\n # 构造语义异常图\n graph = 
torch.load(os.path.join(self.processed_dir, f'streamspot_RGAT_{idx}.pt'))\n graph.x = graph.x.float()\n graph.y = torch.zeros([graph.x.size(dim=0), 1]) # Create Fake y\n graph, _ = gen_structural_outliers(graph, m=10, n=10)\n graph.x = graph.x.long()\n graph.y = torch.ones([graph.x.size(dim=0), 1])\n # 补齐边\n graph.edge_attr = expand_tensor(graph.edge_attr, graph.edge_index.shape[1])\n torch.save(graph, os.path.join(self.processed_dir, f'streamspot_gen_structural_RGAT_{idx}.pt'))\n print(\"Generated Outlier graph %d\" % idx)\n\n def len(self) -> int:\n return 1000\n\n def get(self, idx: int) -> BaseData:\n if idx < 500:\n return torch.load(os.path.join(self.processed_dir, f'streamspot_gen_context_RGAT_{idx}.pt'))\n else:\n return torch.load(os.path.join(self.processed_dir, f'streamspot_gen_structural_RGAT_{idx - 500}.pt'))\n","repo_name":"6QHTSK/HUSTCSE-Graduation-Project","sub_path":"data/streamspot_RGAT.py","file_name":"streamspot_RGAT.py","file_ext":"py","file_size_in_byte":8941,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"22663553329","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom cryptography.fernet import Fernet\nfrom PIL import Image, ImageDraw, ImageFont\nimport numpy as np\nimport io\nimport os\nimport glob\nimport math\nimport time\nimport progressbar\nimport threading\nimport random\nimport image_slicer\nfrom image_slicer import join\nimport collections\n\n# Get the number of CPUs\n# in the system using\n# os.cpu_count() method\ncpuCount = os.cpu_count()\n\nexitFlag = 0\n\n# initialize lists to hold execution times of encryption and decryption\nenc_times = []\ndec_times = []\n#have thread list\nthreads = []\n#have tile map (for keeping track of the shape and dimensions before encryption)\ntilesMap = {}\n\n#thread class\nclass encryptionThread (threading.Thread):\n def __init__(self, threadID, chunk, key, tile, encryptionMode):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.chunk = chunk\n self.key = key\n self.tile = tile\n self.encryptionMode = encryptionMode\n def run(self):\n if self.encryptionMode == 0: \n print (\"Starting encryption on chunk \" + str(self.threadID) + \"\\n\")\n encrypt_chunk(self.chunk, self.key, self.tile)\n print (\"Finished encrypting chunk \" + str(self.threadID) + \"\\n\")\n else:\n print (\"Starting decryption on chunk \" + str(self.threadID) + \"\\n\")\n decrypt_chunk(self.chunk, self.key, self.tile)\n print (\"Finished decrypting chunk \" + str(self.threadID) + \"\\n\")\n\ndef create_image(cyphertext, tile, mode):\n if mode == 0:\n c_size = len(cyphertext)\n c_pixels = int((c_size+2)/3)\n W = H = int(math.ceil(c_pixels ** 0.5))\n\n data = cyphertext + b'\\0' * (W*H*3 - len(cyphertext))\n cypherpic = Image.frombytes('RGB', (W, H), data)\n tile.image = cypherpic\n else:\n clearimage = Image.fromarray(np.frombuffer(cyphertext, dtype=tilesMap[tile.number].datatype).reshape(tilesMap[tile.number].shape))\n tile.image = clearimage\n\ndef encrypt_chunk(msg, key, tile):\n # get the cyphertext and the execution time from the encryption function\n cyphertext, encryption_time = encrypt(msg, key)\n # append the execution time to the respective list\n enc_times.append(encryption_time)\n #create the image and modify the tile\n create_image(cyphertext, tile, 0)\n\ndef decrypt_chunk(msg, key, tile):\n # get the cleartext (decrypted bytearray/text) and the execution time from the decryption function\n cleartext, decryption_time = decrypt(msg, key)\n # append the 
execution time to the respective list\n dec_times.append(decryption_time)\n #create the image and modify the tile\n create_image(cleartext, tile, 1)\n\n#global path definitions\nO_path = os.path.join(\"./\", \"Original_Images\")\nE_path = os.path.join(\"./\", \"Encrypted_Images\")\nD_path = os.path.join(\"./\", \"Decrypted_Images\")\n\n# Wrapper for PIL images \nclass Image_Data:\n def __init__(self, image):\n self.image = image\n self.f_name = image.filename\n self.b_array = np.array(image).tobytes()\n self.shape = np.array(image).shape\n self.datatype = np.array(image).dtype.name\n\n# Wrapper for PIL images \nclass Tile_Data:\n def __init__(self, image, num):\n self.image = image\n self.b_array = np.array(image).tobytes()\n self.shape = np.array(image).shape\n self.datatype = np.array(image).dtype.name\n self.tileNumber = num\n\n# Generate Image_Data list\ndef get_images(path):\n img_dir = path\n data_path = os.path.join(img_dir,'*g') \n files = glob.glob(data_path) \n img_arr = []\n\n # initialize a progress bar for loading images\n widgets = ['Loading Images: ', progressbar.Bar('█'),' (', progressbar.ETA(), ') ',]\n bar = progressbar.ProgressBar(28, widgets = widgets).start()\n i = 0\n\n # loop through the images and append the newly created PIL image to the Image_Data list\n for f in files:\n i += 1 \n img = Image.open(f)\n img_arr.append(Image_Data(img))\n bar.update(i)\n print(\"\\n\\n\")\n\n return img_arr\n\n# Fernent setup\ndef get_key():\n key = Fernet.generate_key()\n with open('secret.key', 'wb') as new_key_file:\n new_key_file.write(key)\n\n print(\"Key: \" + str(key) + \"\\n\")\n\n return Fernet(key)\n\n# Create/Empty directories for the (marked) original images, encrypted images, and decrypted images\n# Note that the images have the same original filenames to keep track\ndef setup_directories():\n if os.path.isdir(O_path):\n try:\n files = glob.glob(O_path+'/*')\n for f in files:\n os.remove(f)\n except OSError as e:\n print(\"Error: %s : %s\" % (O_path, e.strerror))\n else:\n try:\n os.mkdir(O_path)\n except OSError as e:\n print(\"Error: %s : %s\" % (O_path, e.strerror))\n\n if os.path.isdir(E_path):\n try:\n files = glob.glob(E_path+'/*')\n for f in files:\n os.remove(f)\n except OSError as e:\n print(\"Error: %s : %s\" % (E_path, e.strerror))\n else:\n try:\n os.mkdir(E_path)\n except OSError as e:\n print(\"Error: %s : %s\" % (E_path, e.strerror))\n\n if os.path.isdir(D_path):\n try:\n files = glob.glob(D_path+'/*')\n for f in files:\n os.remove(f)\n except OSError as e:\n print(\"Error: %s : %s\" % (D_path, e.strerror))\n else:\n try:\n os.mkdir(D_path)\n except OSError as e:\n print(\"Error: %s : %s\" % (D_path, e.strerror))\n\n# Helper function for build_and_save()\n# Write text on the images and save them to their respective directories\ndef label_and_save(image, label, filename, save_dir):\n d = ImageDraw.Draw(image)\n d.text((28,36), label, font=ImageFont.truetype(font=\"arial.ttf\", size=40), fill=(255,0,0))\n #image.save(save_dir + \"/\" + filename.replace(\"./SmallSet_Images/\", ''))\n image.save(save_dir + \"/\" + str(random.randint(1, 1000000)) + \".jpg\")\n\n# Create the images from the bytearray and save them to their respective directories for logging\ndef build_and_save(i_data, image, mode):\n if mode == 0: # encrypted image\n label_and_save(image, \"Encrypted Image\", i_data.f_name, E_path)\n elif mode == 1: #decrypted image\n #output = Image.fromarray(np.frombuffer(b_text, dtype=i_data.datatype).reshape(i_data.shape))\n label_and_save(image, 
\"Decrypted Image\", i_data.f_name, D_path)\n\n# Encrypt plaintext and get execution time\ndef encrypt(message, F):\n timer1 = time.time()\n c_text = F.encrypt(message)\n extime = round(time.time() - timer1, 4)\n\n return c_text, extime\n\n# Decrypt cyphertext and get execution time\ndef decrypt(cypher, F):\n timer2 = time.time()\n p_text = F.decrypt(cypher)\n extime = round(time.time() - timer2, 4)\n\n return p_text, extime\n\n# print out the number of images used, and the average encryption and decryption times with a decimal precision of 4\ndef print_results(num_images, e_times, d_times):\n print(\"Number of images: \"+str(num_images)+\"\\n\")\n print(\"Average Encryption Time: \"+str(round(np.mean(e_times), 4) * cpuCount)+\" seconds\\n\")\n print(\"Average Decryption Time: \"+str(round(np.mean(d_times), 4) * cpuCount)+\" seconds\\n\")\n\n# main driver code\ndef main():\n # Print the number of\n # CPUs in the system\n print(\"Number of CPUs in the system:\", cpuCount)\n\n # for debugging use the small dataset\n images = get_images(\"./SmallSet_Images/\")\n\n # uncomment the following line to do testing on larger dataset\n # images = get_images(\"./Sample_Images/\")\n\n # if directories already exist, empty them, else create them\n setup_directories()\n\n # get the key\n k = get_key()\n\n # initialize progress bar\n widgets = ['Batch Encryption/Decryption: ', progressbar.Bar('█'),' (', progressbar.ETA(), ') ',]\n bar = progressbar.ProgressBar(28, widgets = widgets).start()\n t = 0\n\n # loop through loaded images and run encryption and decryption\n # timing information is gathered in the encrypt() and decrypt() functions\n for i in images:\n\n #slice up the image according to the number of cpu threads you have\n tiles = image_slicer.slice(i.f_name, cpuCount, save=False)\n\n t += 1\n\n # mark and save the input image to the respective directory\n label_and_save(i.image, \"Original Image\", i.f_name, O_path)\n\n for tile in tiles:\n #save tile data\n tilesMap[tile.number] = Tile_Data(tile.image, tile.number)\n # initialize message variable for input to encryption function (turn each tile into a byte array)\n msg = np.array(tile.image).tobytes()\n #create new thread & pass along the chunk with the key\n newThread = encryptionThread(tile.number, msg, k, tile, 0)\n newThread.start()\n threads.append(newThread)\n \n #wait for all of the threads to finish their work\n print(\"Waiting for all the threads to finish...\")\n for thread in threads:\n thread.join()\n\n # join all of the tiles and save it to its respective directory\n image = join(tiles)\n image.save(\".\\\\Encrypted_Images\\\\\" + str(random.randint(1, 1000000)) + \".png\")\n #build_and_save(i, image, 0)\n\n #start decryption process\n for tile in tiles:\n msg = np.array(tile.image).tobytes()\n\n \n threads[tile.number - 1] = encryptionThread(tile.number, msg, k, tile, 1)\n threads[tile.number - 1].start()\n\n print(\"Number of threads active: \" + str(threading.active_count()))\n \n #wait for all of the threads to finish their work\n print(\"Waiting for all the threads to finish...\")\n for thread in threads:\n thread.join()\n \n image = join(tiles)\n image.save(\".\\\\Decrypted_Images\\\\\" + str(random.randint(1, 1000000)) + \".png\")\n \n # create an image from the cleartext and save it to its respective directory\n ##build_and_save(i, cleartext, 1)\n bar.update(t)\n print(\"\\n\\n\")\n\n # output the results\n print_results(len(images), enc_times, dec_times)\n\nif __name__ == '__main__':\n 
main()","repo_name":"dangersflow/6335Final---Image-Encryption","sub_path":"Code/Img_Encrypt-parallel.py","file_name":"Img_Encrypt-parallel.py","file_ext":"py","file_size_in_byte":10220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29606373455","text":"import random\nopcoes=[\"pedra\",\"papel\",\"tesoura\"]\n\ndef inserir_escolha():\n valor = input(\"escolha entre pedra papel e tesoura: \")\n if valor not in opcoes:\n print(\"a opção escolhida é inválida,tente de novo\")\n return inserir_escolha()\n else:\n return valor\n\ndef jogar():\n opcao=opcoes[random.randint(0,len(opcoes)-1)]\n escolha=inserir_escolha()\n resultado=0#1=empate,2=vitoria,3=derrota\n if opcao == escolha:\n resultado=1\n\n #derrota\n elif opcao == opcoes[0] and escolha==opcoes[1]:\n resultado=2\n elif opcao == opcoes[1] and escolha==opcoes[2]:\n resultado=2\n elif opcao == opcoes[2] and escolha==opcoes[0]:\n resultado=2\n\n elif escolha == opcoes[0] and opcao==opcoes[1]:\n resultado=3\n elif escolha == opcoes[1] and opcao==opcoes[2]:\n resultado=3\n elif escolha == opcoes[2] and opcao==opcoes[0]:\n resultado=3\n\n if resultado==1:\n print(\"empate\")\n elif resultado==3:\n print(\"derrota\")\n elif resultado==2:\n print(\"vitoria\")\n\nwhile True:\n jogar()","repo_name":"Caicadeira/Trabalhos","sub_path":"PI/trabalho 1/pedra_papel_tesoura.py","file_name":"pedra_papel_tesoura.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28662414729","text":"#_*_ coding:utf-8 _*_\nfrom selenium import webdriver as wd\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport pymysql\nimport time\nimport re\n\n\ndef checkdata(word):\n return dict(접수=1, 소관위접수=2, 회부=2, 소관위심사=3, 본회의부의안건=4, 의안정리=5, 정부이송=5, 공포=6, 대안반영폐기=7, 본회의불부의=8, 철회=9, 본회의의결=10).get(word, 9)\n\n\ndef getdata(keyword, key, conn, index):\n main_url = 'http://likms.assembly.go.kr/bill/BillSearchResult.do'\n options = wd.ChromeOptions()\n options.add_argument('headless')\n options.add_argument('window-size=1920x1080')\n options.add_argument(\"disable-gpu\")\n driver = wd.Chrome(executable_path='chromedriver.exe', options=options)\n driver.get(main_url)\n driver.find_element_by_css_selector('button.btnSch01').click()\n\n driver.find_element_by_id('srchBillName').send_keys(keyword)\n driver.find_element_by_css_selector('button.btnSearch01').click()\n try:\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'subContents'))\n )\n except Exception as e:\n print('오류 발생', e)\n\n # lawItems = driver.find_elements_by_css_selector('.tableCol01').pop().find_elements_by_css_selector('tbody')\n # items = lawItems.pop().find_elements_by_css_selector('tr')\n\n if len(driver.find_element_by_css_selector('.tableCol01').find_elements_by_css_selector('a')) != 39:\n max_page = driver.find_element_by_css_selector('.tableCol01').find_elements_by_css_selector('.paging')\n limit_page = len(max_page.pop().text)\n else:\n info_num = driver.find_element_by_css_selector('.tableCol01').find_elements_by_css_selector('.btnPage')[1].get_attribute('href')\n limit_page = int(info_num[18:20])\n lawItems = []\n # output dataset\n names = []\n names2 = []\n names3 = []\n starts = []\n ends = []\n values = []\n counts1 = []\n\n # contents = []\n num = 0\n max_repeat = 0\n for page in 
range(1, limit_page+1):\n driver.execute_script(\"javascript:GoPage(%s)\" % page)\n laws = driver.find_elements_by_css_selector('.tableCol01').pop().find_elements_by_css_selector('tbody')\n items = laws.pop().find_elements_by_css_selector('tr')\n max_repeat = len(items)\n time.sleep(1)\n for tr in items:\n if num == max_repeat:\n num = 0\n break\n num = num + 1\n # 의안 조건을 걸어 저장 유무 판단 하기!\n try:\n lawItems.append(tr.find_element_by_css_selector('a').get_attribute('href'))\n items.append(tr.find_element_by_css_selector('a').text)\n temp = tr.find_elements_by_css_selector('td')\n names.append(temp[1].text)\n starts.append(temp[3].text)\n ends.append(temp[4].text)\n values.append(temp[7].text)\n except:\n print(\"failed to find items\")\n continue\n\n length = len(lawItems)\n # f = io.open(filename, mode=\"w\", encoding=\"utf-8\")\n # tag = \"\\\"title\\\",\\\"name\\\",\\\"start\\\",\\\"end\\\",\\\"value\\\"\\n\"\n # f.write(tag)\n # for li in range(0, 10):\n for li in range(length):\n value = checkdata(values[li])\n if ends[li] == \"\":\n ends[li] = time.strftime(\"%Y-%m-%d\")\n\n remove = re.compile('\\(.+?\\)')\n names2.append(remove.sub('',names[li]))\n if names2[li] in names2[0:li]:\n for i in range(len(counts1)):\n remove = re.compile('\\(.+?\\)')\n temp1 = remove.sub('', names2[li])\n temp2 = remove.sub('', names[i])\n if temp1 == temp2:\n break\n counts1[i][value] = counts1[i][value] + 1\n else:\n counts2 = []\n for num in range(0, 10):\n counts2.append(0)\n counts1.append(counts2)\n counts1[len(counts1)-1][value] = counts1[len(counts1)-1][value] + 1\n names3.append(names2[li])\n\n name = names[li][2:len(names[li])]\n #with conn.cursor() as cursor:\n # sql = 'INSERT INTO my_db.apr_bill_data (title, name, start, end, value) SELECT %s, %s, %s, %s, %s FROM dual WHERE NOT EXISTS (SELECT * FROM my_db.apr_bill_data WHERE name=%s and start=%s and end=%s);'\n # cursor.execute(sql, (key+str(index), name, starts[li], ends[li], str(value), name, starts[li], ends[li]))\n #conn.commit()\n # f.close()\n for num in range(len(counts1)):\n name = names3[num][2:len(names3[num])]\n #with conn.cursor() as cursor:\n # sql = 'INSERT INTO my_db.apr_bill_data_2 (title, name, one, two, three, four, five, six, seven, eight, nine, ten) SELECT %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s FROM dual WHERE NOT EXISTS (SELECT * FROM my_db.apr_bill_data_2 WHERE name=%s);'\n # cursor.execute(sql, (key, name, counts1[num][0], counts1[num][1], counts1[num][2], counts1[num][3], counts1[num][4], counts1[num][5], counts1[num][6], counts1[num][7], counts1[num][8], counts1[num][9], name))\n #conn.commit()\n\n # 종료\n driver.close()\n return True\n","repo_name":"ktjylsj/algomanthm","sub_path":"algo_final/final_auto/find_sim_jud.py","file_name":"find_sim_jud.py","file_ext":"py","file_size_in_byte":5330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3584714814","text":"# -*- coding: utf-8 -*-\n\n\nimport datetime\nimport mimetypes\nimport urllib2\n\nimport boto\nfrom boto.s3.connection import S3Connection\nfrom boto.s3.key import Key\nfrom flask import request, render_template, make_response, url_for, redirect, flash, json\nfrom flask_login import login_user\nfrom sqlalchemy import or_, and_\nfrom werkzeug.utils import secure_filename\nfrom config import RSA_PUBLIC_KEY_BASE64\nfrom hereboxweb import database, response_template, app, logger\nfrom hereboxweb.admin import admin\nfrom hereboxweb.admin.models import VisitTime\nfrom hereboxweb.auth.forms import 
LoginForm\nfrom hereboxweb.auth.login import HereboxLoginHelper\nfrom hereboxweb.auth.models import User, UserStatus\nfrom hereboxweb.book.models import GoodsType, Box, Goods, InStoreStatus, GoodsStatus, BoxStatus, Incoming, Outgoing\nfrom hereboxweb.payment.models import Purchase\nfrom hereboxweb.schedule.models import Reservation, ReservationStatus, ScheduleType, ScheduleStatus, NewReservation, \\\n ReservationType, RestoreReservation, DeliveryReservation, ReservationDeliveryType, Schedule, ReservationRevisitType, \\\n ExtendPeriod, ExtendPeriodStatus, PromotionCode, UnavailableSchedule\nfrom hereboxweb.schedule.reservation import RevisitOption\nfrom hereboxweb.utils import add_months, staff_required, Match\nfrom hereboxweb.admin import custom_filter\n\nUSERS_PER_PAGE = 15\nRESERVATIONS_PER_PAGE = 15\nSCHEDULES_PER_PAGE = 15\nPURCHASES_PER_PAGE = 30\nBOXES_PER_PAGE = 15\nGOODS_PER_PAGE = 15\nEXTEND_PERIOD_PER_PAGE = 10\nINCOMINGS_PER_PAGE = 30\n\n\ndef sync_upload_to_s3(filename, filepath, file):\n aws_s3 = boto.s3.connect_to_region(app.config['FLASKS3_REGION'],\n aws_access_key_id=app.config['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key=app.config['AWS_SECRET_ACCESS_KEY'])\n s3_bucket = aws_s3.get_bucket(app.config['FLASKS3_BUCKET_NAME'])\n s3_key = Key(s3_bucket)\n s3_key.key = filepath\n mime = mimetypes.read_mime_types(filename)\n s3_key.set_metadata('Content-Type', mime)\n s3_key.set_contents_from_file(file)\n s3_key.make_public()\n\n\n@admin.route('/schedule_restriction', methods=['GET', 'POST', 'DELETE'])\n@staff_required\ndef schedule_restriction():\n # 스케줄 제한 조회\n if request.method == 'GET':\n today = datetime.date.today()\n u_schedules = database.session.query(UnavailableSchedule, VisitTime) \\\n .filter(UnavailableSchedule.schedule_time_id == VisitTime.id).filter(UnavailableSchedule.date >= today) \\\n .order_by(UnavailableSchedule.date)\n\n return render_template('admin_schedule_restriction.html', u_schedules=u_schedules,\n page_title=u'예약 제한', page_subtitle='Schedule Restriction')\n\n # 스케줄 제한 추가\n elif request.method == 'POST':\n date = request.form.get('date')\n time = request.form.get('time')\n u_schedule = UnavailableSchedule(date, time)\n database.session.add(u_schedule)\n try:\n database.session.commit()\n except:\n return redirect(url_for('admin.schedule_restriction'))\n return redirect(url_for('admin.schedule_restriction'))\n\n # 스케줄 제한 삭제\n elif request.method == 'DELETE':\n id = request.form.get('id')\n logger.debug(id)\n u_schedule = UnavailableSchedule.query.get(id)\n database.session.delete(u_schedule)\n try:\n logger.debug(\"TRY\")\n database.session.commit()\n return response_template(u'삭제되었습니다.', status=200)\n except:\n logger.debug(\"ERROR\")\n return response_template(u'문제가 발생했습니다.', status=500)\n\n\n@admin.route('/outgoing/', methods=['GET'])\n@staff_required\ndef outgoing_history(page=1):\n paginate = Outgoing.query.join(Goods) \\\n .order_by(Outgoing.created_at.desc()) \\\n .paginate(page, INCOMINGS_PER_PAGE, False)\n\n return render_template('admin_outgoing_history.html', page_title=u'출고기록',\n page_subtitle='Outgoing History',\n pagination=paginate)\n\n\n@admin.route('/incoming/', methods=['GET'])\n@staff_required\ndef incoming_history(page=1):\n paginate = Incoming.query.join(Goods) \\\n .order_by(Incoming.created_at.desc()) \\\n .paginate(page, INCOMINGS_PER_PAGE, False)\n\n return render_template('admin_incoming_history.html', page_title=u'입고기록',\n page_subtitle='Incoming History',\n pagination=paginate)\n\n\n@admin.route('/extend-period/', 
methods=['PUT'])\n@staff_required\ndef extend_period(extend_period_id):\n extend_period = ExtendPeriod.query.filter(ExtendPeriod.id == extend_period_id).first()\n\n extend_period.status = ExtendPeriodStatus.ACCEPTED\n extend_period.goods.expired_at = add_months(extend_period.goods.expired_at, extend_period.amount)\n\n try:\n database.session.commit()\n except:\n return response_template(u'문제가 발생했습니다.', status=500)\n return response_template(u'처리되었습니다', status=200)\n\n\n@admin.route('/extend-periods/', methods=['GET'])\n@staff_required\ndef extend_period_list(page):\n paginate = ExtendPeriod.query.filter(ExtendPeriod.status == ExtendPeriodStatus.WAITING) \\\n .order_by(ExtendPeriod.created_at.desc()) \\\n .paginate(page, EXTEND_PERIOD_PER_PAGE, False)\n\n return render_template('admin_extend_period_list.html', page_title=u'기간연장',\n page_subtitle='Extend Period',\n pagination=paginate)\n\n\n@admin.route('/old-extend-periods/', methods=['GET'])\n@staff_required\ndef old_extend_period_list(page):\n paginate = ExtendPeriod.query.filter(ExtendPeriod.status==ExtendPeriodStatus.ACCEPTED) \\\n .order_by(ExtendPeriod.created_at.desc()) \\\n .paginate(page, EXTEND_PERIOD_PER_PAGE, False)\n\n return render_template('admin_old_extend_period_list.html', page_title=u'지난 기간연장',\n page_subtitle='Old Extend Period',\n pagination=paginate\n )\n\n\n@admin.route('/goods/', methods=['GET', 'POST', 'DELETE'])\n@staff_required\ndef goods_detail(goods_id):\n goods = Goods.query.filter(Goods.goods_id==goods_id).first()\n if goods.goods_id.startswith('A'):\n goods.box = Box.query.get(goods.box_id)\n\n if request.method == 'POST':\n goods_type = request.form.get('goods_type')\n status = request.form.get('status')\n in_store = request.form.get('in_store')\n name = request.form.get('name')\n memo = request.form.get('memo')\n started_at = request.form.get('started_at')\n expired_at = request.form.get('expired_at')\n goods_photo = request.files.get('goods_photo')\n\n if goods_type and goods.goods_type != goods_type:\n goods.goods_type = goods_type\n if status and goods.status != status:\n goods.status = status\n if in_store and goods.in_store != in_store:\n goods.in_store = in_store\n if name and goods.name != name:\n goods.name = name\n if memo and goods.memo != memo:\n goods.memo = memo\n if started_at and goods.started_at != started_at:\n goods.started_at = started_at\n if expired_at and goods.expired_at != expired_at:\n expired_at = datetime.datetime.strptime(expired_at, '%Y-%m-%d').date()\n extended_period = (expired_at - goods.expired_at) / 30\n extend_period_history = ExtendPeriod(extended_period, goods.id, ExtendPeriodStatus.ACCEPTED)\n goods.expired_at = expired_at\n database.session.add(extend_period_history)\n attached_filepath = None\n if goods_photo:\n src_filename = goods_photo.filename\n last_filename = src_filename[src_filename.rindex('.') + 1:]\n reservation = Reservation.query.filter(Reservation.goods.any(id=goods.id)).first()\n filename = '%s_%s.%s' % (reservation.user_id, reservation.reservation_id, last_filename)\n attached_filepath = '%s/%s' % ('goods', filename)\n sync_upload_to_s3(filename, attached_filepath, goods_photo)\n goods.photo = attached_filepath\n\n try:\n database.session.commit()\n except:\n return response_template(u'문제가 발생했습니다.', status=500)\n\n if request.method == 'DELETE':\n if goods.box_id != None:\n box = Box.query.get(goods.box_id)\n box.status = BoxStatus.AVAILABLE\n goods.box_id = None\n goods.status = GoodsStatus.EXPIRED\n outgoing_history = Outgoing(goods.id)\n 
database.session.add(outgoing_history)\n\n try:\n database.session.commit()\n except:\n return response_template(u'문제가 발생했습니다.', status=500)\n\n return render_template('admin_goods.html', page_title=u'물품',\n page_subtitle='Goods',\n goods_detail=goods)\n\n\n@admin.route('/schedule/', methods=['GET', 'POST', 'DELETE'])\n@staff_required\ndef schedule_detail(schedule_id):\n schedule = Schedule.query.join(VisitTime, Schedule.schedule_time_id == VisitTime.id)\\\n .filter(Schedule.schedule_id==schedule_id).first()\n if request.method == 'POST':\n visit_date = request.form.get('visit_date')\n if visit_date and schedule.schedule_date != visit_date:\n schedule.schedule_date = visit_date\n database.session.commit()\n\n if request.method == 'DELETE':\n schedule.status = ScheduleStatus.CANCELED\n try:\n database.session.commit()\n return redirect(url_for('admin.schedules'))\n except:\n return response_template(u'문제가 발생했습니다.', status=500)\n\n reservation = schedule.reservation\n if reservation.reservation_id.startswith(ReservationType.PICKUP_NEW):\n if (reservation.revisit_option == ReservationRevisitType.IMMEDIATE) or \\\n (reservation.revisit_option == ReservationRevisitType.LATER and\n schedule.schedule_id.endswith('_1')):\n return render_template('admin_schedule.html', page_title=u'스케줄',\n page_subtitle='Schedule',\n schedule_detail=schedule,\n register_goods_popup=True)\n return render_template('admin_schedule.html', page_title=u'스케줄',\n page_subtitle='Schedule',\n schedule_detail=schedule)\n\n\n@admin.route('/goods//', methods=['GET'])\n@staff_required\ndef search_goods(keyword, page):\n paginate = Goods.query.join(Box).join(User).filter(\n or_(Match([Goods.goods_id, Goods.name, Goods.memo], keyword),\n Match([User.name, User.address1, User.address2], keyword),\n Match([Box.box_id], keyword))\n )\\\n .order_by(Goods.created_at.desc()) \\\n .paginate(page, GOODS_PER_PAGE, False)\n return render_template('admin_goods_list.html', page_title=u'물품조회',\n page_subtitle='Goods',\n pagination=paginate,\n search_keyword=keyword\n )\n\n\n@admin.route('/goods_list/', methods=['GET'])\n@staff_required\ndef goods_list(page):\n filtered_by = request.args.get('status')\n if filtered_by and filtered_by == 'must_be_expired':\n paginate = Goods.query.filter(\n and_(\n Goods.expired_at <= datetime.date.today(),\n Goods.status == GoodsStatus.ACTIVE\n )\n ).order_by(Goods.created_at.desc()) \\\n .paginate(page, GOODS_PER_PAGE, False)\n else:\n paginate = Goods.query.order_by(Goods.created_at.desc()) \\\n .paginate(page, GOODS_PER_PAGE, False)\n\n return render_template('admin_goods_list.html', page_title=u'물품조회',\n page_subtitle='Goods',\n pagination=paginate\n )\n\n\n@admin.route('/box//', methods=['GET'])\n@staff_required\ndef search_box(keyword, page):\n paginate = Box.query.filter(\n or_(Match([Box.box_id], keyword),)\n ).order_by(Box.created_at.desc()) \\\n .paginate(page, BOXES_PER_PAGE, False)\n\n for item in paginate.items:\n item.goods = Goods.query.filter(Goods.box_id == item.id).first()\n\n return render_template('admin_boxes.html', page_title=u'박스현황',\n page_subtitle='Boxes',\n pagination=paginate,\n search_keyword=keyword\n )\n\n\n@admin.route('/boxes/', methods=['GET'])\n@staff_required\ndef boxes(page):\n paginate = Box.query.order_by(Box.created_at.desc()) \\\n .paginate(page, BOXES_PER_PAGE, False)\n\n for item in paginate.items:\n item.goods = Goods.query.filter(Goods.box_id == item.id).first()\n\n return render_template('admin_boxes.html', page_title=u'박스현황',\n page_subtitle='Boxes',\n 
pagination=paginate\n )\n\n\n@admin.route('/old_schedules/', methods=['GET'])\n@staff_required\ndef old_schedules(page):\n paginate = Schedule.query.join(VisitTime, Schedule.schedule_time_id == VisitTime.id).filter(\n or_(Schedule.status == ScheduleStatus.CANCELED,\n Schedule.status == ScheduleStatus.COMPLETE,)\n ).order_by(Schedule.created_at.desc()) \\\n .paginate(page, SCHEDULES_PER_PAGE, False)\n\n return render_template('admin_old_schedules.html', page_title=u'지난 스케줄',\n page_subtitle='Old Schedules',\n pagination=paginate\n )\n\n\n@admin.route('/old_reservation//', methods=['GET'])\n@staff_required\ndef search_old_reservation(keyword, page):\n paginate = Reservation.query.join(Schedule)\\\n .join(User, Reservation.user_id == User.uid).filter(\n and_(Reservation.status == ReservationStatus.ACCEPTED,\n or_(\n Match([Schedule.schedule_id], keyword),\n Match([Reservation.reservation_id, Reservation.address,\n Reservation.contact, Reservation.user_memo], keyword),\n Match([User.name, User.address1, User.address2], keyword)\n ))\n ).order_by(Reservation.created_at.desc()) \\\n .paginate(page, RESERVATIONS_PER_PAGE, False)\n\n for item in paginate.items:\n item.parsed_delivery_time = VisitTime.query.get(item.delivery_time)\n\n return render_template('admin_old_reservations.html', page_title=u'지난 주문',\n page_subtitle='Old Reservations',\n pagination=paginate\n )\n\n\n@admin.route('/old_reservations/', methods=['GET'])\n@staff_required\ndef old_reservations(page):\n paginate = Reservation.query.filter(\n Reservation.status == ReservationStatus.ACCEPTED,\n ).order_by(Reservation.created_at.desc()) \\\n .paginate(page, RESERVATIONS_PER_PAGE, False)\n\n for item in paginate.items:\n item.parsed_delivery_time = VisitTime.query.get(item.delivery_time)\n\n return render_template('admin_old_reservations.html', page_title=u'지난 주문',\n page_subtitle='Old Reservations',\n pagination=paginate\n )\n\n\n@admin.route('/purchases/', methods=['GET'])\n@staff_required\ndef purchases(page):\n paginate = Purchase.query.order_by(Purchase.created_at.desc()) \\\n .paginate(page, PURCHASES_PER_PAGE, False)\n\n return render_template('admin_purchases.html', page_title=u'구매 히스토리',\n page_subtitle='Purchase',\n pagination=paginate\n )\n\n\n@admin.route('/schedule//', methods=['GET'])\n@staff_required\ndef search_schedule(keyword, page):\n schedule_status = request.args.get('status')\n if schedule_status not in ('waiting', 'old'):\n return redirect(url_for('admin.schedules', page=1))\n\n if schedule_status == 'waiting':\n paginate = Schedule.query.join(Schedule.customer) \\\n .join(Reservation) \\\n .join(VisitTime, Schedule.schedule_time_id == VisitTime.id) \\\n .filter(\n and_(\n Schedule.status == ScheduleStatus.WAITING,\n or_(Match([Schedule.schedule_id], keyword),\n Match([Reservation.reservation_id, Reservation.address,\n Reservation.contact, Reservation.user_memo], keyword),\n Match([User.name, User.address1, User.address2], keyword))\n )\n ).order_by(Schedule.created_at.desc()) \\\n .paginate(page, SCHEDULES_PER_PAGE, False)\n\n return render_template('admin_schedules.html', page_title=u'스케줄',\n page_subtitle='Schedules',\n pagination=paginate,\n search_keyword=keyword\n )\n\n elif schedule_status == 'old':\n paginate = Schedule.query.join(Schedule.customer) \\\n .join(Reservation) \\\n .join(VisitTime, Schedule.schedule_time_id == VisitTime.id) \\\n .filter(\n and_(\n or_(\n Schedule.status == ScheduleStatus.CANCELED,\n Schedule.status == ScheduleStatus.COMPLETE\n ),\n or_(Match([Schedule.schedule_id], 
keyword),\n Match([Reservation.reservation_id, Reservation.address,\n Reservation.contact, Reservation.user_memo], keyword),\n Match([User.name, User.address1, User.address2], keyword))\n )\n ).order_by(Schedule.created_at.desc()) \\\n .paginate(page, SCHEDULES_PER_PAGE, False)\n\n return render_template('admin_old_schedules.html', page_title=u'지난 스케줄',\n page_subtitle='Old Schedules',\n pagination=paginate,\n search_keyword=keyword\n )\n\n\n@admin.route('/schedules/', methods=['GET'])\n@staff_required\ndef schedules(page=1):\n paginate = Schedule.query.join(Schedule.customer).join(VisitTime, Schedule.schedule_time_id == VisitTime.id)\\\n .filter(\n Schedule.status == ScheduleStatus.WAITING\n ).order_by(Schedule.created_at.desc()) \\\n .paginate(page, SCHEDULES_PER_PAGE, False)\n\n return render_template('admin_schedules.html', page_title=u'스케줄',\n page_subtitle='Schedules',\n pagination=paginate\n )\n\n\n@admin.route('/reservation/', methods=['GET', 'POST', 'DELETE'])\n@staff_required\ndef reservation_detail(reservation_id):\n reservation = None\n\n # 연락처, 주소 변경\n def change_reservation():\n contact = request.form.get('contact')\n address = request.form.get('address')\n\n if contact and reservation.contact != contact:\n reservation.contact = contact\n if address and reservation.address != address:\n reservation.address = address\n\n try:\n database.session.commit()\n except:\n return response_template(u'오류가 발생했습니다.', status=500)\n\n def delete_reservation(url_to_go):\n for goods in reservation.goods:\n if goods.box_id != None:\n box = Box.query.get(goods.box_id)\n box.status = BoxStatus.AVAILABLE\n goods.box_id = None\n goods.status = GoodsStatus.EXPIRED\n for schedule in reservation.schedules:\n schedule.status = ScheduleStatus.CANCELED\n database.session.delete(reservation)\n try:\n database.session.commit()\n return redirect(url_for('admin.%s' % (url_to_go)))\n except:\n return response_template(u'문제가 발생했습니다.', status=500)\n\n # New\n if reservation_id.startswith(ReservationType.PICKUP_NEW):\n # 새로운 예약\n reservation = NewReservation.query.filter(Reservation.reservation_id == reservation_id).first()\n\n parsed_binding_products = ''\n binding_products = json.loads(reservation.binding_products)\n for key in binding_products.keys():\n parsed_binding_products += '%s: ' % key\n parsed_binding_products += '%s ' % binding_products[key]\n reservation.parsed_binding_products = parsed_binding_products\n\n reservation.parsed_revisit_option = 'Y' if reservation.revisit_option == ReservationRevisitType.LATER else 'N'\n reservation.parsed_fixed_rate = 'Y' if reservation.fixed_rate == 1 else 'N'\n\n reservation.parsed_delivery_time = VisitTime.query.get(reservation.delivery_time)\n reservation.parsed_recovery_time = VisitTime.query.get(reservation.recovery_time)\n\n promotion_code = PromotionCode.query.filter(PromotionCode.code == reservation.promotion).first()\n if promotion_code:\n reservation.promotion_name = u'%s(%s)' % (promotion_code.promotion.name,\n reservation.promotion)\n # 신규 - 저장\n if request.method == 'POST':\n standard_box_count = request.form.get('standard_box_count')\n nonstandard_goods_count = request.form.get('nonstandard_goods_count')\n\n if standard_box_count and reservation.standard_box_count != int(standard_box_count):\n reservation.standard_box_count = int(standard_box_count)\n if nonstandard_goods_count and reservation.nonstandard_goods_count != int(nonstandard_goods_count):\n reservation.nonstandard_goods_count = int(nonstandard_goods_count)\n\n change_reservation()\n\n # 신규 - 
삭제\n if request.method == 'DELETE':\n delete_reservation('new_reservations')\n\n return render_template('admin_new_reservation.html', page_title=u'예약 정보',\n page_subtitle='Reservation',\n reservation_detail=reservation)\n # Pickup_again\n elif reservation_id.startswith(ReservationType.PICKUP_AGAIN):\n reservation = RestoreReservation.query.filter(Reservation.reservation_id==reservation_id).first()\n reservation.parsed_delivery_time = VisitTime.query.get(reservation.delivery_time)\n reservation.parsed_recovery_time = VisitTime.query.get(reservation.recovery_time)\n reservation.parsed_revisit_option = 'Y' if reservation.revisit_option == ReservationRevisitType.LATER else 'N'\n\n if request.method == 'POST':\n change_reservation()\n\n if request.method == 'DELETE':\n delete_reservation('restore_reservations')\n\n return render_template('admin_restore_reservation.html', page_title=u'예약 정보',\n page_subtitle='Reservation',\n reservation_detail=reservation)\n # Delivery\n elif reservation_id.startswith(ReservationType.DELIVERY):\n reservation = DeliveryReservation.query.filter(Reservation.reservation_id==reservation_id).first()\n reservation.parsed_delivery_time = VisitTime.query.get(reservation.delivery_time)\n reservation.parsed_delivery_option = 'Y' if reservation.delivery_option == ReservationDeliveryType.RESTORE else 'N'\n\n if request.method == 'POST':\n change_reservation()\n\n if request.method == 'DELETE':\n delete_reservation('delivery_reservations')\n\n return render_template('admin_delivery_reservation.html', page_title=u'예약 정보',\n page_subtitle='Reservation',\n reservation_detail=reservation)\n\n\n@admin.route('/user/', methods=['GET', 'POST'])\n@staff_required\ndef user_detail(user_id):\n user = User.query.get(user_id)\n if request.method == 'POST':\n name = request.form.get('name')\n email = request.form.get('email')\n address1 = request.form.get('address1')\n address2 = request.form.get('address2')\n status = request.form.get('status')\n\n if name and user.name != name:\n user.name = name\n if email and user.email != email:\n user.email = email\n if address1 and user.address1 != address1:\n user.address1 = address1\n if address2 and user.address2 != address2:\n user.address2 = address2\n if status and user.status != status:\n user.status = status\n database.session.commit()\n return render_template('admin_user.html', page_title=u'회원 정보',\n page_subtitle='User',\n user_detail=user)\n\n\n@admin.route('/delivery_reservations/', methods=['GET'])\n@staff_required\ndef delivery_reservations(page):\n paginate = DeliveryReservation.query.filter(\n DeliveryReservation.status == ReservationStatus.WAITING\n ).order_by(DeliveryReservation.created_at.desc()) \\\n .paginate(page, RESERVATIONS_PER_PAGE, False)\n\n for item in paginate.items:\n item.parsed_delivery_time = VisitTime.query.get(item.delivery_time)\n item.parsed_delivery_option = 'Y' if item.delivery_option == ReservationDeliveryType.RESTORE else 'N'\n\n return render_template('admin_delivery_reservations.html', page_title=u'배송',\n page_subtitle='Delivery Reservations',\n pagination=paginate\n )\n\n\n@admin.route('/restore_reservations/', methods=['GET'])\n@staff_required\ndef restore_reservations(page):\n paginate = RestoreReservation.query.filter(\n RestoreReservation.status == ReservationStatus.WAITING\n ).order_by(RestoreReservation.created_at.desc()) \\\n .paginate(page, RESERVATIONS_PER_PAGE, False)\n\n for item in paginate.items:\n item.parsed_delivery_time = VisitTime.query.get(item.delivery_time)\n item.parsed_revisit_option 
= 'Y' if item.revisit_option == RevisitOption.LATER else 'N'\n\n return render_template('admin_restore_reservations.html', page_title=u'재보관',\n page_subtitle='Restore Reservations',\n pagination=paginate\n )\n\n\n@admin.route('/new_reservations/', methods=['GET'])\n@staff_required\ndef new_reservations(page):\n paginate = NewReservation.query.filter(\n NewReservation.status == ReservationStatus.WAITING\n ).order_by(NewReservation.created_at.desc()) \\\n .paginate(page, RESERVATIONS_PER_PAGE, False)\n\n for item in paginate.items:\n parsed_binding_products = ''\n binding_products = json.loads(item.binding_products)\n for key in binding_products.keys():\n parsed_binding_products += '%s: ' % key\n parsed_binding_products += '%s ' % binding_products[key]\n item.parsed_binding_products = parsed_binding_products\n item.parsed_revisit_option = 'Y' if item.revisit_option == RevisitOption.LATER else 'N'\n\n return render_template('admin_new_reservations.html', page_title=u'신규픽업',\n page_subtitle='New Reservations',\n pagination=paginate\n )\n\n\n@admin.route('/', methods=['GET'])\n@staff_required\ndef admin_index():\n today = datetime.date.today()\n reservations_today = Reservation.query.filter(and_(Reservation.created_at >= today\n , Reservation.status == ReservationStatus.WAITING)).count()\n goods_expired_today = Goods.query.filter(and_(Goods.expired_at <= today,\n Goods.status == GoodsStatus.ACTIVE)).count()\n user_join_today = User.query.filter(User.created_at >= today).count()\n used_box_today = Box.query.filter(Box.status == BoxStatus.UNAVAILABLE).count()\n\n reservation_statistics = database.engine.execute(\n \"\"\"\n SELECT CONCAT(\n DATE_FORMAT(\n DATE_SUB(`created_at`, INTERVAL (DAYOFWEEK(`created_at`) - 1) DAY),\n \"%%m.%%d\"),\n ' ~ ',\n DATE_FORMAT(\n DATE_SUB(`created_at`, INTERVAL (DAYOFWEEK(`created_at`) - 7) DAY),\n \"%%m.%%d\"))\n AS `date`,\n COUNT(*) AS `count`\n FROM `reservation`\n GROUP BY `date`\n ORDER BY `date`\n \"\"\"\n )\n\n reservation_statistics_data = []\n for row in reservation_statistics:\n reservation_statistics_data.append({\n \"x\": row[0],\n \"y\": row[1]\n })\n\n return render_template('admin_dashboard.html', page_title=u'대시보드',\n page_subtitle='Overview',\n reservations_today=reservations_today,\n goods_expired_today=goods_expired_today,\n user_join_today=user_join_today,\n used_box_today=used_box_today,\n reservation_statistics=json.dumps(reservation_statistics_data))\n\n\n@admin.route('/users//', methods=['GET'])\n@staff_required\ndef search_user(keyword, page):\n paginate = User.query.filter(\n Match([User.name, User.address1, User.address2], keyword)\n ).order_by(User.created_at.desc()).paginate(page, USERS_PER_PAGE, False)\n\n return render_template('admin_users.html', page_title=u'회원정보',\n page_subtitle='Users',\n pagination=paginate,\n search_keyword=keyword\n )\n\n\n@admin.route('/users/', methods=['GET'])\n@staff_required\ndef admin_users(page):\n paginate = User.query.order_by(User.created_at.desc()).paginate(page, USERS_PER_PAGE, False)\n\n return render_template('admin_users.html', page_title=u'회원정보',\n page_subtitle='Users',\n pagination=paginate\n )\n\n\n@admin.route('/login', methods=['GET', 'POST'])\ndef admin_login():\n form = LoginForm()\n rsa_public_key = RSA_PUBLIC_KEY_BASE64\n\n if form.validate_on_submit():\n encoded_email = form.email.data\n encoded_password = form.password.data\n encoded_aes_key = request.form['decryptKey']\n encoded_aes_iv = request.form['iv']\n\n herebox_login_helper = HereboxLoginHelper(encoded_email, 
encoded_password,\n encoded_aes_key, encoded_aes_iv)\n\n try:\n decrypted_email, decrypted_password = herebox_login_helper.decrypt()\n query = database.session.query(User).filter(User.email == decrypted_email,\n User.status >= UserStatus.STAFF)\n user = query.first()\n\n if user.check_password(decrypted_password):\n flash(u'환영합니다')\n login_user(user)\n return redirect(url_for('admin.admin_index'))\n else:\n raise\n except:\n form.email.errors.append(u'이메일 주소 또는 비밀번호를 다시 확인해주세요.')\n\n form.email.data = ''\n response = make_response(render_template('admin_login.html', form=form))\n response.set_cookie('jsessionid', rsa_public_key, path='/admin/login')\n return response\n\n\n@admin.route('/reservation/accept', methods=['POST'])\n@staff_required\ndef accept_reservation():\n reservation_id = request.form.get('reservation_id')\n reservation = Reservation.query.filter(Reservation.reservation_id == reservation_id).first()\n if not reservation:\n return response_template(u'%s 주문을 찾을 수 없습니다.' % reservation_id, status=400)\n\n reservation.status = ReservationStatus.ACCEPTED\n reservation.updated_at = datetime.datetime.now()\n\n try:\n database.session.commit()\n except:\n return response_template(u'오류가 발생했습니다.', status=500)\n return response_template(u'정상 처리되었습니다.')\n\n\n@admin.route('/schedule/register/goods', methods=['POST'])\n@staff_required\ndef register_goods():\n schedule_id = request.form.get('schedule_id')\n schedule = Schedule.query.filter(Schedule.schedule_id == schedule_id).first()\n\n if not schedule:\n return response_template(u'%s 스케줄을 찾을 수 없습니다.' % schedule_id, status=400)\n\n reservation = schedule.reservation\n goods_type = request.form.get('goods_type') # A or B\n name = request.form.get('name')\n box_id = request.form.get('box_id')\n memo = request.form.get('memo')\n started_at = request.form.get('started_at')\n goods_photo = request.files.get('goods_photo')\n\n box = None\n if goods_type not in (GoodsType.STANDARD_BOX, GoodsType.NONSTANDARD_GOODS):\n return response_template(u'잘못된 규격입니다', status=400)\n\n if goods_type == GoodsType.STANDARD_BOX:\n box = Box.query.filter(Box.box_id == box_id, Box.status == BoxStatus.AVAILABLE).first()\n if not box:\n return response_template(u'%s 상자를 찾을 수 없습니다.' 
% box_id, status=400)\n\n box.status = BoxStatus.UNAVAILABLE\n\n try:\n started_at = datetime.datetime.strptime(started_at, \"%Y-%m-%d\")\n except:\n return response_template(u'잘못된 날짜형식입니다.', status=400)\n\n user_id = reservation.user_id\n expired_at = add_months(started_at, reservation.period)\n fixed_rate = reservation.fixed_rate\n\n attached_filepath = None\n if goods_photo:\n src_filename = goods_photo.filename\n last_filename = src_filename[src_filename.rindex('.') + 1:]\n filename = '%s_%s.%s' % (reservation.user_id, reservation.reservation_id, last_filename)\n attached_filepath = '%s/%s' % ('goods', filename)\n sync_upload_to_s3(filename, attached_filepath, goods_photo)\n\n new_goods = Goods(goods_type=goods_type,\n name=name,\n memo=memo,\n in_store=InStoreStatus.IN_STORE,\n box_id=box.id if box else None,\n user_id=user_id,\n started_at=started_at,\n expired_at=expired_at,\n fixed_rate=fixed_rate,\n photo=attached_filepath,\n status=GoodsStatus.ACTIVE)\n\n database.session.add(new_goods)\n reservation.goods.append(new_goods)\n\n try:\n database.session.commit()\n except:\n return response_template(u'오류가 발생했습니다.', status=500)\n\n if goods_type == GoodsType.STANDARD_BOX:\n new_goods.box_id = box.id\n\n try:\n database.session.commit()\n except:\n return response_template(u'오류가 발생했습니다.', status=500)\n return response_template(u'정상 처리되었습니다.')\n\n\n@admin.route('/schedule/complete', methods=['POST'])\n@staff_required\ndef complete_schedule():\n schedule_id = request.form.get('schedule_id')\n schedule = Schedule.query.filter(Schedule.schedule_id == schedule_id).first()\n\n if not schedule:\n return response_template(u'%s 스케줄을 찾을 수 없습니다.' % schedule_id, status=400)\n\n reservation = schedule.reservation\n if reservation.status == ReservationStatus.WAITING:\n return response_template(u'%s 주문이 접수된 상태이어야 합니다!' % reservation.reservation_id, status=400)\n\n if reservation.reservation_id.startswith(ReservationType.PICKUP_NEW):\n if (reservation.revisit_option == ReservationRevisitType.IMMEDIATE) or \\\n (reservation.revisit_option == ReservationRevisitType.LATER and\n schedule.schedule_id.endswith('_1')):\n goods_count = len(reservation.goods)\n if goods_count == 0:\n return response_template(u'%s 주문에 등록된 물품이 없습니다!' 
% reservation.reservation_id, status=400)\n for goods in reservation.goods:\n incoming_history = Incoming(goods.id)\n database.session.add(incoming_history)\n schedules = reservation.schedules\n for schedule in schedules:\n if schedule.schedule_type == ScheduleType.PICKUP_DELIVERY or \\\n schedule.schedule_type == ScheduleType.PICKUP_RECOVERY:\n schedule.status = ScheduleStatus.COMPLETE\n\n elif reservation.reservation_id.startswith(ReservationType.PICKUP_AGAIN):\n if (reservation.revisit_option == ReservationRevisitType.IMMEDIATE) or \\\n (reservation.revisit_option == ReservationRevisitType.LATER and\n schedule.schedule_id.endswith('_1')):\n for goods in reservation.goods:\n goods.in_store = InStoreStatus.IN_STORE\n incoming_history = Incoming(goods.id)\n database.session.add(incoming_history)\n schedules = reservation.schedules\n for schedule in schedules:\n if schedule.schedule_type == ScheduleType.RESTORE_DELIVERY or \\\n schedule.schedule_type == ScheduleType.RESTORE_RECOVERY:\n schedule.status = ScheduleStatus.COMPLETE\n\n elif reservation.reservation_id.startswith(ReservationType.DELIVERY):\n for goods in reservation.goods:\n if reservation.delivery_option == ReservationDeliveryType.EXPIRE:\n if goods.box_id != None:\n box = Box.query.get(goods.box_id)\n box.status = BoxStatus.AVAILABLE\n goods.box_id = None\n goods.status = GoodsStatus.EXPIRED\n else:\n goods.in_store = InStoreStatus.OUT_OF_STORE\n outgoing_history = Outgoing(goods.id)\n database.session.add(outgoing_history)\n\n schedule.status = ScheduleStatus.COMPLETE\n schedule.updated_at = datetime.datetime.now()\n\n try:\n database.session.commit()\n except:\n return response_template(u'오류가 발생했습니다.', status=500)\n return response_template(u'정상 처리되었습니다.')\n\n\n","repo_name":"iBluemind/herebox","sub_path":"hereboxweb/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":38870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40011524659","text":"# pip install phonenumbers\n# pip install folium\n# pip install opencage\n\nimport folium\nimport phonenumbers\nfrom phonenumbers import timezone,geocoder,carrier\nnumber= input(\"Enter Your Phone Number with Country code (+__) :\")\n\nphone = phonenumbers.parse(number)\ntime=timezone.time_zones_for_number(phone)\ncarrier= carrier.name_for_number(phone,\"en\")\nreg=geocoder.description_for_number(phone,\"en\")\n\nkey = '2a14c404034b4561b8e1bfe6c485b312'\n\nfrom opencage.geocoder import OpenCageGeocode\n\ngeocoder = OpenCageGeocode(key)\nquery = str(reg)\nres = geocoder.geocode(query)\n\nlat = res[0]['geometry']['lat']\nlng = res[0]['geometry']['lng']\n\nMymap = folium.Map(location=[lat, lng], zone_start = 9)\n\nfolium.Marker([lat, lng], popup=reg).add_to(Mymap)\n\nMymap.save(\"location_number.html\")\n\n\nprint(phone)\nprint(time)\nprint(carrier)\nprint(reg)\n# print(res)\nprint(lng,lat)\n","repo_name":"shubhamaradhayan/Get-Details-Of-PhoneNumber-using-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28068462665","text":"import os\nos.environ['KERAS_BACKEND'] = 'tensorflow'\nimport pickle\nimport numpy as np\n\nimport jsonlines\nimport pandas as pd\nfrom keras.models import load_model\nfrom keras.utils import np_utils\nfrom sklearn.metrics import precision_recall_fscore_support, confusion_matrix\nimport seaborn as sns\nimport matplotlib.pyplot as 
plt\n\n\nintToLabel = {}\nintToLabel[0] = \"Support\"\nintToLabel[1] = \"Refute\"\nintToLabel[2] = \"Not Enough Info\"\n\nclass testModel:\n\n\tdef __init__(self, max_claims_length, max_sents_length, model_path, num_classes, dataset_name=None):\n\n\t\tself.max_claims_length = max_claims_length\n\t\tself.max_sents_length = max_sents_length\n\t\tself.data_name = dataset_name\n\t\tself.model_path = model_path\n\t\tself.num_classes = num_classes\n\n\t\tif dataset_name == \"fever_full_binary_dev\" or dataset_name == \"fever_full_binary_dev_claim_labelling\":\n\n\t\t\tif dataset_name == \"fever_full_binary_dev\":\n\t\t\t\ttest_data = \"/scratch/kkuma12s/github/fact-validation/thesis-code/Proof_Extraction/data/fever-full/\"+dataset_name+\".jsonl\"\n\t\t\telse:\n\t\t\t\tprint (\"data set name \", dataset_name)\n\t\t\t\ttest_data = \"/scratch/kkuma12s/github/fact-validation/thesis-code/Proof_Extraction/data/fever-full/claim_classification/\"+dataset_name+\".jsonl\"\n\n\t\t\tself.claims = []\n\t\t\tself.sents = []\n\t\t\tself.labels = []\n\n\t\t\twith jsonlines.open(test_data, mode='r') as f:\n\t\t\t\ttmp_dict = {}\n\t\t\t\tfor example in f:\n\t\t\t\t\tself.claims.append(example[\"claim\"])\n\t\t\t\t\tself.sents.append(example[\"sentence\"])\n\t\t\t\t\tself.labels.append(example[\"label\"])\n\n\t\t\t\ttmp_dict = {'claim':self.claims, 'sentence':self.sents, 'label':self.labels}\n\t\t\t\tself.test_data = pd.DataFrame(data=tmp_dict)\n\n\t\telse:\n\t\t\tself.test_data = pickle.load(open(\"./elmo/datasets/test_\"+str(self.data_name)+\".pkl\", \"rb\"))\n\n\tdef get_results_on_test_data(self, preprocess):\n\n\t\t# embeddings_name = \"fever_3\"\n\t\tembeddings_name = \"fever_full_binary_dev\"\n\t\t# embeddings are also compressed in elmo directory \n\t\t# claim_embeddings = pickle.load(open(\"/scratch/kkuma12s/elmo_embeddings/test_claim_elmo_emb_\"+embeddings_name+\".pkl\", \"rb\"))\n\t\t# sents_embeddings = pickle.load(open(\"/scratch/kkuma12s/elmo_embeddings/test_sents_elmo_emb_\"+embeddings_name+\".pkl\", \"rb\"))\n\t\t\n\t\tclaim_embeddings = pickle.load(open(\"/scratch/kkuma12s/elmo_embeddings/test_claim_elmo_emb_\"+embeddings_name+\".pkl\", \"rb\"))\n\t\tsents_embeddings = pickle.load(open(\"/scratch/kkuma12s/elmo_embeddings/test_sents_elmo_emb_\"+embeddings_name+\".pkl\", \"rb\"))\n\n\n\t\tif self.data_name == 'fever_sup':\n\t\t\tlabels = self.test_data[\"lablel\"]\n\t\t\t\n\t\telse:\n\t\t\tlabels = self.test_data[\"label\"]\n\n\t\ttest_claims_data, test_sents_data= preprocess.to_padding(claim_embeddings, sents_embeddings, labels, self.max_claims_length, self.max_sents_length)\n\t\t\n\t\tprint (\"test claims data shape \", test_claims_data.shape)\n\t\tmodel = load_model(self.model_path)\n\t\tbatch = 64\n\n\t\tif self.num_classes > 1:\n\t\t\ttest_labels = np.asarray(labels)\n\t\t\ttest_labels = np_utils.to_categorical(test_labels, self.num_classes)\n\t\t\tavg = 'weighted'\n\t\telse:\n\t\t\ttest_labels = np.asarray(labels)\n\t\t\tavg = 'binary'\n\n\t\tloss, accuracy = model.evaluate({'claims':test_claims_data, 'sentences': test_sents_data} ,\n\t\t \t\t\t\t\t\t\t\t\ttest_labels)\n\t\tprint (\"test loss \", loss)\n\t\tprint (\"test accuracy \", accuracy)\n\n\t\ty_pred = (np.asarray(model.predict({'claims': test_claims_data, 'sentences': test_sents_data} , batch_size=batch))).round()\n\t\t# print (\"y pred \", y_pred)\n\t\tprint (\"score of lstm \", precision_recall_fscore_support(test_labels, y_pred, average=avg)) \n\n\t\twith open(\"elmo_test_results.log\", \"w\") as f:\n\t\t\tf.write(\"test loss 
\"+str(loss))\n\t\t\tf.write(\"\\r\\n test accuracy \"+ str(accuracy))\n\t\t\tf.write(\"\\r\\n score of lstm \"+ str(precision_recall_fscore_support(test_labels, y_pred, average=avg)))\n\n\n\t\t# convert everything in format for plot confusion matrix\n\t\tplot_y_pred = np.argmax(y_pred, axis=1).tolist()\n\t\tplot_y_true = np.argmax(test_labels, axis=1).tolist()\n\n\t\ty_pred = [intToLabel[pred] for pred in plot_y_pred]\n\t\tt_labels = [intToLabel[t_pred] for t_pred in plot_y_true]\n\t\tmethod=\"ELMO\"\n\t\tfilename = \"elmo_cm.pdf\"\n\t\tself.save_confusion_matrix(t_labels, y_pred, filename, method)\n\n\n\tdef save_confusion_matrix(self, y_true, y_pred, filename, method, ymap=None, figsize=(7,7)):\n\n\t\t# confusion_Matrix = confusion_matrix(y_true, y_pred)\n\t\tclass_labels = [\"Support\", \"Refute\", \"Not Enough Info\"]\n\t\t# class_labels = [\"Support\", \"Refute\"]\n\n\t\tif ymap is not None:\n\t\t\ty_pred = [ymap[yi] for yi in y_pred]\n\t\t\ty_true = [ymap[yi] for yi in y_true]\n\t\t\t# labels = [ymap[yi] for yi in class_labels]\n\t\tcm = confusion_matrix(y_true, y_pred, labels=class_labels)\n\t\t\n\t\tcm_sum = np.sum(cm, axis=1, keepdims=True)\n\t\tcm_perc = cm / cm_sum.astype(float) * 100\n\t\tannot = np.empty_like(cm).astype(str)\n\t\tnrows, ncols = cm.shape\n\n\t\tfor i in range(nrows):\n\t\t\tfor j in range(ncols):\n\t\t\t\tc = cm[i, j]\n\t\t\t\tp = cm_perc[i, j]\n\t\t\t\tif i == j:\n\t\t\t\t s = cm_sum[i]\n\t\t\t\t annot[i, j] = '%.1f%%\\n%d/%d' % (p, c, s)\n\t\t\t\telif c == 0:\n\t\t\t\t annot[i, j] = ''\n\t\t\t\telse:\n\t\t\t\t annot[i, j] = '%.1f%%\\n%d' % (p, c)\n\n\t\tcm = pd.DataFrame(cm, index=class_labels, columns=class_labels)\n\t\tcm.index.name = 'Actual'\n\t\tcm.columns.name = 'Predicted'\n\t\tfig, ax = plt.subplots(figsize=figsize)\n\t\tsns.heatmap(cm, annot=annot, fmt='', ax=ax, annot_kws={\"size\": 20})\n\t\tplt.title('Confusion Matrix of Multi-class classifier using '+str(method))\n\t\tplt.savefig(filename)\n","repo_name":"DeFacto/EvidenceRetrieval-ClaimClassification","sub_path":"models/DeepLearningModels/elmo/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"30693491161","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom gevent import monkey\nmonkey.patch_all()\nimport sys\nsys.path.append(\"../\")\nimport time\nimport re\n# import pickle\nfrom random import randint\nfrom urllib.parse import unquote\nfrom urllib.parse import urlparse\nfrom lxml import etree\nimport json\n# import hashlib\n\nimport requests\nfrom retrying import retry\n# from selenium import webdriver\n\n# 破解验证码\nfrom captcha_crack import amazon_captcha_crack\nfrom utils.util import UaPond\nfrom utils.util import Sleep\nfrom conf.setting import BASE_TYPE\nfrom conf.setting import PROXY_HTTP, PROXY_HTTPS\nfrom conf.setting import PROXY_VERIFY\nfrom Crawler.BaseParser import BaseParser\nfrom Crawler.DataOutput import DataOutput\n# from Crawler.reviewsParser import ReviewsParser\nfrom utils.decorator import timer\n\n\nclass BaseDownload:\n def __init__(self):\n pass\n\n\ndef is_RobotCheck(html_code):\n pattern = re.compile('Robot Check', re.S)\n RobotCheck = pattern.findall(html_code)\n if len(RobotCheck) > 0:\n return True\n return False\n\n@timer\n@retry(stop_max_attempt_number=15)\ndef get_html_useRequest(url, ua, ip, cookie, debug_log, referer, ipQ, urlQ=None, timeout=90, retry=1, goodsUrl='', url_type='', asin=''):\n headers = {\n 'Accept': 
'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'close',\n 'User-Agent': ua,\n 'Host': 'www.amazon.com',\n 'Referer': referer,\n 'Upgrade-Insecure-Requests': '1',\n 'Cache-Control': 'max-age=0',\n }\n proxy = {'https': PROXY_HTTPS, 'http': PROXY_HTTP}\n print(proxy)\n html = ''\n cookies = {}\n status_code = 0\n session = requests\n print('\\nheaders: ', headers)\n is_error = False\n if url.startswith('https://www.amazon.com') or url.startswith('http://www.amazon.com'):\n try:\n get_parmas = dict(url=url, headers=headers, proxies=proxy, timeout=timeout)\n if 'proxy.crawlera.com' in proxy.get('https', ''):\n get_parmas['verify'] = PROXY_VERIFY\n response = session.get(**get_parmas)\n status_code = response.status_code\n print('status_code', status_code)\n if status_code == 200 or status_code == 302 or status_code == 404:\n response.encoding = 'utf-8'\n responseCookies = response.cookies\n if not cookie:\n cookies = responseCookies\n if status_code == 404:\n if url_type == 'goods' and asin:\n DataOutput.record_not_found_goods(asin)\n if url_type == 'tosell' and asin:\n DataOutput.record_not_found_tosell(asin)\n html = response.text\n else:\n html = response.text\n if \"Enter the characters you see below\" in html:\n raise Exception(\"Exception: Captcha\")\n if \"Enter the characters you see below\" in html:\n raise Exception(\"Exception: block\")\n if 'proxy.crawlera.com' not in proxy.get('https', ''):\n time.sleep(3)\n return html, cookies, is_error\n except Exception as e:\n if status_code != 404:\n is_error = True\n debug_log.error('[%s] get_html_useRequest下载 [%s] 时 [%s]' % (ip, url, e))\n if \"NotFound\" in str(e):\n raise Exception(\"NOT_FOUND\")\n else:\n debug_log.error('[%s] get_html_useRequest下载 [%s] url不合法' % (ip, url))\n return html, cookies, is_error\n\n\"\"\"\n@retry(stop_max_attempt_number=10)\ndef psot_useRequest(url, post_data, ua, ip, cookie, debug_log, referer, ipQ=None, urlQ=None, timeout=10, retry=0):\n '''cookie不能为空'''\n headers = {\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'accept-language': 'en-US,en;q=0.5',\n 'accept-encoding': 'gzip, deflate, br',\n 'user-agent': ua,\n 'host': 'www.amazon.com',\n 'referer': referer,\n 'cache-control': 'max-age=0',\n 'content-type': 'application/x-www-form-urlencoded',\n 'content-length': '%s' % (randint(510, 690)),\n 'upgrade-insecure-requests': '1',\n }\n proxy = {'https': PROXY_HTTPS, 'http': PROXY_HTTP}\n is_error = False\n html = ''\n session = requests.Session()\n session.cookies = cookie\n if url.startswith('https://www.amazon.com') or url.startswith('http://www.amazon.com'):\n try:\n json_data = json.dumps(post_data)\n get_parmas = dict(url=url, headers=headers, proxies=proxy, data=post_data, json=json_data, timeout=60)\n if 'proxy.crawlera.com' in proxy.get('https', ''):\n get_parmas['verify'] = PROXY_VERIFY\n response = session.post(**get_parmas)\n status_code = response.status_code\n if status_code == 200 or status_code == 302:\n response.encoding = 'utf-8'\n html = response.text\n if \"Enter the characters you see below\" in html:\n raise Exception(\"Exception: Captcha\")\n if \"Enter the characters you see below\" in html:\n raise Exception(\"Exception: block\")\n return html, is_error\n except Exception as e:\n is_error = True\n debug_log.error('[%s] psot_useRequest 下载 [%s] 时 [%s]' % (ip, url, e))\n if \"NotFound\" in str(e):\n raise Exception(\"NOT_FOUND\")\n else:\n 
debug_log.error('[%s] psot_useRequest下载 [%s] url不合法' % (ip, url))\n\n return html, is_error\n\n\ndef confirm_html(html_code, url, ua, ip, cookie, debug_log, referer, ipQ, retry=2):\n # 验证是否验证码\n result_html = html_code\n if is_RobotCheck(html_code):\n # 获取验证码参数\n action, amzn, amznr = get_verify_parameter(html_code, url)\n # 拼接图片请求referer\n referer1 = 'https://www.amazon.com%s?amzn=%s&amzn-r=%s&field-keywords=' % \\\n (action, amzn, amznr)\n # 先识别验证码图片\n debug_log.info('ip[%s] 第%s次破解验证码程序启动, 原url[%s], 验证码url[%s]' % (ip, 3 - retry + 1, url, referer1))\n verify_string = get_verify_img(html_code, ua, ip, cookie, debug_log, referer1, ipQ)\n if verify_string:\n print('verify_string: ', verify_string)\n # 拼接请求url\n # %s1: action, %s2: amzn, %s3: zmznr, %s4: verify_string,\n verify_url = 'https://www.amazon.com%s?amzn=%s&amzn-r=%s&field-keywords=%s' % \\\n (action, amzn, amznr, verify_string)\n print('verify_url: ', verify_url)\n result_html = get_verify_html(verify_url, ua, ip, cookie, debug_log, referer, ipQ)\n if retry > 0:\n return confirm_html(result_html, url, ua, ip, cookie, debug_log, referer, ipQ, retry=(retry - 1))\n else:\n if retry == 3:\n debug_log.info('url[%s] 没有遇到验证码 ip[%s]' % (url, ip))\n elif retry == 0:\n if is_RobotCheck(result_html):\n debug_log.war('ip[%s] 第%s次破解验证码依然失败, url[%s], 放弃破解' % (ip, 2 - retry + 1, url))\n else:\n debug_log.war('ip[%s] 第%s次破解验证码成功, url[%s]' % (ip, 2 - retry + 1, url))\n else:\n debug_log.war('ip[%s] 第%s次破解验证码成功, url[%s]' % (ip, 2 - retry + 1, url))\n\n\n return result_html\n\n\ndef get_verify_html(url, ua, ip, cookie, debug_log, referer, ipQ, timeout=10):\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'User-Agent': ua,\n 'Host': 'www.amazon.com',\n 'Referer': referer,\n 'Upgrade-Insecure-Requests': '1',\n }\n html = ''\n proxy = {'https': 'https://%s' % (ip), 'http': 'http://%s' % (ip)}\n if BASE_TYPE == 'develop':\n proxy = None\n print('request cookie: ', cookie)\n session = requests.Session()\n if cookie:\n session.cookies = cookie\n cookie_dict = requests.utils.dict_from_cookiejar(cookie)\n print('\\n', cookie_dict, type(cookie_dict), '\\n')\n cookies_list = []\n for k, v in cookie_dict.items():\n kv = '%s=%s' % (k, v)\n cookies_list.append(kv)\n cookie_str = '; '.join(cookies_list)\n headers['Cookie'] = cookie_str\n print('\\nCookie_dict: ', cookie_str, '\\n')\n print('\\nheaders: ', headers)\n SLEEP = Sleep.randint_sleep(9, 16)\n print('\\nrequest休眠时间: ', SLEEP, type(SLEEP))\n time.sleep(SLEEP)\n # ipQ.record_use_times(ip)\n if url.startswith('https://www.amazon.com') or url.startswith('http://www.amazon.com'):\n response = session.get(url, headers=headers, proxies=proxy, timeout=timeout)\n status_code = response.status_code\n if status_code == 200 or status_code == 302:\n response.encoding = 'utf-8'\n html = response.text\n else:\n debug_log.error('[%s] get_verify_html下载 [%s] url不合法' % (ip, url))\n\n return html\n\n\ndef get_verify_parameter(html_code, url):\n action = '/errors/validateCaptcha'\n htmEtree = etree.HTML(html_code)\n amzn = htmEtree.xpath('//input[@name=\"amzn\"]/@value')\n print('amzn: ', amzn)\n if amzn:\n amzn = unquote(amzn[0])\n else:\n amzn = ''\n\n if 'field-keywords' in url:\n amznr = htmEtree.xpath('//input[name=\"amzn-r\"]/@value')\n if amznr:\n amznr = unquote(amznr[0])\n else:\n url_parse = urlparse(url)\n amznr = unquote(url_parse.path + '?' 
+url_parse.query)\n\n\n return action, amzn, amznr\n\n\ndef get_img(url, ua, ip, cookie, debug_log, referer, ipQ, timeout=10):\n '''cookie不能为空'''\n headers = {\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'accept-language': 'en-US,en;q=0.5',\n 'accept-encoding': 'gzip, deflate, br',\n 'user-agent': ua,\n 'cache-control': 'max-age=0',\n 'content-type': 'application/x-www-form-urlencoded',\n 'upgrade-insecure-requests': '1',\n }\n proxy = {'https': 'https://%s' % (ip), 'http': 'http://%s' % (ip)}\n if BASE_TYPE == 'develop':\n proxy = None\n # print('img cookie: ', cookie)\n session = requests.Session()\n session.cookies = cookie\n cookie_dict = requests.utils.dict_from_cookiejar(cookie)\n # print('\\nimg cookie_dict: ', cookie_dict, type(cookie_dict), '\\n')\n cookies_list = []\n for k, v in cookie_dict.items():\n kv = '%s=%s' % (k, v)\n cookies_list.append(kv)\n cookie_str = '; '.join(cookies_list)\n headers['cookie'] = cookie_str\n SLEEP = Sleep.random_sleep() + 3 \n print('\\nget img 休眠时间: ', SLEEP, type(SLEEP))\n time.sleep(SLEEP)\n response = session.get(url, headers=headers, proxies=proxy, timeout=timeout)\n status_code = response.status_code\n print('img status_code', status_code)\n img = ''\n if status_code == 200 or status_code == 302:\n response.encoding = 'utf-8'\n img = response.content\n # print('img response.content: ', len(img), type(img))\n # print('\\nimg response.cookies: ', response.cookies, '\\n')\n return img\n\n\ndef get_verify_img(html_code, ua, ip, cookie, debug_log, referer, ipQ):\n xpath_list = [\n '//div[@class=\"a-row a-text-center\"]/img/@src',\n '//div[@class=\"a-box\"]/div/div/img/@src',\n '//div[@class=\"a-box\"]//img/@src',\n ]\n url = ''\n img_string = ''\n img = None\n img_url = BaseParser.get_new_data(xpath_list=xpath_list, html_code=html_code)\n if len(img_url) > 0:\n url = img_url[0]\n if url and cookie:\n img = get_img(url, ua, ip, cookie, debug_log, referer, ipQ)\n if img:\n print('img: ', len(img), type(img))\n img_str_list= amazon_captcha_crack.main(img)\n img_string = ''.join(img_str_list)\n return img_string\n\n\"\"\"\n\nif __name__ == '__main__':\n time1 = time.time()\n # from pprint import pprint\n #\n ip = '192.126.168.2:3128'\n proxy = dict(\n https='https://%s' % (ip),\n )\n print(proxy)\n # html = get_html('https://www.amazon.com/dp/B01C4N6IBA', ip, proxy=proxy)\n # print(type(html))\n # print(len(html))\n # parser = HtmlParser()\n # pprint(parser.parser_goods(html, 'B01C4N6IBA'))\n time2 = time.time()\n print(time2 - time1)\n # from conf.setting import DB_CONFIG, REDIS_CONFIG, BASE_DIR\n #\n UA = UaPond.get_new_ua()\n # print(DB_CONFIG)\n # print(REDIS_CONFIG)\n # print(BASE_DIR)\n print(UA)\n\n","repo_name":"xusu12/spider_exercise","sub_path":"amazonSpider1.1/Spider/Crawler/Downloader.py","file_name":"Downloader.py","file_ext":"py","file_size_in_byte":12909,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"70019356414","text":"\"\"\"\n\tExiste uma outra maneira bem legal de trabalhar com argumentos padroes de funcoes\n\tQuando vc define argumentos padroes, ao inserir valores a eles vc tera de obedece a sequencia em que\n\ta funcao foi definida. 
Vc pode inserir argumento em qualquer sequencia usando palavras-chaves mais o valor\n\"\"\"\n# nesta funcao eu defino tres argumentos padroes, url, Flash e CGI\n# note que name nao e um argumento padrao, omitir um valor para ele sera considerado um erro de compilacao\ndef gerar_webSite(name, url = \"www.deitel.com\", Flash = \"nao\", CGI = \"sim\"):\n\tprint(\"Gerando site pedido pelo\",name,\"usando a url\",url)\n\n\tif Flash == \"sim\":\n\t\tprint(\"Flash esta ativado\")\n\tif CGI == \"sim\":\n\t\tprint(\"Scripts CGI's estao ativados\")\n\tprint(\"\") # apenas pula uma linha\n\n\ngerar_webSite(\"Deitel\")\n\n# note que aqui pela definicao da funcao, o paramero url vem antes que Flash, digitando explicitamente o nome dos paramentros e\n# os valores de seus argumentos, voce nao precisara obedecer uma sequencia\n\ngerar_webSite(\"Deitel\", Flash = \"sim\", url = \"www.deitel.com/new\")\n\n# nao precisa ter um argumento padrao para acessar o parametro por meio de palavras-chaves\n# a unica obrigacao, ja que name nao tem um argumento padrao, e que name tem ao menos um valor\ngerar_webSite(CGI = \"nao\", name = \"Bruno\")\n\n\n# gerar_webSite(CGI=\"nao\",Flash=\"sim\") # claramente essa linha de código gerará um erro\n\t\n","repo_name":"BrunoCerberus/Algoritmo","sub_path":"argumentos_com_palavraschaves.py","file_name":"argumentos_com_palavraschaves.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"9167298713","text":"# -*- coding: utf-8 -*-\nimport sys\nimport time\nfrom random import shuffle\nimport telebot\nTOKEN = \"424408336:AAHFZIhJSk-yn3hEwLE6YcA_rCtxV4FZMrg\"\n\nbot = telebot.TeleBot(TOKEN)\n\nclass Gamer:\n firstName = ''\n lastName = ''\n id = 0\n def __init__(self, _firstName, _lastName, _id):\n self.firstName = _firstName or ''\n self.lastName = _lastName or ''\n self.id = _id or 0\n \n def check(self, _id):\n return (_id == self.id)\n\n def getFullName(self):\n return u' '.join((self.firstName, self.lastName))\n\n def __str__(self):\n return u' '.join((self.firstName, self.lastName))\n #return self.firstName + ' ' + self.lastName\n\nclass GameSession:\n gamers = []\n ctime = None\n alive = True\n chatID = None\n def __init__(self, _chatId): \n self.chatID = _chatId\n self.ctime = time.time()\n self.gamers = []\n self.alive = True \n \n def sendMsg(self, msg):\n bot.send_message(self.chatID, msg)\n\n def addNewPlayer(self, message):\n #вдруг нажали + дважды\n for g in self.gamers:\n if g.check(message.from_user.id):\n return\n pass\n first = (len(self.gamers) == 0) \n self.gamers.append(Gamer(message.from_user.first_name,message.from_user.last_name, message.from_user.id))\n if first:\n self.sendMsg('Собираем команды...')\n else:\n self.printCommandList()\n\n def removePlayer(self, message):\n pl = None\n for g in self.gamers:\n if g.check(message.from_user.id):\n pl = g\n if pl != None:\n ind = self.gamers.index(pl)\n try:\n self.sendMsg('Игрок отказался ('+ pl.getFullName() +')')\n except UnicodeDecodeError:\n self.sendMsg('Игрок отказался (ошибка при отображении имени)')\n del self.gamers[ind] \n\n def isAlive(self):\n if (time.time() - self.ctime) > 600:\n self.alive = False\n return self.alive\n\n def getGamers(self):\n return self.gamers\n\n def getGamersLen(self):\n return len(self.gamers)\n\n def getSessionOwner(self):\n if len(self.gamers) == 0:\n return None\n return self.gamers[0] \n\n def printCommandList(self):\n result = 'Собрали '+ str(len(self.gamers)) +': ' \n 
try:\n for g in self.gamers:\n result = result + g.getFullName() + '; '\n except UnicodeDecodeError:\n self.sendMsg('printCommandList -> UnicodeDecodeError')\n except:\n self.sendMsg('printCommandList -> error')\n self.sendMsg(result)\n \n def random(self):\n #рандомим \n first = self.gamers[0]\n del self.gamers[0]\n shuffle(self.gamers) \n playstr = 'Играют:\\n' + first.getFullName() + '\\n'\n index = 0\n for g in self.gamers:\n if index == 3:\n playstr = playstr + 'Отдыхают:\\n'\n playstr = playstr + g.getFullName() + '\\n'\n index = index + 1\n \n self.sendMsg(playstr)\n self.alive = False\n\ngs = None\n\ndef getGS(message):\n global gs\n if (gs == None) or (gs.isAlive() == False):\n gs = GameSession(message.chat.id)\n return gs\n\ndef deleteGS():\n global gs\n del gs\n gs = None\n\n@bot.message_handler(commands=['help'])\ndef commandHelp(message):\n bot.send_message(message.chat.id, 'Привет! Собираем команду командой \"/?\". Ставим \"/+\" для участия. Для рандома введите команду /random. Передумали? Команда /-. Текущую пати смотрим по команде /team. Если что-то пошло не так - можно использовать команду /clear. Так победим!')\n\n@bot.message_handler(commands=['clear'])\ndef commandClear(message):\n gs = getGS(message)\n owner = gs.getSessionOwner()\n if (owner != None) and (owner.id == message.from_user.id ):\n deleteGS()\n bot.send_message(message.chat.id, 'Убил текущую сессию')\n\t\n@bot.message_handler(commands=['random'])\ndef commandRandom(message):\n gs = getGS(message)\n #проверка на меньше 4\n if gs.getGamersLen() < 5:\n bot.send_message(message.chat.id, 'Не хватает игроков! Нужно 5 и более.')\n return\n owner = gs.getSessionOwner()\n if (owner == None) or (owner.id != message.from_user.id ):\n bot.send_message(message.chat.id, 'Рандомить может только хозяин сессии.')\n return\n #рандомим \n gs.random() \n deleteGS()\n\n@bot.message_handler(commands=['?', 'new', '+', 'play'])\ndef commandPlay(message):\n gs = getGS(message)\n gs.addNewPlayer(message)\n\n@bot.message_handler(commands=['minus','-'])\ndef commandMinus(message):\n gs = getGS(message)\n gs.removePlayer(message)\n\n@bot.message_handler(commands=['team'])\ndef commandTeam(message):\n gs = getGS(message)\n gs.printCommandList()\n\n@bot.message_handler(commands=['killbot'])\ndef commandExit(message):\n bot.send_message(message.chat.id, 'Kill Bot')\n sys.exit()\n\n@bot.message_handler(commands=['dbg'])\ndef commandDbg(message):\n bot.send_message(message.chat.id, message)\n\nif __name__ == '__main__':\n bot.polling(none_stop=True)\n","repo_name":"grammidin4eg/short_projects","sub_path":"telegram_tennis_random_bot/TennisRandomBot.py","file_name":"TennisRandomBot.py","file_ext":"py","file_size_in_byte":5579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1329201049","text":"import errno\n\nimport rospy\n\nfrom baxter_core_msgs.msg import (\n CameraControl,\n CameraSettings,\n)\nfrom baxter_core_msgs.srv import (\n CloseCamera,\n ListCameras,\n OpenCamera,\n)\n\n\nclass CameraController(object):\n \"\"\"\n Interface class for controlling camera settings on the Baxter robot.\n \"\"\"\n\n # Valid resolutions\n MODES = [\n (1280, 800),\n (960, 600),\n (640, 400),\n (480, 300),\n (384, 240),\n (320, 200),\n ]\n\n # Used to represent when the camera is using automatic controls.\n # Valid for exposure, gain and white balance.\n CONTROL_AUTO = -1\n\n def __init__(self, name):\n \"\"\"\n Constructor.\n\n @param name: camera identifier. 
You can get a list of valid\n identifiers by calling the ROS service /cameras/list.\n\n Expected names are right_hand_camera, left_hand_camera\n and head_camera. However if the cameras are not\n identified via the parameter server, they are simply\n indexed starting at 0.\n \"\"\"\n self._id = name\n\n list_svc = rospy.ServiceProxy('/cameras/list', ListCameras)\n rospy.wait_for_service('/cameras/list', timeout=10)\n if not self._id in list_svc().cameras:\n raise AttributeError(\n (\"Cannot locate a service for camera name '{0}'. \"\n \"Close a different camera first and try again.\".format(self._id)))\n\n self._open_svc = rospy.ServiceProxy('/cameras/open', OpenCamera)\n self._close_svc = rospy.ServiceProxy('/cameras/close', CloseCamera)\n\n self._settings = CameraSettings()\n self._settings.width = 320\n self._settings.height = 200\n self._settings.fps = 20\n self._open = False\n\n def _reload(self):\n self.open()\n\n def _get_value(self, control, default):\n lookup = [c.value for c in self._settings.controls if c.id == control]\n try:\n return lookup[0]\n except IndexError:\n return default\n\n def _set_control_value(self, control, value):\n lookup = [c for c in self._settings.controls if c.id == control]\n try:\n lookup[0].value = value\n except IndexError:\n self._settings.controls.append(CameraControl(control, value))\n\n @property\n def resolution(self):\n \"\"\"\n Camera resolution as a tuple. (width, height). Valid resolutions are\n listed as tuples in CameraController.MODES\n \"\"\"\n return (self._settings.width, self._settings.height)\n\n @resolution.setter\n def resolution(self, res):\n res = tuple(res)\n if len(res) != 2:\n raise AttributeError(\"Invalid resolution specified\")\n\n if not res in self.MODES:\n raise ValueError(\"Invalid camera mode %dx%d\" % (res[0], res[1]))\n\n self._settings.width = res[0]\n self._settings.height = res[1]\n self._reload()\n\n @property\n def fps(self):\n \"\"\"\n Camera frames per second\n \"\"\"\n return self._settings.fps\n\n @fps.setter\n def fps(self, fps):\n self._settings.fps = fps\n self._reload()\n\n @property\n def exposure(self):\n \"\"\"\n Camera exposure. If autoexposure is on, returns\n CameraController.CONTROL_AUTO\n \"\"\"\n return self._get_value(CameraControl.CAMERA_CONTROL_EXPOSURE,\n self.CONTROL_AUTO)\n\n @exposure.setter\n def exposure(self, exposure):\n \"\"\"\n Camera Exposure. Valid range is 0-100 or CameraController.CONTROL_AUTO\n \"\"\"\n exposure = int(exposure)\n if (exposure < 0 or exposure > 100) and exposure != self.CONTROL_AUTO:\n raise ValueError(\"Invalid exposure value\")\n\n self._set_control_value(CameraControl.CAMERA_CONTROL_EXPOSURE,\n exposure)\n self._reload()\n\n @property\n def gain(self):\n \"\"\"\n Camera gain. If autogain is on, returns CameraController.CONTROL_AUTO\n \"\"\"\n return self._get_value(CameraControl.CAMERA_CONTROL_GAIN,\n self.CONTROL_AUTO)\n\n @gain.setter\n def gain(self, gain):\n \"\"\"\n Camera gain. Range is 0-79 or CameraController.CONTROL_AUTO\n \"\"\"\n gain = int(gain)\n if (gain < 0 or gain > 79) and gain != self.CONTROL_AUTO:\n raise ValueError(\"Invalid gain value\")\n\n self._set_control_value(CameraControl.CAMERA_CONTROL_GAIN, gain)\n self._reload()\n\n @property\n def white_balance_red(self):\n \"\"\"\n White balance red. 
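The record continues below with more instances of the same validate-then-reload property pattern; a reduced, hypothetical version of that pattern (Knob is not part of the Baxter API):

class Knob(object):
    CONTROL_AUTO = -1

    def __init__(self):
        self._exposure = self.CONTROL_AUTO

    @property
    def exposure(self):
        return self._exposure

    @exposure.setter
    def exposure(self, value):
        value = int(value)
        if (value < 0 or value > 100) and value != self.CONTROL_AUTO:
            raise ValueError("Invalid exposure value")
        self._exposure = value  # CameraController additionally re-opens the camera here

k = Knob()
k.exposure = 50
print(k.exposure)  # 50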
If autocontrol is on, returns\n CameraController.CONTROL_AUTO\n \"\"\"\n return self._get_value(CameraControl.CAMERA_CONTROL_WHITE_BALANCE_R,\n self.CONTROL_AUTO)\n\n @white_balance_red.setter\n def white_balance_red(self, value):\n \"\"\"\n White balance red. Range is 0-4095 or CameraController.CONTROL_AUTO\n \"\"\"\n value = int(value)\n if (value < 0 or value > 4095) and value != self.CONTROL_AUTO:\n raise ValueError(\"Invalid white balance value\")\n\n self._set_control_value(CameraControl.CAMERA_CONTROL_WHITE_BALANCE_R,\n value)\n self._reload()\n\n @property\n def white_balance_green(self):\n \"\"\"\n White balance green. If autocontrol is on, returns\n CameraController.CONTROL_AUTO\n \"\"\"\n return self._get_value(CameraControl.CAMERA_CONTROL_WHITE_BALANCE_G,\n self.CONTROL_AUTO)\n\n @white_balance_green.setter\n def white_balance_green(self, value):\n \"\"\"\n White balance green. Range is 0-4095 or CameraController.CONTROL_AUTO\n \"\"\"\n value = int(value)\n if (value < 0 or value > 4095) and value != self.CONTROL_AUTO:\n raise ValueError(\"Invalid white balance value\")\n\n self._set_control_value(CameraControl.CAMERA_CONTROL_WHITE_BALANCE_G,\n value)\n self._reload()\n\n @property\n def white_balance_blue(self):\n \"\"\"\n White balance blue. If autocontrol is on, returns\n CameraController.CONTROL_AUTO\n \"\"\"\n return self._get_value(CameraControl.CAMERA_CONTROL_WHITE_BALANCE_B,\n self.CONTROL_AUTO)\n\n @white_balance_blue.setter\n def white_balance_blue(self, value):\n \"\"\"\n White balance blue. Range is 0-4095 or CameraController.CONTROL_AUTO\n \"\"\"\n value = int(value)\n if (value < 0 or value > 4095) and value != self.CONTROL_AUTO:\n raise ValueError(\"Invalid white balance value\")\n\n self._set_control_value(CameraControl.CAMERA_CONTROL_WHITE_BALANCE_B,\n value)\n self._reload()\n\n @property\n def window(self):\n \"\"\"\n Camera windowing, returns a tuple, (x, y)\n \"\"\"\n x = self._get_value(CameraControl.CAMERA_CONTROL_WINDOW_X,\n self.CONTROL_AUTO)\n if (x == self.CONTROL_AUTO):\n return (tuple(map(lambda x: x / 2, self.resolution)) if\n self.half_resolution else\n self.resolution)\n else:\n return (x, self._get_value(CameraControl.CAMERA_CONTROL_WINDOW_Y,\n self.CONTROL_AUTO))\n\n @window.setter\n def window(self, win):\n \"\"\"\n Set camera window. The max size is a function of the current camera\n resolution and if half_resolution is enabled or not.\n \"\"\"\n x, y = tuple(win)\n cur_x, cur_y = self.resolution\n limit_x = 1280 - cur_x\n limit_y = 800 - cur_y\n\n if self.half_resolution:\n limit_x /= 2\n limit_y /= 2\n\n if x < 0 or x > limit_x:\n raise ValueError(\"Max X window is %d\" % (limit_x,))\n\n if y < 0 or y > limit_y:\n raise ValueError(\"Max Y window is %d\" % (limit_y,))\n\n self._set_control_value(CameraControl.CAMERA_CONTROL_WINDOW_X, x)\n self._set_control_value(CameraControl.CAMERA_CONTROL_WINDOW_Y, y)\n self._reload()\n\n @property\n def flip(self):\n \"\"\"\n Camera flip. Returns True if flip is enabled on the camera.\n \"\"\"\n return self._get_value(CameraControl.CAMERA_CONTROL_FLIP, False)\n\n @flip.setter\n def flip(self, value):\n self._set_control_value(CameraControl.CAMERA_CONTROL_FLIP,\n int(value != 0))\n self._reload()\n\n @property\n def mirror(self):\n \"\"\"\n Camera mirror. 
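Worked numbers for the window limits computed in the setter further below, assuming a hypothetical 640x400 mode with binning enabled:

cur_x, cur_y = 640, 400
limit_x, limit_y = 1280 - cur_x, 800 - cur_y  # sensor size minus current resolution: 640, 400
half_resolution = True
if half_resolution:
    limit_x //= 2  # 320
    limit_y //= 2  # 200
print(limit_x, limit_y)  # valid offsets: x in 0..320, y in 0..200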
Returns True if mirror is enabled on the camera.\n \"\"\"\n return self._get_value(CameraControl.CAMERA_CONTROL_MIRROR, False)\n\n @mirror.setter\n def mirror(self, value):\n self._set_control_value(CameraControl.CAMERA_CONTROL_MIRROR,\n int(value != 0))\n self._reload()\n\n @property\n def half_resolution(self):\n \"\"\"\n Return True if binning/half resolution is enabled on the camera.\n \"\"\"\n return self._get_value(CameraControl.CAMERA_CONTROL_RESOLUTION_HALF,\n False)\n\n @half_resolution.setter\n def half_resolution(self, value):\n self._set_control_value(CameraControl.CAMERA_CONTROL_RESOLUTION_HALF,\n int(value != 0))\n self._reload()\n\n def open(self):\n \"\"\"\n Open the camera currently under control.\n \"\"\"\n if self._id == 'head_camera':\n self._set_control_value(CameraControl.CAMERA_CONTROL_FLIP, True)\n self._set_control_value(CameraControl.CAMERA_CONTROL_MIRROR, True)\n ret = self._open_svc(self._id, self._settings)\n if ret.err != 0:\n raise OSError(ret.err, \"Failed to open camera\")\n self._open = True\n\n def close(self):\n \"\"\"\n Close, if necessary the camera.\n \"\"\"\n ret = self._close_svc(self._id)\n if ret.err != 0 and ret.err != errno.EINVAL:\n raise OSError(ret.err, \"Failed to close camera\")\n self._open = False\n","repo_name":"RethinkRobotics/baxter_interface","sub_path":"src/baxter_interface/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":10252,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"78"} +{"seq_id":"34054643465","text":"import argparse\nimport datetime\nimport math\nimport os\nimport subprocess\nimport sys\nimport time\nfrom collections import defaultdict, deque\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import datasets\nfrom torchvision import transforms\n\n\ndef init_distributed_mode(args):\n # launched with torch.distributed.launch\n if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:\n args.rank = int(os.environ[\"RANK\"])\n args.world_size = int(os.environ['WORLD_SIZE'])\n args.gpu = int(os.environ['LOCAL_RANK'])\n # launched with submitit on a slurm cluster\n elif 'SLURM_PROCID' in os.environ:\n args.rank = int(os.environ['SLURM_PROCID'])\n args.gpu = args.rank % torch.cuda.device_count()\n # launched naively with `python main_dino.py`\n # we manually add MASTER_ADDR and MASTER_PORT to env variables\n elif torch.cuda.is_available():\n print('Will run the code on one GPU.')\n args.rank, args.gpu, args.world_size = 0, 0, 1\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = '29500'\n else:\n print('Does not support training without GPU.')\n sys.exit(1)\n\n dist.init_process_group(\n backend='gloo' if 'win' in sys.platform else 'nccl',\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n )\n\n torch.cuda.set_device(args.gpu)\n print('| distributed init (rank {}): {}'.format(\n args.rank, args.dist_url), flush=True)\n dist.barrier()\n setup_for_distributed(args.rank == 0)\n\n\ndef setup_for_distributed(is_master):\n \"\"\"\n This function disables printing when not in master process\n \"\"\"\n import builtins as __builtin__\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n force = kwargs.pop('force', False)\n if is_master or force:\n builtin_print(*args, **kwargs)\n\n __builtin__.print = print\n\n\ndef 
fix_random_seeds(seed=42):\n \"\"\"\n Fix random seeds.\n \"\"\"\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n\n\ndef is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True\n\n\ndef get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()\n\n\ndef get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()\n\n\ndef is_main_process():\n return get_rank() == 0\n\n\ndef save_on_master(*args, **kwargs):\n if is_main_process():\n torch.save(*args, **kwargs)\n\n\ndef get_sha():\n cwd = os.path.dirname(os.path.abspath(__file__))\n\n def _run(command):\n return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()\n sha = 'N/A'\n diff = \"clean\"\n branch = 'N/A'\n try:\n sha = _run(['git', 'rev-parse', 'HEAD'])\n subprocess.check_output(['git', 'diff'], cwd=cwd)\n diff = _run(['git', 'diff-index', 'HEAD'])\n diff = \"has uncommited changes\" if diff else \"clean\"\n branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n except Exception:\n pass\n message = f\"sha: {sha}, status: {diff}, branch: {branch}\"\n return message\n\n\ndef has_batchnorms(model):\n bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)\n for name, module in model.named_modules():\n if isinstance(module, bn_types):\n return True\n return False\n\n\ndef build_transform(args):\n if not args.resize_all_inputs and args.dataset == \"ImageNet\":\n return transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n elif args.resize_all_inputs and args.dataset == \"ImageNet\":\n return transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n elif args.dataset == \"CIFAR10\":\n return transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),\n ])\n elif args.dataset == \"CIFAR100\":\n return transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n print(f\"Does not support dataset: {args.dataset}\")\n sys.exit(1)\n\n\ndef build_dataset(is_train, args):\n transform = build_transform(args)\n if args.dataset == 'CIFAR10':\n return datasets.CIFAR10(args.data_path, download=True, train=is_train, transform=transform)\n if args.dataset == 'CIFAR100':\n return datasets.CIFAR100(args.data_path, download=True, train=is_train, transform=transform)\n elif args.dataset == 'ImageNet':\n root = os.path.join(args.data_path, 'train' if is_train else 'val')\n dataset = datasets.ImageFolder(root, transform=transform)\n return dataset\n print(f\"Does not support dataset: {args.dataset}\")\n sys.exit(1)\n\n\nclass GradientReverse(torch.autograd.Function):\n scale = 1.0\n\n @staticmethod\n def forward(ctx, x):\n # autograd checks for changes in tensor to determine if backward should be called\n return x.view_as(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n return GradientReverse.scale * grad_output.neg()\n\n\ndef grad_reverse(x, scale=1.0):\n GradientReverse.scale = scale\n return GradientReverse.apply(x)\n\n\nclass GradientRescale(torch.autograd.Function):\n scale = 1.0\n\n @staticmethod\n def forward(ctx, x):\n # autograd 
checks for changes in tensor to determine if backward should be called\n return x.view_as(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n return GradientRescale.scale * grad_output\n\n\ndef grad_rescale(x, scale=1.0):\n GradientRescale.scale = scale\n return GradientRescale.apply(x)\n\n\ndef image_grid(images, original_images, epoch, plot_size=16):\n \"\"\"Return a 5x5 grid of the MNIST images as a matplotlib figure.\"\"\"\n # Create a figure to contain the plot.\n figure = plt.figure(figsize=(20, 50))\n figure.tight_layout()\n num_images = min(len(original_images), plot_size)\n plt.subplots_adjust(hspace=0.5)\n\n g1 = images[0]\n g2 = images[1]\n\n titles = [f\"orig@{epoch} epoch\", \"global 1\", \"global 2\", \"local 1\", \"local 2\"]\n total = 0\n for i in range(num_images): # orig_img in enumerate(original_images, 1):\n orig_img = original_images[i]\n g1_img = g1[i]\n g2_img = g2[i]\n all_images = [orig_img, g1_img, g2_img]\n for j in range(3):\n total += 1\n\n plt.subplot(num_images, 3, total, title=titles[j])\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n\n img = all_images[j].cpu().detach().numpy()\n\n if img.shape[0] == 3:\n # CIFAR100 and ImageNet case\n img = np.moveaxis(img, 0, -1)\n else:\n # MNIST case\n img = img.squeeze()\n\n plt.imshow(np.clip(img, 0, 1))\n\n return figure\n\n\ndef theta_heatmap(theta, epoch):\n figure, ax = plt.subplots()\n # figure.tight_layout()\n sns.heatmap(theta, annot=True)\n ax.set_title(f'Theta @ {epoch} epoch')\n return figure\n\n\nclass SummaryWriterCustom(SummaryWriter):\n def __init__(self, log_dir, plot_size):\n super().__init__(log_dir=log_dir)\n self.plot_size = plot_size\n matplotlib.use('Agg')\n\n def write_image_grid(self, tag, images, original_images, epoch, global_step):\n fig = image_grid(images=images, original_images=original_images, epoch=epoch, plot_size=self.plot_size)\n self.add_figure(tag, fig, global_step=global_step)\n\n def write_theta_heatmap(self, tag, theta, epoch, global_step):\n fig = theta_heatmap(theta, epoch)\n self.add_figure(tag, fig, global_step=global_step)\n\n\ndef summary_writer_write_images_thetas(summary_writer, stn_images, images, thetas, epoch, it):\n theta_g1 = thetas[0][0].cpu().detach().numpy()\n theta_g2 = thetas[1][0].cpu().detach().numpy()\n summary_writer.write_image_grid(tag=\"images\", images=stn_images, original_images=images, epoch=epoch, global_step=it)\n summary_writer.write_theta_heatmap(tag=\"theta_g1\", theta=theta_g1, epoch=epoch, global_step=it)\n summary_writer.write_theta_heatmap(tag=\"theta_g2\", theta=theta_g2, epoch=epoch, global_step=it)\n\n theta_g_euc_norm = np.linalg.norm(np.double(theta_g2 - theta_g1), 2)\n summary_writer.add_scalar(tag=\"theta global eucl. 
norm.\", scalar_value=theta_g_euc_norm, global_step=it)\n\n\nclass SmoothedValue(object):\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20, fmt=None):\n if fmt is None:\n fmt = \"{median:.6f} ({global_avg:.6f})\"\n self.deque = deque(maxlen=window_size)\n self.total = 0.0\n self.count = 0\n self.fmt = fmt\n\n def update(self, value, n=1):\n self.deque.append(value)\n self.count += n\n self.total += value * n\n\n def synchronize_between_processes(self):\n \"\"\"\n Warning: does not synchronize the deque!\n \"\"\"\n if not is_dist_avail_and_initialized():\n return\n t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')\n dist.barrier()\n dist.all_reduce(t)\n t = t.tolist()\n self.count = int(t[0])\n self.total = t[1]\n\n @property\n def median(self):\n d = torch.tensor(list(self.deque))\n return d.median().item()\n\n @property\n def avg(self):\n d = torch.tensor(list(self.deque), dtype=torch.float32)\n return d.mean().item()\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n @property\n def max(self):\n return max(self.deque)\n\n @property\n def value(self):\n return self.deque[-1]\n\n def __str__(self):\n return self.fmt.format(\n median=self.median,\n avg=self.avg,\n global_avg=self.global_avg,\n max=self.max,\n value=self.value)\n\n\ndef reduce_dict(input_dict, average=True):\n \"\"\"\n Args:\n input_dict (dict): all the values will be reduced\n average (bool): whether to do average or sum\n Reduce the values in the dictionary from all processes so that all processes\n have the averaged results. Returns a dict with the same fields as\n input_dict, after reduction.\n \"\"\"\n world_size = get_world_size()\n if world_size < 2:\n return input_dict\n with torch.no_grad():\n names = []\n values = []\n # sort the keys so that they are consistent across processes\n for k in sorted(input_dict.keys()):\n names.append(k)\n values.append(input_dict[k])\n values = torch.stack(values, dim=0)\n dist.all_reduce(values)\n if average:\n values /= world_size\n reduced_dict = {k: v for k, v in zip(names, values)}\n return reduced_dict\n\n\nclass MetricLogger(object):\n def __init__(self, delimiter=\"\\t\"):\n self.meters = defaultdict(SmoothedValue)\n self.delimiter = delimiter\n\n def update(self, **kwargs):\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n assert isinstance(v, (float, int))\n self.meters[k].update(v)\n\n def __getattr__(self, attr):\n if attr in self.meters:\n return self.meters[attr]\n if attr in self.__dict__:\n return self.__dict__[attr]\n raise AttributeError(\"'{}' object has no attribute '{}'\".format(\n type(self).__name__, attr))\n\n def __str__(self):\n loss_str = []\n for name, meter in self.meters.items():\n loss_str.append(\n \"{}: {}\".format(name, str(meter))\n )\n return self.delimiter.join(loss_str)\n\n def synchronize_between_processes(self):\n for meter in self.meters.values():\n meter.synchronize_between_processes()\n\n def add_meter(self, name, meter):\n self.meters[name] = meter\n\n def log_every(self, iterable, print_freq, header=None):\n i = 0\n if not header:\n header = ''\n start_time = time.time()\n end = time.time()\n iter_time = SmoothedValue(fmt='{avg:.6f}')\n data_time = SmoothedValue(fmt='{avg:.6f}')\n space_fmt = ':' + str(len(str(len(iterable)))) + 'd'\n if torch.cuda.is_available():\n log_msg = self.delimiter.join([\n header,\n '[{0' + space_fmt + '}/{1}]',\n 
'eta: {eta}',\n '{meters}',\n 'time: {time}',\n 'data: {data}',\n 'max mem: {memory:.0f}'\n ])\n else:\n log_msg = self.delimiter.join([\n header,\n '[{0' + space_fmt + '}/{1}]',\n 'eta: {eta}',\n '{meters}',\n 'time: {time}',\n 'data: {data}'\n ])\n MB = 1024.0 * 1024.0\n for obj in iterable:\n data_time.update(time.time() - end)\n yield obj\n iter_time.update(time.time() - end)\n if i % print_freq == 0 or i == len(iterable) - 1:\n eta_seconds = iter_time.global_avg * (len(iterable) - i)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n if torch.cuda.is_available():\n print(log_msg.format(\n i, len(iterable), eta=eta_string,\n meters=str(self),\n time=str(iter_time), data=str(data_time),\n memory=torch.cuda.max_memory_allocated() / MB))\n else:\n print(log_msg.format(\n i, len(iterable), eta=eta_string,\n meters=str(self),\n time=str(iter_time), data=str(data_time)))\n i += 1\n end = time.time()\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('{} Total time: {} ({:.6f} s / it)'.format(\n header, total_time_str, total_time / len(iterable)))\n\n\ndef adjust_learning_rate(optimizer, init_lr, epoch, args):\n \"\"\"Decay the learning rate based on schedule\"\"\"\n cur_lr = init_lr * 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))\n for param_group in optimizer.param_groups:\n if 'fix_lr' in param_group and param_group['fix_lr']:\n param_group['lr'] = init_lr\n else:\n param_group['lr'] = cur_lr\n\n\nclass ColorAugmentation(object):\n def __init__(self):\n color_jitter = transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)\n gaussian_blur = transforms.GaussianBlur(1, (0.1, 2.0))\n self.augmentation = transforms.Compose([\n transforms.RandomApply([color_jitter], p=0.8),\n transforms.RandomGrayscale(p=0.2),\n transforms.RandomApply([gaussian_blur], p=1.0),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n transforms.ConvertImageDtype(torch.float32),\n ])\n\n def __call__(self, images):\n return [self.augmentation(img) for img in images]\n\n\ndef bool_flag(s):\n \"\"\"\n Parse boolean arguments from the command line.\n \"\"\"\n FALSY_STRINGS = {\"off\", \"false\", \"0\"}\n TRUTHY_STRINGS = {\"on\", \"true\", \"1\"}\n if s.lower() in FALSY_STRINGS:\n return False\n elif s.lower() in TRUTHY_STRINGS:\n return True\n else:\n raise argparse.ArgumentTypeError(\"invalid value for a boolean flag\")\n\n\ndef restart_from_checkpoint(ckp_path, run_variables=None, **kwargs):\n \"\"\"\n Re-start from checkpoint\n \"\"\"\n if not os.path.isfile(ckp_path):\n return\n print(\"Found checkpoint at {}\".format(ckp_path))\n\n # open checkpoint file\n checkpoint = torch.load(ckp_path, map_location=\"cpu\")\n\n # key is what to look for in the checkpoint file\n # value is the object to load\n # example: {'state_dict': model}\n for key, value in kwargs.items():\n if key in checkpoint and value is not None:\n try:\n msg = value.load_state_dict(checkpoint[key], strict=False)\n print(\"=> loaded '{}' from checkpoint '{}' with msg {}\".format(key, ckp_path, msg))\n except TypeError:\n try:\n msg = value.load_state_dict(checkpoint[key])\n print(\"=> loaded '{}' from checkpoint: '{}'\".format(key, ckp_path))\n except ValueError:\n print(\"=> failed to load '{}' from checkpoint: '{}'\".format(key, ckp_path))\n else:\n print(\"=> key '{}' not found in checkpoint: '{}'\".format(key, ckp_path))\n\n # re load variable important for the run\n if run_variables is not None:\n for var_name in 
run_variables:\n if var_name in checkpoint:\n run_variables[var_name] = checkpoint[var_name]\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n return [correct[:k].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]\n","repo_name":"rapanti/simsiam-stn","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":18084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72073938811","text":"\nimport time\n\ndef is_prime(n):\n\n if n == 1:\n return False\n for i in range(2,n):\n if n % i == 0:\n return False\n return True\n\n\n#for num in range(1,21):\n\n# print(num,is_prime(num)) #yes,its correctly identifies the prime numbers....but how fast is this function??????\n\n\nt0 = time.time()\n\nfor n in range(1,10000):\n\n is_prime(n)\n\nt1 = time.time()\n\nprint('time required for this functions is ',t1 - t0)\n","repo_name":"fahad1226/Software-Dev.-with-Python","sub_path":"Data Structure/primenumbers.py","file_name":"primenumbers.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5835556171","text":"import pygame\nimport sys\nimport cv2\nimport numpy as np\nimport pickle\nimport threading\nfrom character import character \nfrom zetsu import zetsu\nfrom handSignChecker import handSignChecker\nfrom explosion import explosion\nfrom minion import minion\nfrom sharingan import Sharingan\n\nclass handTracker:\n import mediapipe as mp\n def __init__(self):\n self.hands = self.mp.solutions.hands.Hands(static_image_mode=False,max_num_hands=1,min_detection_confidence=0.5,min_tracking_confidence=0.5)\n self.width=1280\n self.height=720\n self.cam=cv2.VideoCapture(0,cv2.CAP_DSHOW)\n self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)\n self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT,self.height)\n self.cam.set(cv2.CAP_PROP_FPS, 30)\n self.keyPoints=[0,4,5,9,13,17,8,12,16,20]\n with open('handsigns.pkl','rb') as f:\n self.gestNames=pickle.load(f)\n self.knownGestures=pickle.load(f)\n \n def findDistances(self,handData):\n distMatrix=np.zeros([len(handData),len(handData)],dtype='float')\n palmSize=((handData[0][0]-handData[9][0])**2+(handData[0][1]-handData[9][1])**2)**(1./2.)\n for row in range(0,len(handData)):\n for column in range(0,len(handData)):\n distMatrix[row][column]=(((handData[row][0]-handData[column][0])**2+(handData[row][1]-handData[column][1])**2)**(1./2.))/palmSize\n return distMatrix\n \n def Marks(self,frame):\n myHands=[]\n frameRGB=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n results=self.hands.process(frameRGB)\n if results.multi_hand_landmarks != None:\n for handLandMarks in results.multi_hand_landmarks:\n myHand=[]\n for landMark in handLandMarks.landmark:\n myHand.append((int(landMark.x*self.width),int(landMark.y*self.height)))\n myHands.append(myHand)\n return myHands\n \n def findGesture(self,unknownGesture,knownGestures,keyPoints,gestNames,tol):\n errorArray=[]\n for i in range(0,len(gestNames),1):\n error=self.findError(knownGestures[i],unknownGesture,keyPoints)\n errorArray.append(error)\n errorMin=errorArray[0]\n minIndex=0\n for i in range(0,len(errorArray),1):\n if errorArray[i]<errorMin:\n errorMin=errorArray[i]\n minIndex=i\n if errorMin<tol:\n gesture=gestNames[minIndex]\n if errorMin>=tol:\n gesture='Unknown'\n return gesture\n \n def 
findError(self,gestureMatrix,unknownMatrix,keyPoints):\n error=0\n for row in keyPoints:\n for column in keyPoints:\n error=error+abs(gestureMatrix[row][column]-unknownMatrix[row][column])\n return error\n \n def run(self):\n global handsigns\n while True:\n ignore, frame = self.cam.read()\n frame=cv2.resize(frame,(self.width,self.height))\n handData=self.Marks(frame)\n if handData!=[]:\n unknownGesture=self.findDistances(handData[0])\n myGesture=self.findGesture(unknownGesture,self.knownGestures,self.keyPoints,self.gestNames,10)\n if jutsu_perform == True and (myGesture != 'Unknown') and len(handsigns) < 4 and (myGesture not in handsigns):\n if len(handsigns) == 0: \n if myGesture == \"one\": \n screen.blit(ges1,(180,580))\n pygame.display.update(ges1.get_rect())\n elif myGesture == \"two\": \n screen.blit(ges2,(180,580))\n pygame.display.update(ges2.get_rect())\n elif myGesture == \"three\": \n screen.blit(ges3,(180,580))\n pygame.display.update(ges3.get_rect())\n elif myGesture == \"four\": \n screen.blit(ges4,(180,590))\n pygame.display.update(ges4.get_rect())\n elif myGesture == \"five\": \n screen.blit(ges5,(180,590))\n pygame.display.update(ges5.get_rect())\n elif len(handsigns) == 1:\n if myGesture == \"one\": \n screen.blit(ges1,(370,580))\n pygame.display.update(ges1.get_rect())\n elif myGesture == \"two\": \n screen.blit(ges2,(370,580))\n pygame.display.update(ges2.get_rect())\n elif myGesture == \"three\": \n screen.blit(ges3,(370,580))\n pygame.display.update(ges3.get_rect())\n elif myGesture == \"four\": \n screen.blit(ges4,(370,595))\n pygame.display.update(ges4.get_rect())\n elif myGesture == \"five\": \n screen.blit(ges5,(370,595))\n pygame.display.update(ges5.get_rect())\n elif len(handsigns) == 2:\n if myGesture == \"one\": \n screen.blit(ges1,(560,580))\n pygame.display.update(ges1.get_rect())\n elif myGesture == \"two\": \n screen.blit(ges2,(560,580))\n pygame.display.update(ges2.get_rect())\n elif myGesture == \"three\": \n screen.blit(ges3,(560,580))\n pygame.display.update(ges3.get_rect())\n elif myGesture == \"four\": \n screen.blit(ges4,(560,595))\n pygame.display.update(ges4.get_rect())\n elif myGesture == \"five\": \n screen.blit(ges5,(560,595))\n pygame.display.update(ges5.get_rect())\n elif len(handsigns) == 3:\n if myGesture == \"one\": \n screen.blit(ges1,(750,580))\n pygame.display.update(ges1.get_rect())\n elif myGesture == \"two\": \n screen.blit(ges2,(750,580))\n pygame.display.update(ges2.get_rect())\n elif myGesture == \"three\": \n screen.blit(ges3,(750,580))\n pygame.display.update(ges3.get_rect())\n elif myGesture == \"four\": \n screen.blit(ges4,(750,595))\n pygame.display.update(ges4.get_rect())\n elif myGesture == \"five\": \n screen.blit(ges5,(750,595))\n pygame.display.update(ges5.get_rect())\n handsigns.append(myGesture) \n if len(handsigns) == 4:\n if handSignTracker.compareHandSign(handsigns) == 1: #fire\n screen.blit(fireball,(940,590))\n pygame.display.update(fireball.get_rect())\n elif handSignTracker.compareHandSign(handsigns) == 2: #chidori\n screen.blit(chidori,(940,590))\n pygame.display.update(chidori.get_rect())\n else: #wrong handsign sequence\n screen.blit(wrong,(940,590))\n pygame.display.update(wrong.get_rect()) \n \n##############################################################Main Game########################################################################\nhandsigns = [] #set of handsigns\n\n#screen setup\nwidth = 1200\nheight = 675\nscreen = pygame.display.set_mode((width,height))\npygame.display.set_caption('Sasuke 
Adventure')\nclock = pygame.time.Clock()\npygame.font.init()\nbackground = pygame.image.load(\"animation/background.jpg\").convert_alpha()\n\n#sounds\npygame.mixer.pre_init(44100, -16, 2, 512) #to improve sound quality\npygame.mixer.init()\nbackground_sound = pygame.mixer.Sound(\"sound/background.mp3\")\nbackground_sound.set_volume(0.05)\nchidori_sound = pygame.mixer.Sound(\"sound/chidori.mp3\")\nchidori_sound.set_volume(0.1)\nkaton_sound = pygame.mixer.Sound(\"sound/katon.mp3\")\nkaton_sound.set_volume(0.3)\nswing_sound = pygame.mixer.Sound(\"sound/swing.wav\")\nswing_sound.set_volume(0.05)\nsharingan_sound = pygame.mixer.Sound(\"sound/sharingan.mp3\")\nsharingan_sound.set_volume(0.1)\n\n#images\nintro = pygame.image.load(\"animation/intro.jpg\").convert_alpha()\nstart = pygame.image.load(\"animation/start.png\").convert_alpha()\nend = pygame.image.load(\"animation/end.jpg\").convert_alpha()\nmainButton = pygame.image.load(\"animation/main.png\").convert_alpha()\nges1 = pygame.image.load(\"gestures/ges1.png\").convert_alpha()\nges2 = pygame.image.load(\"gestures/ges2.png\").convert_alpha()\nges3 = pygame.image.load(\"gestures/ges3.png\").convert_alpha()\nges4 = pygame.image.load(\"gestures/ges4.png\").convert_alpha()\nges5 = pygame.image.load(\"gestures/ges5.png\").convert_alpha()\nfireball = pygame.image.load(\"animation/fire2.png\").convert_alpha()\nchidori = pygame.image.load(\"animation/chidori.png\").convert_alpha()\nwrong = pygame.image.load(\"animation/wrong.png\").convert_alpha()\nsfx = pygame.image.load(\"animation/sfx.png\").convert_alpha()\nsfx_disabled = pygame.image.load(\"animation/sfx_disabled.png\").convert_alpha()\nsfx_rect = None\n\n#player controls\nmana_empty = False\nleft_move = False\nright_move = False\nfire_shoot = False\nup_move = False\ndown_move = False\nscore = 0\nmusic_on = True\nintro_screen = True\nend_screen = False\n\n#font and letters\nHP_font = pygame.font.Font(\"font.TTF\",20)\nHP = HP_font.render(\"HP\",True,\"Dark Green\")\nMANA = HP_font.render(\"Chakra\",True,\"Blue\")\nScore = HP_font.render(\"Score:\",True,\"Red\")\nCredit = HP_font.render(\"Developer: Patrick Duong\",True,\"Red\")\ntryAgain = HP_font.render(\"You failed. 
Try again!\",True,\"#B43757\")\nwin = HP_font.render(\"You won!\",True,\"Yellow\")\nwin2 = HP_font.render(\"Thank you for playing!\",True,\"Yellow\")\n\n#basic attack\nbasic_attack = False\nbasic_attack_dur = 0\n\n#fire style jutsu\nfire_shoot_stance = False\nfire_shoot_stance_dur = 0\njutsu_perform = False\n\n#chidori jutsu\nchidori_stance = False\nchidori_dur = 0\nfacing_right = True #helpful for chidori move\nchidori_right = False\nchidori_left = False\n\n#sharingan activation\nsharingan_on = False\nleft_sharingan = Sharingan(500,350)\nright_sharingan = Sharingan(650,350)\n\n#enemies controls\nenemySpeed = 3\nsasukeAttack = False\nidle = True\n\n#enemies states\nzetsu_1_alive = True\nzetsu_2_alive = True\nzetsu_3_alive = True\nminion_1_alive = True\nminion_2_alive = True\nminion_3_alive = True\nminion_4_alive = True\nminion_5_alive = True\nminion_6_alive = True\nminion_7_alive = True\n\n#players and enemies creation\nsasuke = character(20, 200, width, height, screen)\nzetsu_1 = zetsu(1215,235,width,height,screen)\nzetsu_2 = zetsu(1215,335,width,height,screen)\nzetsu_3 = zetsu(1215,435,width,height,screen)\n\nminion_1 = minion(1150,200,width,height,screen)\nminion_2 = minion(1150,300,width,height,screen)\nminion_3 = minion(1150,400,width,height,screen)\nminion_4 = minion(1150,200,width,height,screen)\nminion_5 = minion(1150,300,width,height,screen)\nminion_6 = minion(1150,400,width,height,screen)\nminion_7 = minion(1150,500,width,height,screen)\n\n#sprite groups\nfire_explode_sprite_group = pygame.sprite.Group()\nwater_explode_sprite_group = pygame.sprite.Group()\nleft_sharingan_group = pygame.sprite.Group()\nright_sharingan_group = pygame.sprite.Group()\nsharingan_dur = 0\n\n#set comparator\nhandSignTracker = handSignChecker()\n\n#camera on to detect hand gestures\nhandtrack = handTracker()\nmy_thread = threading.Thread(target=handtrack.run,daemon=True)\nmy_thread.start()\n\nrun = True\npygame.mixer.Channel(2).play(background_sound,-1)\nwhile run: \n while intro_screen:\n screen.blit(intro,(0,0))\n screen.blit(Credit,(29,10))\n startButton = screen.blit(start,(480,300))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if startButton.collidepoint(event.pos):\n pygame.draw.rect(screen, (0,0,0), pygame.Rect(0, 575, 1200, 100))\n intro_screen = False \n pygame.display.update()\n \n clock.tick(60)\n screen.blit(background,(0,0))\n screen.blit(HP,(5,5))\n screen.blit(MANA,(5,40))\n screen.blit(Score,(5,70))\n scoreText = HP_font.render(f\"{score}\",True,\"Red\")\n screen.blit(scoreText, (100,70))\n \n if music_on:\n sfx_rect = screen.blit(sfx,(1150,5))\n background_sound.set_volume(0.05)\n else:\n sfx_rect = screen.blit(sfx_disabled,(1150,5))\n background_sound.set_volume(0)\n \n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$player and enemies control$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n sasuke.update()\n sasuke.draw_character()\n sasuke.health_bar_draw()\n sasuke.mana_bar_draw()\n \n sasuke.fire_sprite_update()\n \n if sharingan_on and sasuke.mana_left() >= 5:\n sharingan_sound.play(loops=0, maxtime=0, fade_ms=0)\n sasuke.activateSharingan()\n left_sharingan_group.add(left_sharingan)\n right_sharingan_group.add(right_sharingan)\n sharingan_dur += 1\n if sharingan_dur == 50:\n sharingan_dur = 0\n sharingan_on = False\n left_sharingan.deactivate()\n right_sharingan.deactivate()\n left_sharingan_group.empty()\n right_sharingan_group.empty()\n \n if sasuke.checkAlive():\n #check mana\n if 
sasuke.getSharinganStatus() == True and sasuke.mana_left() > 0: \n sasuke.mana_passive_lost() \n if sasuke.mana_left() <= 0: \n sasuke.deactivateSharingan()\n mana_empty = True\n enemySpeed = 3\n elif sasuke.mana_left() >= 1000:\n mana_empty = False\n if mana_empty == True:\n sasuke.mana_passive_gain()\n \n #sasuke actions\n if fire_shoot_stance:\n pygame.mixer.Channel(0).play(katon_sound)\n if fire_shoot_stance_dur == 20:\n fire_shoot_stance_dur = 0\n fire_shoot_stance = False\n sasuke.action_updater(2)\n fire_shoot_stance_dur += 1\n if fire_shoot:\n sasuke.fireJutsu()\n sasuke.mana_active_lost()\n fire_shoot = False\n jutsu_perform = False #set false to disable hand sign detection. User has to press c every time to perform a jutsu\n handsigns.clear() #clear handsigns list to prepare for next jutsu\n elif chidori_stance:\n chidori_sound.play()\n if chidori_dur <= 40:\n sasuke.action_updater(3)\n elif chidori_dur > 40 and chidori_dur < 90: \n basic_attack = True\n if facing_right:\n chidori_right = True\n else: \n chidori_left = True\n sasuke.action_updater(4)\n sasuke.chidori_move(chidori_left,chidori_right)\n else: \n sasuke.mana_active_lost()\n chidori_stance = False\n jutsu_perform = False\n chidori_dur = 0\n chidori_left = False\n chidori_right = False\n handsigns.clear()\n basic_attack = False\n chidori_dur += 1\n elif basic_attack:\n if basic_attack_dur == 25:\n basic_attack = False\n basic_attack_dur = 0\n sasuke.action_updater(5)\n basic_attack_dur += 1\n elif left_move or right_move or up_move or down_move:\n sasuke.action_updater(1)\n else:\n sasuke.action_updater(0)\n if not chidori_stance:\n sasuke.character_movements(left_move, right_move,down_move,up_move)\n else: \n end_screen = True\n\n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$hand signs detection controls$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n if len(handsigns) == 4 and jutsu_perform == True: \n if handSignTracker.compareHandSign(handsigns) == 1: #fire style\n if sasuke.mana_left() > 100 and mana_empty == False:\n fire_shoot_stance = True\n fire_shoot = True\n else: \n jutsu_perform = False\n handsigns.clear()\n elif handSignTracker.compareHandSign(handsigns) == 2: #chidori\n if sasuke.mana > 100 and mana_empty == False:\n chidori_stance = True\n else:\n jutsu_perform = False\n handsigns.clear()\n else: \n jutsu_perform = False\n handsigns.clear() #the user performs wrong handsigns, so clear handsigns list\n \n #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$keyboard detection$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n \n #keys free\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_w:\n idle = True\n up_move = False\n if event.key == pygame.K_s:\n idle = True\n down_move = False\n if event.key == pygame.K_a:\n left_move = False\n if event.key == pygame.K_d:\n right_move = False\n \n #keys pushed\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_w:\n idle = False\n up_move = True\n if event.key == pygame.K_s:\n idle = False\n down_move = True\n if event.key == pygame.K_a:\n left_move = True\n facing_right = False\n if event.key == pygame.K_d:\n right_move = True\n facing_right = True\n if event.key == pygame.K_e:\n swing_sound.play()\n basic_attack = True\n if event.key == pygame.K_c: #press c to start recording handsigns. 
Press again to cancel\n pygame.draw.rect(screen, (0,0,0), pygame.Rect(0, 575, 1200, 100))\n if jutsu_perform == True:\n jutsu_perform = False\n handsigns.clear()\n else: \n jutsu_perform = True\n if event.key == pygame.K_r:\n if mana_empty == False:\n sharingan_on = True\n enemySpeed = 2\n \n if event.type == pygame.MOUSEBUTTONDOWN:\n if sfx_rect.collidepoint(event.pos) and music_on == True:\n music_on = False\n elif sfx_rect.collidepoint(event.pos) and music_on == False:\n music_on = True\n \n #$$$$$$$$$$$$$$$$$$$$$$$explosion and sprite collision controls$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n fire_explode_sprite_group.draw(screen)\n fire_explode_sprite_group.update()\n water_explode_sprite_group.draw(screen)\n water_explode_sprite_group.update()\n \n left_sharingan_group.draw(screen)\n left_sharingan_group.update()\n right_sharingan_group.draw(screen)\n right_sharingan_group.update()\n \n if score >= 1 and score < 5: #level 2\n if pygame.sprite.spritecollide(minion_1,sasuke.getFireSprite(),False) and minion_1_alive: #when minion 1 gets hit by fire\n fire_explode_sprite_group.add(explosion(sasuke.getFireX()+50,sasuke.getFireY(),1))\n minion_1.takeFireDamage()\n sasuke.explicitFireKill() #after explosion, kill the fire sprite explicitly\n if pygame.sprite.spritecollide(sasuke,minion_1.getWaterSprite(),False): #when sasuke gets hit by water of minion 1\n water_explode_sprite_group.add(explosion(minion_1.getWaterX()-10,minion_1.getWaterY(),2))\n minion_1.explicitWaterKill()\n sasuke.takeWaterDamage() \n minion_1.setWaterCount() \n if pygame.sprite.collide_rect_ratio(1.2)(sasuke,minion_1): #when sasuke swings minion or use chidori on minion 1\n if basic_attack: \n if minion_1.getHealth() > 0:\n minion_1.takeSwingDamage()\n if sasuke.getFire() != None: #when fire hits water of minion 1\n if pygame.sprite.spritecollide(sasuke.getFire(),minion_1.getWaterSprite(),False):\n water_explode_sprite_group.add(explosion(minion_1.getWaterX()+50,minion_1.getWaterY(),2))\n sasuke.explicitFireKill()\n minion_1.explicitWaterKill()\n minion_1.setWaterCount()\n if minion_1.getHealth() > 0: #check minion 1 state\n minion_1.movements(pygame.time.get_ticks(),enemySpeed)\n minion_1.animate_updater()\n minion_1.draw_character()\n minion_1.water_sprite_update()\n else: \n minion_1.kill()\n if minion_1_alive:\n score += 1\n minion_1_alive = False\n \n if pygame.sprite.spritecollide(minion_2,sasuke.getFireSprite(),False) and minion_2_alive: #when minion 2 gets hit by fire\n fire_explode_sprite_group.add(explosion(sasuke.getFireX()+50,sasuke.getFireY(),1))\n minion_2.takeFireDamage()\n sasuke.explicitFireKill() #after explosion, kill the fire sprite explicitly\n if pygame.sprite.spritecollide(sasuke,minion_2.getWaterSprite(),False): #when sasuke gets hit by water of minion 2\n water_explode_sprite_group.add(explosion(minion_2.getWaterX()-10,minion_2.getWaterY(),2))\n minion_2.explicitWaterKill()\n sasuke.takeWaterDamage() \n minion_2.setWaterCount()\n if pygame.sprite.collide_rect_ratio(1.2)(sasuke,minion_2): #when sasuke swings minion or use chidori on minion 2\n if basic_attack: \n if minion_2.getHealth() > 0:\n minion_2.takeSwingDamage()\n if sasuke.getFire() != None: #when fire hits water of minion 2\n if pygame.sprite.spritecollide(sasuke.getFire(),minion_2.getWaterSprite(),False):\n water_explode_sprite_group.add(explosion(minion_2.getWaterX()+50,minion_2.getWaterY(),2))\n sasuke.explicitFireKill()\n minion_2.explicitWaterKill()\n minion_2.setWaterCount()\n if minion_2.getHealth() > 
0: #check minion 2 state\n minion_2.movements(pygame.time.get_ticks(),enemySpeed)\n minion_2.animate_updater()\n minion_2.draw_character()\n minion_2.water_sprite_update()\n else: \n minion_2.kill()\n if minion_2_alive:\n score += 1\n minion_2_alive = False\n \n if pygame.sprite.spritecollide(minion_3,sasuke.getFireSprite(),False) and minion_3_alive: #when minion 3 gets hit by fire\n fire_explode_sprite_group.add(explosion(sasuke.getFireX()+50,sasuke.getFireY(),1))\n minion_3.takeFireDamage()\n sasuke.explicitFireKill() #after explosion, kill the fire sprite explicitly\n if pygame.sprite.spritecollide(sasuke,minion_3.getWaterSprite(),False): #when sasuke gets hit by water of minion 3\n water_explode_sprite_group.add(explosion(minion_3.getWaterX()-10,minion_3.getWaterY(),2))\n minion_3.explicitWaterKill()\n sasuke.takeWaterDamage() \n minion_3.setWaterCount()\n if pygame.sprite.collide_rect_ratio(1.2)(sasuke,minion_3): #when sasuke swings minion or use chidori on minion 3\n if basic_attack: \n if minion_3.getHealth() > 0:\n minion_3.takeSwingDamage()\n if sasuke.getFire() != None: #when fire hits water of minion 3\n if pygame.sprite.spritecollide(sasuke.getFire(),minion_3.getWaterSprite(),False):\n water_explode_sprite_group.add(explosion(minion_3.getWaterX()+50,minion_3.getWaterY(),2))\n sasuke.explicitFireKill()\n minion_3.explicitWaterKill()\n minion_3.setWaterCount()\n if minion_3.getHealth() > 0: #check minion 3 state\n minion_3.movements(pygame.time.get_ticks(),enemySpeed)\n minion_3.animate_updater()\n minion_3.draw_character()\n minion_3.water_sprite_update()\n else: \n minion_3.kill()\n if minion_3_alive:\n score += 1\n minion_3_alive = False\n \n if pygame.sprite.spritecollide(zetsu_2,sasuke.getFireSprite(),False) and zetsu_2_alive: #when zetsu 2 gets hit by fire\n fire_explode_sprite_group.add(explosion(sasuke.getFireX()+50,sasuke.getFireY(),1))\n zetsu_2.enemyTakeFireDamage()\n sasuke.explicitFireKill() #after explosion, kill the fire sprite explicitly\n if pygame.sprite.collide_rect_ratio(1.2)(sasuke, zetsu_2): #when sasuke swings zetsu or use chidori on zetsu 2\n if basic_attack:\n if zetsu_2.getHealth() > 0:\n zetsu_2.enemyTakeSwingDamage()\n else: \n if zetsu_2.getHealth() > 0: \n sasuke.takeSwingDamge() \n if zetsu_2.getHealth() > 0: #check zetsu 2 state\n zetsu_2.animate_updater()\n zetsu_2.move_towards_player(sasuke,enemySpeed)\n zetsu_2.draw_character()\n else:\n zetsu_2.kill()\n if zetsu_2_alive:\n score += 1\n zetsu_2_alive = False \n\n elif score >= 5: #level 3\n if pygame.sprite.spritecollide(minion_4,sasuke.getFireSprite(),False) and minion_4_alive: #when minion 4 gets hit by fire\n fire_explode_sprite_group.add(explosion(sasuke.getFireX()+50,sasuke.getFireY(),1))\n minion_4.takeFireDamage()\n sasuke.explicitFireKill() #after explosion, kill the fire sprite explicitly\n if pygame.sprite.spritecollide(sasuke,minion_4.getWaterSprite(),False): #when sasuke gets hit by water of minion 4\n water_explode_sprite_group.add(explosion(minion_4.getWaterX()-10,minion_4.getWaterY(),2))\n minion_4.explicitWaterKill()\n sasuke.takeWaterDamage() \n minion_4.setWaterCount()\n if pygame.sprite.collide_rect_ratio(1.2)(sasuke,minion_4): #when sasuke swings minion or use chidori on minion 4\n if basic_attack: \n if minion_4.getHealth() > 0:\n minion_4.takeSwingDamage()\n if sasuke.getFire() != None: #when fire hits water of minion 4\n if pygame.sprite.spritecollide(sasuke.getFire(),minion_4.getWaterSprite(),False):\n 
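The per-minion blocks in this section repeat the same collision handling seven times; a hedged refactoring sketch that could replace each block (handle_minion is invented, but it calls only methods and classes the game already defines, so it assumes the game's imports and objects):

def handle_minion(m, sasuke, fire_group, water_group, basic_attack):
    # fireball hits the minion
    if pygame.sprite.spritecollide(m, sasuke.getFireSprite(), False):
        fire_group.add(explosion(sasuke.getFireX() + 50, sasuke.getFireY(), 1))
        m.takeFireDamage()
        sasuke.explicitFireKill()
    # the minion's water projectile hits Sasuke
    if pygame.sprite.spritecollide(sasuke, m.getWaterSprite(), False):
        water_group.add(explosion(m.getWaterX() - 10, m.getWaterY(), 2))
        m.explicitWaterKill()
        sasuke.takeWaterDamage()
        m.setWaterCount()
    # melee swing or chidori at close range
    if basic_attack and pygame.sprite.collide_rect_ratio(1.2)(sasuke, m) and m.getHealth() > 0:
        m.takeSwingDamage()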
water_explode_sprite_group.add(explosion(minion_4.getWaterX()+50,minion_4.getWaterY(),2))\n sasuke.explicitFireKill()\n minion_4.explicitWaterKill()\n minion_4.setWaterCount()\n if minion_4.getHealth() > 0: #check minion 4 state\n minion_4.movements(pygame.time.get_ticks(),enemySpeed)\n minion_4.animate_updater()\n minion_4.draw_character()\n minion_4.water_sprite_update()\n else: \n minion_4.kill()\n if minion_4_alive:\n score += 1\n minion_4_alive = False\n \n if pygame.sprite.spritecollide(minion_5,sasuke.getFireSprite(),False) and minion_5_alive: #when minion 5 gets hit by fire\n fire_explode_sprite_group.add(explosion(sasuke.getFireX()+50,sasuke.getFireY(),1))\n minion_5.takeFireDamage()\n sasuke.explicitFireKill() #after explosion, kill the fire sprite explicitly\n if pygame.sprite.spritecollide(sasuke,minion_5.getWaterSprite(),False): #when sasuke gets hit by water\n water_explode_sprite_group.add(explosion(minion_5.getWaterX()-10,minion_5.getWaterY(),2))\n minion_5.explicitWaterKill()\n sasuke.takeWaterDamage() \n minion_5.setWaterCount() \n if pygame.sprite.collide_rect_ratio(1.2)(sasuke,minion_5): #when sasuke swings minion or use chidori on minion 5\n if basic_attack: \n if minion_5.getHealth() > 0:\n minion_5.takeSwingDamage()\n if sasuke.getFire() != None: #when fire hits water of minion 5\n if pygame.sprite.spritecollide(sasuke.getFire(),minion_5.getWaterSprite(),False):\n water_explode_sprite_group.add(explosion(minion_5.getWaterX()+50,minion_5.getWaterY(),2))\n sasuke.explicitFireKill()\n minion_5.explicitWaterKill()\n minion_5.setWaterCount()\n if minion_5.getHealth() > 0: #check minion 5 state\n minion_5.movements(pygame.time.get_ticks(),enemySpeed)\n minion_5.animate_updater()\n minion_5.draw_character()\n minion_5.water_sprite_update()\n else: \n minion_5.kill()\n if minion_5_alive:\n score += 1\n minion_5_alive = False\n if score == 10: #won the game\n end_screen = True\n \n if pygame.sprite.spritecollide(minion_6,sasuke.getFireSprite(),False) and minion_6_alive: #when minion 6 gets hit by fire\n fire_explode_sprite_group.add(explosion(sasuke.getFireX()+50,sasuke.getFireY(),1))\n minion_6.takeFireDamage()\n sasuke.explicitFireKill() #after explosion, kill the fire sprite explicitly\n if pygame.sprite.spritecollide(sasuke,minion_6.getWaterSprite(),False): #when sasuke gets hit by water of minion 6\n water_explode_sprite_group.add(explosion(minion_6.getWaterX()-10,minion_6.getWaterY(),2))\n minion_6.explicitWaterKill()\n sasuke.takeWaterDamage() \n minion_6.setWaterCount()\n if pygame.sprite.collide_rect_ratio(1.2)(sasuke,minion_6): #when sasuke swings minion or use chidori on minion 6\n if basic_attack: \n if minion_6.getHealth() > 0:\n minion_6.takeSwingDamage()\n if sasuke.getFire() != None: #when fire hits water of minion 6\n if pygame.sprite.spritecollide(sasuke.getFire(),minion_6.getWaterSprite(),False):\n water_explode_sprite_group.add(explosion(minion_6.getWaterX()+50,minion_6.getWaterY(),2))\n sasuke.explicitFireKill()\n minion_6.explicitWaterKill()\n minion_6.setWaterCount()\n if minion_6.getHealth() > 0: #check minion 6 state\n minion_6.movements(pygame.time.get_ticks(),enemySpeed)\n minion_6.animate_updater()\n minion_6.draw_character()\n minion_6.water_sprite_update()\n else: \n minion_6.kill()\n if minion_6_alive:\n score += 1\n minion_6_alive = False\n \n if pygame.sprite.spritecollide(minion_7,sasuke.getFireSprite(),False) and minion_7_alive: #when minion 7 gets hit by fire\n 
fire_explode_sprite_group.add(explosion(sasuke.getFireX()+50,sasuke.getFireY(),1))\n minion_7.takeFireDamage()\n sasuke.explicitFireKill() #after explosion, kill the fire sprite explicitly\n if pygame.sprite.spritecollide(sasuke,minion_7.getWaterSprite(),False): #when sasuke gets hit by water of minion 7\n water_explode_sprite_group.add(explosion(minion_7.getWaterX()-10,minion_7.getWaterY(),2))\n minion_7.explicitWaterKill()\n sasuke.takeWaterDamage() \n minion_7.setWaterCount()\n if pygame.sprite.collide_rect_ratio(1.2)(sasuke,minion_7): #when sasuke swings minion or use chidori on minion 7\n if basic_attack: \n if minion_7.getHealth() > 0:\n minion_7.takeSwingDamage()\n if sasuke.getFire() != None: #when fire hits water of minion 7\n if pygame.sprite.spritecollide(sasuke.getFire(),minion_7.getWaterSprite(),False):\n water_explode_sprite_group.add(explosion(minion_7.getWaterX()+50,minion_7.getWaterY(),2))\n sasuke.explicitFireKill()\n minion_7.explicitWaterKill()\n minion_7.setWaterCount()\n if minion_7.getHealth() > 0: #check minion 7 state\n minion_7.movements(pygame.time.get_ticks(),enemySpeed)\n minion_7.animate_updater()\n minion_7.draw_character()\n minion_7.water_sprite_update()\n else: \n minion_7.kill()\n if minion_7_alive:\n score += 1\n minion_7_alive = False\n \n if pygame.sprite.spritecollide(zetsu_3,sasuke.getFireSprite(),False) and zetsu_3_alive: #when zetsu 3 gets hit by fire\n fire_explode_sprite_group.add(explosion(sasuke.getFireX()+50,sasuke.getFireY(),1))\n zetsu_3.enemyTakeFireDamage()\n sasuke.explicitFireKill() #after explosion, kill the fire sprite explicitly\n if pygame.sprite.collide_rect_ratio(1.2)(sasuke, zetsu_3): #when sasuke swings zetsu or use chidori on zetsu 3\n if basic_attack:\n if zetsu_3.getHealth() > 0:\n zetsu_3.enemyTakeSwingDamage()\n else: \n if zetsu_3.getHealth() > 0: \n sasuke.takeSwingDamge() \n if zetsu_3.getHealth() > 0: #check zetsu 3 state\n zetsu_3.animate_updater()\n zetsu_3.move_towards_player(sasuke,enemySpeed)\n zetsu_3.draw_character()\n else:\n zetsu_3.kill()\n if zetsu_3_alive:\n score += 1\n zetsu_3_alive = False \n\n elif score == 0: #level \n if pygame.sprite.spritecollide(zetsu_1,sasuke.getFireSprite(),False) and zetsu_1_alive: #when zetsu 1 gets hit by fire\n fire_explode_sprite_group.add(explosion(sasuke.getFireX()+50,sasuke.getFireY(),1))\n zetsu_1.enemyTakeFireDamage()\n sasuke.explicitFireKill() #after explosion, kill the fire sprite explicitly\n if pygame.sprite.collide_rect_ratio(1.2)(sasuke, zetsu_1): #when sasuke swings zetsu or use chidori on zetsu 1\n if basic_attack:\n if zetsu_1.getHealth() > 0:\n zetsu_1.enemyTakeSwingDamage()\n else: \n if zetsu_1.getHealth() > 0: \n sasuke.takeSwingDamge() \n if zetsu_1.getHealth() > 0: #check zetsu 1 state\n zetsu_1.animate_updater()\n zetsu_1.move_towards_player(sasuke,enemySpeed)\n zetsu_1.draw_character()\n else:\n zetsu_1.kill()\n if zetsu_1_alive:\n score += 1\n zetsu_1_alive = False\n \n while end_screen: \n screen.blit(end,(0,0))\n mainButtonRect = screen.blit(mainButton,(540,565))\n if sasuke.checkAlive() == False:\n screen.blit(tryAgain,(450,70))\n elif sasuke.checkAlive() == True:\n screen.blit(win,(550,40))\n screen.blit(win2,(450,80))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if mainButtonRect.collidepoint(event.pos):\n end_screen = False\n intro_screen = True \n \n #reset all control constants\n score = 0\n handsigns.clear()\n jutsu_perform = False\n fire_shoot = 
False\n fire_shoot_stance = False\n basic_attack_dur = 0\n fire_shoot_stance_dur = 0\n sharingan_on = False\n mana_empty = False\n \n chidori_stance = False\n chidori_dur = 0\n facing_right = True\n chidori_right = False\n chidori_left = False\n \n left_move = False\n right_move = False\n up_move = False\n down_move = False\n \n enemySpeed = 3\n sasukeAttack = False\n idle = True\n \n zetsu_1_alive = True\n zetsu_2_alive = True\n zetsu_3_alive = True\n minion_1_alive = True\n minion_2_alive = True\n minion_3_alive = True\n minion_4_alive = True\n minion_5_alive = True\n minion_6_alive = True\n minion_7_alive = True\n sasuke = character(20, 200, width, height, screen)\n zetsu_1 = zetsu(1215,235,width,height,screen)\n zetsu_2 = zetsu(1215,335,width,height,screen)\n zetsu_3 = zetsu(1215,435,width,height,screen)\n minion_1 = minion(1150,200,width,height,screen)\n minion_2 = minion(1150,300,width,height,screen)\n minion_3 = minion(1150,400,width,height,screen)\n minion_4 = minion(1150,200,width,height,screen)\n minion_5 = minion(1150,300,width,height,screen)\n minion_6 = minion(1150,400,width,height,screen)\n minion_7 = minion(1150,500,width,height,screen)\n pygame.display.update()\n pygame.display.update()\npygame.quit()\nsys.exit()","repo_name":"PatrickDuong3001/Sasuke-Adventure","sub_path":"mainGame.py","file_name":"mainGame.py","file_ext":"py","file_size_in_byte":39721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"34140476053","text":"\ntry:\n from scipy.misc import logsumexp\nexcept ImportError:\n from scipy.special import logsumexp\nimport numpy\n\n\ndef calculate_expectation(results, prediction_index=0):\n values = [result['PREDICTIONS'][prediction_index] for result in results]\n logweights = [result['LOGWEIGHT'] for result in results]\n values_by_logweights, sign = logsumexp(a=logweights, b=values, return_sign=True)\n logsum_weights = logsumexp(a=logweights)\n expectation = numpy.exp(values_by_logweights - logsum_weights)\n if sign == 0.0:\n return 0.0\n elif sign == -1.0:\n return expectation * -1.0\n else:\n assert sign == 1.0\n return expectation\n\n\ndef calculate_ess(results):\n logweights = [result['LOGWEIGHT'] for result in results]\n logsum_weights_squared = 2.0 * logsumexp(a=logweights)\n logsum_squared_weights = logsumexp(a=[2.0 * val for val in logweights])\n\n ESS = numpy.exp(\n logsum_weights_squared - logsum_squared_weights\n )\n\n # enum = numpy.power(sum([numpy.exp(logw) for logw in logweights]), 2.0)\n # denom = sum([numpy.power(numpy.exp(logw), 2.0) for logw in logweights])\n # ESS = enum / denom\n\n return ESS\n","repo_name":"babylonhealth/multiverse","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"} +{"seq_id":"27464794378","text":"\"\"\"\nDesign and implement a data structure for Least Recently Used (LRU) cache.\nIt should support the following operations: get and set.\n\nget(key) - Get the value (will always be positive) of the key if the key exists\nin the cache, otherwise return -1.\nset(key, value) - Set or insert the value if the key is not already present. 
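Both implementations below use the collections module without importing it, so an "import collections" at the top of the file is assumed. The OrderedDict mechanics the first method relies on, in short runnable form:

import collections

d = collections.OrderedDict()
d[1], d[2] = 'a', 'b'
d[1] = d.pop(1)        # popping and re-inserting marks key 1 as most recently used
d.popitem(last=False)  # evicts the oldest entry, here (2, 'b')
print(d)               # OrderedDict([(1, 'a')])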
When the\ncache reached its capacity, it should invalidate the least recently used item before inserting a new item.\n\n\"\"\"\nimport collections\n\n\nclass LRUCache(object):\n\n # Method 1: Using OrderedDict\n def __init__(self, capacity):\n self.dic = collections.OrderedDict()\n self.remain = capacity\n\n def get(self, key):\n if key not in self.dic:\n return -1\n v = self.dic.pop(key)\n self.dic[key] = v # set key as the newest one\n return v\n\n def set(self, key, value):\n if key in self.dic:\n self.dic.pop(key)\n else:\n if self.remain > 0:\n self.remain -= 1\n else: # self.dic is full\n self.dic.popitem(last=False)\n self.dic[key] = value\n\n # Method 2: Using deque and dict\n\n def __init__(self, capacity):\n self.deque = collections.deque([])\n self.dic = {}\n self.capacity = capacity\n\n def get(self, key):\n if key not in self.dic:\n return -1\n self.deque.remove(key)\n self.deque.append(key)\n return self.dic[key]\n\n def set(self, key, value):\n if key in self.dic:\n self.deque.remove(key)\n elif len(self.dic) == self.capacity:\n v = self.deque.popleft() # remove the Least Recently Used element\n self.dic.pop(v)\n self.deque.append(key)\n self.dic[key] = value","repo_name":"DanishKhan14/DumbCoder","sub_path":"Python/designDS/LRU.py","file_name":"LRU.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7553589359","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/python\nimport re\nimport os\n\nimport sys\nVERBOSE = 1\nadd =[ ]\nnew =[ ]\nf = [ ]\ndef Traductor(data):\n #search regexes\n r = r'[a-z]+-?[a-z]+ ?'\n id = r'\\w+(_\\d\\w)*'\n paren = r'\\(\\)'\n int = r'int+[ ?]*'\n igual = r'[ ?*] = +[ ?]*'\n cin = r'cin+[ ?]*'\n m = r'[\\>\\>]+[ ?]*'\n #cout\n cout = r'cout+[ ?]*'\n dobmen = r'[\\<\\<]+[ ?]*'\n coment = r'[\"]'\n coutp = str(cout)+str(dobmen)+str(coment)+str(id)+str(coment)\n\n #------------------------------------------------------------\n # combined regexes\n #-----------------------------------------------------------\n t_funcion = str(int)+str(r)+str(paren)\n t_var_int = str(int)+str(id)\n t_c_in = str(cin)+str(m)+str(id)\n #------------------------------------------------------------\n #text = \"int solo (){ int i int i=9; int i; i=c+b+d cout << cout<< } INT hh (){} \"\n #text = data.replace(r'INT', 'int')\n\n#functions\n for m in re.finditer(t_funcion, data):\n tx=data\n a = '%s' % (m.group(0))\n x = a.replace(r'int ', 'function ')\n # print x\n b= tx.replace(a,x)\n data = b\n#var int\n for m in re.finditer(t_var_int, data):\n tx=data\n a = '%s' % (m.group(0))\n x = a.replace(r'int ', 'var ')\n # print x\n b= tx.replace(a,x)\n data = b\n#cin\n for m in re.finditer(t_c_in, data):\n tx=data\n ausiliar =\"\"\n a = '%s' % (m.group(0))\n comillas = \"'\"\n for n in re.finditer(id, a):\n e = '%s' % (n.group(0))\n if e == 'cin':\n pass\n else:\n ausiliar = e\n x = a.replace(ausiliar,'')\n x = x.replace('>>','')\n x = x.replace('cin','$(\"#consola\").append(\"
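# --- usage sketch -----------------------------------------------------------
# The LRU.py record above relies on `collections` (import now added) and keeps
# two interchangeable implementations in one class. A minimal, self-contained
# sketch of the OrderedDict variant; the class is re-declared here so the
# snippet runs on its own, and the capacity of 2 is an arbitrary choice.
import collections

class TinyLRU:
    def __init__(self, capacity):
        self.dic = collections.OrderedDict()
        self.capacity = capacity

    def get(self, key):
        if key not in self.dic:
            return -1
        self.dic.move_to_end(key)          # mark as most recently used
        return self.dic[key]

    def set(self, key, value):
        if key in self.dic:
            self.dic.move_to_end(key)
        elif len(self.dic) == self.capacity:
            self.dic.popitem(last=False)   # evict the least recently used key
        self.dic[key] = value

cache = TinyLRU(2)
cache.set("a", 1)
cache.set("b", 2)
assert cache.get("a") == 1                 # "a" becomes most recently used
cache.set("c", 3)                          # evicts "b"
assert cache.get("b") == -1
# -----------------------------------------------------------------------------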
\")')\n b= tx.replace(a,x)\n data = b\n data = data.replace(r'<')\n for n in re.finditer(coutp, data):\n tx=data\n auxiliar= \" \"\n a = '%s' % (n.group(0))\n comillas = '\"'\n x = a.replace(r'cout' , '$(\"#consola\").append(')\n x= x.replace (r'<<', ' ')\n x= x.replace (r'\"', '')\n for m in re.finditer(id, a):\n e = '%s' % (m.group(0))\n if e == 'cout':\n pass\n else:\n auxiliar = e\n x= x.replace(auxiliar,comillas+auxiliar+comillas+')')\n b= tx.replace(a,x)\n data = b\n pass\n \n\n respuesta = {}\n respuesta['estado'] = 'correcto'\n respuesta['codigo'] = data\n respuesta['error']=\"aqui hay un error\"\n return respuesta\n\n'''if __name__ == '__main__':\n\n if (len(sys.argv) > 1):\n fin = sys.argv[1]\n else:\n fin = 'fuente/c++.cpp'\n f = open(fin,'r')\n data = f.read()\n Traductor(data)'''\n\n\n","repo_name":"JaimeRamirez91/usocode","sub_path":"app/traductor.py","file_name":"traductor.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"41478774467","text":"with open('words.txt', 'r') as file:\n \n word_list = file.read().split(' ')\n\nword = []\nposition = []\ncount = {}\nfor i in range(len(word_list)):\n if word_list[i] not in word:\n word.append(word_list[i].strip('\\n'))\n position.append(i)\n count[word_list[i].strip('\\n')] = 1\n else:\n rep = word_list[i].strip('\\n')\n count[rep] += 1\n\nresult = []\nfor i in range(len(word)):\n a = [word[i], position[i], count[word[i]]]\n result.append(a)\n\nfor i in range(len(result)):\n print(result[i])\n\n\n","repo_name":"kaka-lin/ML-Courses","sub_path":"ML-Course-NTU-Lee/hw0/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"22109362154","text":"import sys\nimport os\npath = os.path.dirname(sys.modules[__name__].__file__)\npath = os.path.join(path, '..')\nsys.path.insert(0, path)\nfrom PyTib.NGrams import NGrams\nfrom PyTib import Segment\nfrom PyTib.common import write_file, open_file\n\nraw = open_file('raw_text.txt')\n# the ngrams have been selected manually\ngood_ngrams = open_file('ngrams_filtered.txt')\n\nsegmented = Segment().segment(raw, ant_segment=True, unknown=False, space_at_punct=True)\nngrams = NGrams().ngrams(segmented.split(' '), freq=5, min=2, max=4)\n\nngram_text = [a.split('\\t')[1] for a in ngrams.split('\\n')]\nwrite_file('ngrams_full.txt', ngrams)\n\nreplacements = []\nfor num, a in enumerate(good_ngrams.split('\\n')):\n replacements.append((a, str(num+1), a.replace(' ', '')))\ngood_ngrams = sorted(replacements, key=lambda x: len(x[0]), reverse=True)\n\njoined = segmented\nfor n in good_ngrams:\n joined = joined.replace(n[0], '+'+n[2]+'+')\nprint('The syllables in the ngrams have been put together:')\n#print(joined)\n\nnumbered = segmented\nfor n in good_ngrams:\n numbered = numbered.replace(n[0], n[1])\nprint('The ngrams have been replaced by numbers:')\n#print(numbered)\n\n\nnumbers = [str(a) for a in range(50)]\nnum_structure = ['.' 
if a not in numbers else a for a in numbered.split(' ')]\nprint('All other syllables have been replaced by a dot:')\n#print(' '.join(num_structure))\n\ntib_structure = []\nfor a in numbered.split(' '):\n if a in numbers:\n for r in replacements:\n if a == r[1]:\n tib_structure.append(a+r[2])\n else:\n tib_structure.append('.')\nprint('The numbers have been replaced by their corresponding ngrams:')\n#print(' '.join(tib_structure))\n\n","repo_name":"drupchen/sandbox","sub_path":"Classes/kangyur_modules/module_discovery.py","file_name":"module_discovery.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5701470499","text":"import functools\nfrom prompt_toolkit.application import run_in_terminal\n\ndef flatten(items):\n for item in items:\n if isinstance(item, (list, tuple)):\n yield from flatten(item)\n else:\n yield item\n\ndef output_reader(handler, callback, *args):\n for line in iter(handler.readline, b\"\"):\n if not line:\n break\n run_in_terminal(functools.partial(callback, line, *args))\n","repo_name":"vsant-dev/aries-agent","sub_path":"lib/agent/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20073874120","text":"# Written by Mutlu Polatcan\n# 05.05.2020\n# -------------------------------------------------\n# TODO Write environment variables types and default values\nimport json\nfrom os import environ\nfrom dateutil import tz\nfrom celery.schedules import crontab\nfrom flask_appbuilder.security.manager import AUTH_DB, AUTH_LDAP, AUTH_OID, AUTH_REMOTE_USER\nfrom cachelib import SimpleCache, RedisCache, MemcachedCache\nfrom s3cache.s3cache import S3Cache\nfrom superset.stats_logger import DummyStatsLogger, StatsdStatsLogger\n\nENV_VAR_TYPE_CASTER = {\n int: lambda value: int(value),\n float: lambda value: float(value),\n bool: lambda value: str(value).lower() == \"true\",\n list: lambda value: value.split(\",\"),\n str: lambda value: value\n}\n\n\ndef get_env(env_var, default=None, cast: type = str):\n value = environ.get(env_var, None)\n\n if value:\n return ENV_VAR_TYPE_CASTER[cast](value)\n else:\n return default\n\n# --------------------------------------------------------------------\n\n\nDRUID_TIMEZONES = {\"utc\": tz.tzutc(), \"local\": tz.tzlocal()}\n\nAUTH_TYPES = {\"oid\": AUTH_OID, \"db\": AUTH_DB, \"ldap\": AUTH_LDAP, \"remote_user\": AUTH_REMOTE_USER}\n\nMETADATA_DB_PREFIXES = {\"postgresql\": \"postgresql+psycopg2\", \"mysql\": \"mysql\", \"sqllite\": \"sqllite\"}\n\nMETADATA_DB_DEFAULT_PORTS = {\"postgresql\": 5432, \"mysql\": 3306}\n\nBROKER_PREFIXES = {\"redis\": \"redis\", \"rabbitmq\": \"pyamqp\"}\n\nBROKER_DEFAULT_PORTS = {\"redis\": 6379, \"rabbitmq\": 5672}\n\nSUPERSET_RESULTS_BACKENDS = {\n \"simple\": lambda: SimpleCache(\n threshold=get_env(\"SUPERSET_SIMPLE_RESULTS_BACKEND_THRESHOLD\", default=10, cast=int),\n default_timeout=get_env(\"SUPERSET_SIMPLE_RESULTS_BACKEND_DEFAULT_TIMEOUT\", default=300, cast=float)\n ),\n \"redis\": lambda: RedisCache(\n host=get_env(\"SUPERSET_REDIS_RESULTS_BACKEND_HOST\"),\n port=get_env(\"SUPERSET_REDIS_RESULTS_BACKEND_PORT\", default=6379, cast=int),\n password=get_env(\"SUPERSET_REDIS_RESULTS_BACKEND_PASSWORD\"),\n key_prefix=get_env(\"SUPERSET_REDIS_RESULTS_BACKEND_KEY_PREFIX\", default=\"superset_results\"),\n db=get_env(\"SUPERSET_REDIS_RESULTS_BACKEND_DB\", default=0, cast=int),\n 
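# --- aside -------------------------------------------------------------------
# superset_config.py above funnels every environment variable through the
# ENV_VAR_TYPE_CASTER table via get_env(). A stripped-down, standalone sketch of
# that pattern; the SUPERSET_PORT / SUPERSET_FLAGS names are illustrative only,
# not variables the real config defines.
import os

CASTERS = {
    int: int,
    float: float,
    bool: lambda v: str(v).lower() == "true",
    list: lambda v: v.split(","),
    str: str,
}

def get_env(name, default=None, cast=str):
    value = os.environ.get(name)
    return CASTERS[cast](value) if value is not None else default

os.environ["SUPERSET_PORT"] = "8088"
os.environ["SUPERSET_FLAGS"] = "a,b,c"
assert get_env("SUPERSET_PORT", default=80, cast=int) == 8088
assert get_env("SUPERSET_FLAGS", default=[], cast=list) == ["a", "b", "c"]
assert get_env("MISSING_FLAG", default=True, cast=bool) is True
# -------------------------------------------------------------------------------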
default_timeout=get_env(\"SUPERSET_REDIS_RESULTS_BACKEND_DEFAULT_TIMEOUT\", default=300, cast=float)\n ),\n \"memcached\": lambda: MemcachedCache(\n servers=get_env(\"SUPERSET_MEMCACHED_RESULTS_BACKEND_SERVERS\", default=[], cast=list),\n default_timeout=get_env(\"SUPERSET_MEMCACHED_RESULTS_BACKEND_DEFAULT_TIMEOUT\", default=300, cast=float),\n key_prefix=get_env(\"SUPERSET_MEMCACHED_RESULTS_BACKEND_KEY_PREFIX\", default=\"superset_results\")\n ),\n \"s3\": lambda: S3Cache(\n s3_bucket=get_env(\"SUPERSET_S3_RESULTS_BACKEND_BUCKET_NAME\"),\n key_prefix=get_env(\"SUPERSET_S3_RESULTS_BACKEND_KEY_PREFIX\", default=\"superset_results\")\n )\n}\n\nCELERY_RESULT_BACKENDS_URIS = {\n \"redis\": \"redis://{password}{host}:{port}/{db}\".format(\n password=\"{}@\".format(get_env(\"CELERY_REDIS_RESULT_BACKEND_PASSWORD\"))\n if get_env(\"CELERY_REDIS_RESULT_BACKEND_PASSWORD\") else \"\",\n host=get_env(\"CELERY_REDIS_RESULT_BACKEND_HOST\"),\n port=get_env(\"CELERY_REDIS_RESULT_BACKEND_PORT\", default=6379),\n db=get_env(\"CELERY_REDIS_RESULT_BACKEND_DB\", default=1)\n ),\n \"memcached\": \"cache+memcached://{servers}/\".format(\n servers=\";\".join(get_env(\"CELERY_MEMCACHED_RESULT_BACKEND_SERVERS\", default=[], cast=list))\n )\n}\n\nSTATS_LOGGERS = {\n \"dummy\": lambda: DummyStatsLogger(prefix=get_env(\"DUMMY_STATS_LOGGER_PREFIX\", default=\"superset\")),\n \"statsd\": lambda: StatsdStatsLogger(\n host=get_env(\"STATSD_STATS_LOGGER_HOST\", default=\"localhost\"),\n port=get_env(\"STATSD_STATS_LOGGER_PORT\", default=8125, cast=int),\n prefix=get_env(\"STATSD_STATS_LOGGER_PREFIX\", default=\"superset\")\n )\n}\n\n\ndef get_db_or_broker_uri(env_var_prefix, default_prefixes, default_ports):\n type = get_env(\"{}_TYPE\".format(env_var_prefix))\n\n try:\n username = get_env(\"{}_USERNAME\".format(env_var_prefix))\n password = get_env(\"{}_PASSWORD\".format(env_var_prefix))\n\n return \"{prefix}://{username}{password}{host}:{port}/{db}\".format(\n prefix=default_prefixes[type],\n username=\"{}{}\".format(username, \":\" if password else \"@\") if username else \"\",\n password=\"{}@\".format(password) if password else \"\",\n host=get_env(\"{}_HOST\".format(env_var_prefix)),\n port=get_env(\"{}_PORT\".format(env_var_prefix), default=default_ports.get(type, None)),\n db=get_env(\"{}_DATABASE\".format(env_var_prefix))\n )\n except Exception:\n raise Exception(\"Wrong type \\\"{}\\\"\".format(type))\n\n\ndef get_cache_config(env_var_prefix):\n def set_config(config_dict, config_key, default=None, cast: type = str):\n value = get_env(\"{}_{}\".format(env_var_prefix, config_key), default=default, cast=cast)\n\n if value:\n config_dict[config_key] = value\n\n cache_config = {}\n\n for cache_config_info in [(\"TYPE\", \"null\", str), (\"NO_NULL_WARNING\", bool), (\"DEFAULT_TIMEOUT\", int),\n (\"THRESHOLD\", int), (\"KEY_PREFIX\", str), (\"MEMCACHED_SERVERS\", str),\n (\"MEMCACHED_PASSWORD\", str), (\"REDIS_HOST\", str),\n (\"REDIS_PORT\", 6379, int), (\"REDIS_PASSWORD\", str), (\"REDIS_DB\", 0, int), (\"DIR\", str)]:\n set_config(\n config_dict=cache_config,\n config_key=\"CACHE_{}\".format(cache_config_info[0]),\n default=cache_config_info[1] if len(cache_config_info) > 2 else None,\n cast=cache_config_info[-1]\n )\n\n if cache_config[\"CACHE_TYPE\"] == \"redis\":\n redis_password = cache_config.get(\"CACHE_REDIS_PASSWORD\", None)\n\n cache_config[\"CACHE_REDIS_URL\"] = \"redis://{password}{host}:{port}/{db}\".format(\n password=\"{}@\".format(redis_password) if redis_password else \"\",\n 
host=cache_config.get(\"CACHE_REDIS_HOST\", \"\"),\n port=cache_config.get(\"CACHE_REDIS_PORT\", \"\"),\n db=cache_config.get(\"CACHE_REDIS_DB\", \"\")\n )\n\n return cache_config\n\n\ndef get_celery_beat_schedule():\n celery_beat_schedule = {\n \"email_reports.schedule_hourly\": {\n \"task\": \"email_reports.schedule_hourly\",\n \"schedule\": crontab(minute=get_env(\"EMAIL_REPORTS_SCHEDULE_HOURLY_MINUTE\", default=\"1\"), hour=\"*\")\n },\n }\n\n if get_env(\"ENABLE_CACHE_WARMUP\", default=False, cast=bool):\n cache_warmups = json.loads(get_env(\"CACHE_WARMUPS\"))\n\n for idx, cache_warmup in enumerate(cache_warmups):\n cache_warmup_id = \"cache-warmup-{}\".format(idx)\n\n celery_beat_schedule[cache_warmup_id] = {\n \"task\": \"cache-warmup\",\n \"schedule\": crontab(*cache_warmup[\"schedule\"].split()),\n \"kwargs\": cache_warmup[\"kwargs\"]\n }\n\n return celery_beat_schedule\n\n\n# ------------------------------------------------------\nAPP_ICON = get_env(\"APP_ICON\", default=\"/static/assets/images/superset-logo-horiz.png\")\nAPP_ICON_WIDTH = get_env(\"APP_ICON_WIDTH\", default=126, cast=int)\nAPP_NAME = get_env(\"APP_NAME\", default=\"Superset\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nAUTH_TYPE = AUTH_TYPES.get(get_env(\"AUTH_TYPE\", default=\"db\"))\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nBABEL_DEFAULT_LOCALE = get_env(\"BABEL_DEFAULT_LOCALE\", default=\"en\")\nBABEL_DEFAULT_FOLDER = get_env(\"BABEL_DEFAULT_FOLDER\", default=\"superset/translations\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nBACKUP_COUNT = get_env(\"BACKUP_COUNT\", default=30, cast=int)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nBUG_REPORT_URL = get_env(\"BUG_REPORT_URL\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nCACHE_CONFIG = get_cache_config(\"CACHE_CONFIG\")\nCACHE_DEFAULT_TIMEOUT = get_env(\"CACHE_DEFAULT_TIMEOUT\", default=60 * 60 * 24, cast=int)\n# ------------------------------------------------------\n\n\n# ------------------------------------------------------\nclass CeleryConfig:\n BROKER_URL = get_db_or_broker_uri(\"CELERY_BROKER\", BROKER_PREFIXES, BROKER_DEFAULT_PORTS)\n CELERY_IMPORTS = (\"superset.sql_lab\", \"superset.tasks\")\n CELERY_RESULT_BACKEND = CELERY_RESULT_BACKENDS_URIS.get(get_env(\"CELERY_RESULT_BACKEND_TYPE\", default=\"null\"), \"\")\n CELERYD_LOG_LEVEL = get_env(\"CELERYD_LOG_LEVEL\", default=\"DEBUG\")\n CELERY_ACKS_LATE = get_env(\"CELERY_ACKS_LATE\", default=False, cast=bool)\n CELERY_ANNOTATIONS = {\n \"sql_lab.get_sql_results\": {\n \"rate_limit\": get_env(\"CELERY_SQLLAB_GET_RESULTS_RATE_LIMIT\", default=\"100/s\")\n },\n \"email_reports.send\": {\n \"rate_limit\": get_env(\"CELERY_EMAIL_REPORTS_SEND_RATE_LIMIT_IN_SECS\", default=\"1/s\"),\n \"time_limit\": get_env(\"CELERY_EMAIL_REPORTS_TIME_LIMIT\", default=120, cast=int),\n \"soft_time_limit\": get_env(\"CELERY_EMAIL_REPORTS_SOFT_TIME_LIMIT\", default=150, cast=int),\n \"ignore_result\": get_env(\"CELERY_EMAIL_REPORTS_IGNORE_RESULT\", default=True, cast=bool)\n }\n }\n CELERY_BEAT_SCHEDULE = get_celery_beat_schedule()\n\n\nCELERY_CONFIG = CeleryConfig\n# ------------------------------------------------------\n\n# 
------------------------------------------------------\nCORS_OPTIONS = {\n \"origins\": get_env(\"CORS_OPTIONS_ORIGINS\", default=[\"*\"], cast=list),\n \"methods\": get_env(\"CORS_OPTIONS_METHODS\", default=[\"GET\", \"HEAD\", \"POST\", \"OPTIONS\", \"PUT\", \"PATCH\", \"DELETE\"], cast=list),\n \"expose_headers\": get_env(\"CORS_OPTIONS_EXPOSE_HEADERS\", default=[], cast=list),\n \"allow_headers\": get_env(\"CORS_OPTIONS_ALLOW_HEADERS\", default=[\"*\"], cast=list),\n \"send_wildcard\": get_env(\"CORS_OPTIONS_SEND_WILDCARD\", default=False, cast=bool),\n \"vary_header\": get_env(\"CORS_OPTIONS_VARY_HEADER\", default=True, cast=bool)\n}\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nCSV_TO_HIVE_UPLOAD_S3_BUCKET = get_env(\"CSV_TO_HIVE_UPLOAD_S3_BUCKET\")\nCSV_TO_HIVE_UPLOAD_DIRECTORY = get_env(\"CSV_TO_HIVE_UPLOAD_DIRECTORY\", default=\"EXTERNAL_HIVE_TABLES/\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nDEBUG = get_env(\"DEBUG\", default=False, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nDEFAULT_RELATIVE_START_TIME = get_env(\"DEFAULT_RELATIVE_START_TIME\", default=\"today\")\nDEFAULT_RELATIVE_END_TIME = get_env(\"DEFAULT_RELATIVE_END_TIME\", default=\"today\")\nDEFAULT_SQLLAB_LIMIT = get_env(\"DEFAULT_SQLLAB_LIMIT\", default=1000, cast=int)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nDISPLAY_MAX_ROW = get_env(\"DISPLAY_MAX_ROW\", default=10000, cast=int)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nDOCUMENTATION_URL = get_env(\"DOCUMENTATION_URL\")\nDOCUMENTATION_TEXT = get_env(\"DOCUMENTATION_TEXT\", default=\"Documentation\")\nDOCUMENTATION_ICON = get_env(\"DOCUMENTATION_ICON\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nDRUID_ANALYSIS_TYPES = get_env(\"DRUID_ANALYSIS_TYPES\", default=[\"cardinality\"], cast=list)\nDRUID_DATA_SOURCE_BLACKLIST = get_env(\"DRUID_DATA_SOURCE_BLACKLIST\", default=[], cast=list)\nDRUID_IS_ACTIVE = get_env(\"DRUID_IS_ACTIVE\", default=False, cast=bool)\nDRUID_METADATA_LINKS_ENABLED = get_env(\"DRUID_METADATA_LINKS_ENABLED\", default=False, cast=bool)\nDRUID_TZ = DRUID_TIMEZONES.get(get_env(\"DRUID_TZ\", default=\"utc\"),\n tz.gettz(get_env(\"DRUID_TZ\", default=\"utc\")))\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nEMAIL_ASYNC_TIME_LIMIT_SEC = get_env(\"EMAIL_ASYNC_TIME_LIMIT_SEC\", default=300, cast=int)\nEMAIL_NOTIFICATIONS = get_env(\"EMAIL_NOTIFICATIONS\", default=False, cast=bool)\nEMAIL_REPORT_BCC_ADDRESS = get_env(\"EMAIL_REPORT_BCC_ADDRESS\")\nEMAIL_REPORT_FROM_ADDRESS = get_env(\"EMAIL_REPORT_FROM_ADDRESS\", default=\"reports@superset.org\")\nEMAIL_REPORTS_CRON_RESOLUTION = get_env(\"EMAIL_REPORTS_CRON_RESOLUTION\", default=15, cast=int)\nEMAIL_REPORTS_USER = get_env(\"EMAIL_REPORTS_USER\", default=\"admin\")\nEMAIL_REPORTS_SUBJECT_PREFIX = get_env(\"EMAIL_REPORTS_SUBJECT_PREFIX\", default=\"[Report] \")\nEMAIL_REPORTS_WEBDRIVER = get_env(\"EMAIL_REPORTS_WEBDRIVER\", default=\"firefox\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nENABLE_ACCESS_REQUEST 
= get_env(\"ENABLE_ACCESS_REQUEST\", default=False, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nENABLE_CHUNK_ENCODING = get_env(\"ENABLE_CHUNK_ENCODING\", default=False, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nENABLE_CORS = get_env(\"ENABLE_CORS\", default=False, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nENABLE_FLASK_COMPRESS = get_env(\"ENABLE_FLASK_COMPRESS\", default=True, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nENABLE_JAVASCRIPT_CONTROLS = get_env(\"ENABLE_JAVASCRIPT_CONTROLS\", default=False, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nENABLE_PROXY_FIX = get_env(\"ENABLE_PROXY_FIX\", default=False, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nENABLE_REACT_CRUD_VIEWS = get_env(\"ENABLE_REACT_CRUD_VIEWS\", default=False, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nENABLE_SCHEDULED_EMAIL_REPORTS = get_env(\"ENABLE_SCHEDULED_EMAIL_REPORTS\", default=False, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nENABLE_TIME_ROTATE = get_env(\"ENABLE_TIME_ROTATE\", default=False, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nFAB_ADD_SECURITY_PERMISSION_VIEW = get_env(\"FAB_ADD_SECURITY_PERMISSION_VIEW\", default=False, cast=bool)\nFAB_ADD_SECURITY_PERMISSION_VIEWS_VIEW = get_env(\"FAB_ADD_SECURITY_PERMISSION_VIEWS_VIEW\", default=False, cast=bool)\nFAB_ADD_SECURITY_VIEW_MENU_VIEW = get_env(\"FAB_ADD_SECURITY_VIEW_MENU_VIEW\", default=False, cast=bool)\nFAB_ADD_SECURITY_VIEWS = get_env(\"FAB_ADD_SECURITY_VIEWS\", default=True, cast=bool)\nFAB_API_SWAGGER_UI = get_env(\"FAB_API_SWAGGER_UI\", default=True, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nFEATURE_FLAGS = {\n \"CLIENT_CACHE\": get_env(\"FEATURE_FLAG_CLIENT_CACHE\", default=False, cast=bool),\n \"ENABLE_EXPLORE_JSON_CSRF_PROTECTION\": get_env(\"FEATURE_FLAG_ENABLE_EXPLORE_JSON_CSRF_PROTECTION\",\n default=False,\n cast=bool),\n \"KV_STORE\": get_env(\"FEATURE_FLAG_KV_STORE\", default=False, cast=bool),\n \"PRESTO_EXPAND_DATA\": get_env(\"FEATURE_FLAG_PRESTO_EXPAND_DATA\", default=False, cast=bool),\n \"THUMBNAILS\": get_env(\"FEATURE_FLAG_THUMBNAILS\", default=False, cast=bool),\n \"REDUCE_DASHBOARD_BOOTSTRAP_PAYLOAD\": get_env(\"FEATURE_FLAG_REDUCE_DASHBOARD_BOOTSTRAP_PAYLOAD\",\n default=True,\n cast=bool),\n \"SHARE_QUERIES_VIA_KV_STORE\": get_env(\"FEATURE_FLAG_SHARE_QUERIES_VIA_KV_STORE\", default=False, cast=bool),\n \"SIP_38_VIZ_REARCHITECTURE\": get_env(\"FEATURE_FLAG_SIP_38_VIZ_REARCHITECTURE\", default=False, cast=bool),\n \"TAGGING_SYSTEM\": get_env(\"FEATURE_FLAG_TAGGING_SYSTEM\", default=False, cast=bool),\n \"SQLLAB_BACKEND_PERSISTENCE\": get_env(\"FEATURE_FLAG_SQLLAB_BACKEND_PERSISTENCE\", default=False, cast=bool),\n \"LIST_VIEWS_SIP34_FILTER_UI\": 
get_env(\"FEATURE_FLAG_LIST_VIEWS_SIP34_FILTER_UI\", default=False, cast=bool)\n}\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nFILTER_SELECT_ROW_LIMIT = get_env(\"FILTER_SELECT_ROW_LIMIT\", default=10000, cast=int)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nFLASK_USE_RELOAD = get_env(\"FLASK_USE_RELOAD\", default=True, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nHIVE_POLL_INTERVAL = get_env(\"HIVE_POLL_INTERVAL\", default=5, cast=int)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nINTERVAL = get_env(\"INTERVAL\", default=1, cast=int)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nLOG_FORMAT = get_env(\"LOG_FORMAT\", default=\"%(asctime)s:%(levelname)s:%(name)s:%(message)s\")\nLOG_LEVEL = get_env(\"LOG_LEVEL\", default=\"DEBUG\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nLOGO_TARGET_PATH = get_env(\"LOGO_TARGET_PATH\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nMAX_TABLE_NAMES = get_env(\"MAX_TABLE_NAMES\", default=3000, cast=int)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nMAPBOX_API_KEY = get_env(\"MAPBOX_API_KEY\", default=\"\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nPERMISSION_INSTRUCTIONS_LINK = get_env(\"PERMISSION_INSTRUCTIONS_LINK\", default=\"\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nPREVENT_UNSAFE_DB_CONNECTIONS = get_env(\"PREVENT_UNSAFE_DB_CONNECTIONS\", default=True, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nPROXY_FIX_CONFIG = {\n \"x_for\": get_env(\"PROXY_FIX_CONFIG_X_FOR\", default=1, cast=int),\n \"x_proto\": get_env(\"PROXY_FIX_CONFIG_X_PROTO\", default=1, cast=int),\n \"x_host\": get_env(\"PROXY_FIX_CONFIG_X_HOST\", default=1, cast=int),\n \"x_prefix\": get_env(\"PROXY_FIX_CONFIG_X_PREFIX\", default=1, cast=int)\n}\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nPUBLIC_ROLE_LIKE_GAMMA = get_env(\"PUBLIC_ROLE_LIKE_GAMMA\", default=False, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nRESULTS_BACKEND = SUPERSET_RESULTS_BACKENDS.get(get_env(\"SUPERSET_RESULTS_BACKEND_TYPE\", default=\"null\"), lambda: None)()\nRESULTS_BACKEND_USE_MSGPACK = get_env(\"SUPERSET_RESULTS_BACKEND_USE_MSGPACK\", default=True, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nROLLOVER = get_env(\"ROLLOVER\", default=\"midnight\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nROW_LIMIT = get_env(\"ROW_LIMIT\", default=50000, cast=int)\n# ------------------------------------------------------\n\n# 
------------------------------------------------------\nSAMPLES_ROW_LIMIT = get_env(\"SAMPLES_ROW_LIMIT\", default=1000, cast=int)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSCHEDULED_EMAIL_DEBUG_MODE = get_env(\"SCHEDULED_EMAIL_DEBUG_MODE\", default=False, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSECRET_KEY = get_env(\"SECRET_KEY\", default=\"\\1\\2thisismysecretkey\\1\\2\\e\\y\\y\\h\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSEND_FILE_MAX_AGE_DEFAULT = get_env(\"SEND_FILE_MAX_AGE_DEFAULT\", default=31536000, cast=int)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSESSION_COOKIE_HTTPONLY = get_env(\"SESSION_COOKIE_HTTPONLY\", default=True, cast=bool)\nSESSION_COOKIE_SAMESITE = get_env(\"SESSION_COOKIE_SAMESITE\", default=\"Lax\")\nSESSION_COOKIE_SECURE = get_env(\"SESSION_COOKIE_SECURE\", default=False, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSHOW_STACKTRACE = get_env(\"SHOW_STACKTRACE\", default=True, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSILENCE_FAB = get_env(\"SILENCE_FAB\", default=True, cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSIP_15_DEFAULT_TIME_RANGE_ENDPOINTS = get_env(\"SIP_15_DEFAULT_TIME_RANGE_ENDPOINTS\",\n default=[\"unknown\", \"inclusive\"],\n cast=list)\nSIP_15_ENABLED = get_env(\"SIP_15_ENABLED\", default=True, cast=bool)\nSIP_15_GRACE_PERIOD_END = get_env(\"SIP_15_GRACE_PERIOD_END\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSMTP_HOST = get_env(\"SMTP_HOST\", default=\"localhost\")\nSMTP_MAIL_FROM = get_env(\"SMTP_MAIL_FROM\", default=\"superset@superset.org\")\nSMTP_PASSWORD = get_env(\"SMTP_PASSWORD\", default=\"superset\")\nSMTP_PORT = get_env(\"SMTP_PORT\", default=25, cast=int)\nSMTP_STARTTLS = get_env(\"SMTP_STARTTLS\", default=True, cast=bool)\nSMTP_SSL = get_env(\"SMTP_SSL\", default=False, cast=bool)\nSMTP_USER = get_env(\"SMTP_USER\", default=\"superset\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSSL_CERT_PATH = get_env(\"SSL_CERT_PATH\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSTATS_LOGGER = STATS_LOGGERS.get(get_env(\"STATS_LOGGER_TYPE\", default=\"dummy\"), STATS_LOGGERS[\"dummy\"])()\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSUPERSET_DASHBOARD_POSITION_DATA_LIMIT = get_env(\"SUPERSET_DASHBOARD_POSITION_DATA_LIMIT\", default=65535, cast=int)\nSUPERSET_DASHBOARD_PERIODICAL_REFRESH_LIMIT = get_env(\"SUPERSET_DASHBOARD_PERIODICAL_REFRESH_LIMIT\", default=0, cast=int)\nSUPERSET_DASHBOARD_PERIODICAL_REFRESH_WARNING_MESSAGE = get_env(\"SUPERSET_DASHBOARD_PERIODICAL_REFRESH_WARNING_MESSAGE\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSUPERSET_LOG_VIEW = get_env(\"SUPERSET_LOG_VIEW\", default=True, 
cast=bool)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSUPERSET_WEBSERVER_ADDRESS = get_env(\"SUPERSET_WEBSERVER_ADDRESS\", default=\"0.0.0.0\")\nSUPERSET_WEBSERVER_PORT = get_env(\"SUPERSET_WEBSERVER_PORT\", default=8088, cast=int)\nSUPERSET_WEBSERVER_PROTOCOL = get_env(\"SUPERSET_WEBSERVER_PROTOCOL\", default=\"http\")\nSUPERSET_WEBSERVER_TIMEOUT = get_env(\"SUPERSET_WEBSERVER_TIMEOUT\", default=60, cast=int)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSQL_MAX_ROW = get_env(\"SQL_MAX_ROW\", default=100000, cast=int)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSQLALCHEMY_TRACK_MODIFICATIONS = get_env(\"SQLALCHEMY_TRACK_MODIFICATIONS\", default=False, cast=bool)\nSQLALCHEMY_DATABASE_URI = get_db_or_broker_uri(\"METADATA_DB\", METADATA_DB_PREFIXES, METADATA_DB_DEFAULT_PORTS)\nSQLALCHEMY_EXAMPLES_URI = get_db_or_broker_uri(\"METADATA_DB\", METADATA_DB_PREFIXES, METADATA_DB_DEFAULT_PORTS)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nSQLLAB_ASYNC_TIME_LIMIT_SEC = get_env(\"SQLLAB_ASYNC_TIME_LIMIT_SEC\", default=21600, cast=int)\nSQLLAB_CTAS_NO_LIMIT = get_env(\"SQLLAB_CTAS_NO_LIMIT\", default=False, cast=bool)\nSQLLAB_SAVE_WARNING_MESSAGE = get_env(\"SQLLAB_SAVE_WARNING_MESSAGE\")\nSQLLAB_SCHEDULE_WARNING_MESSAGE = get_env(\"SQLLAB_SCHEDULE_WARNING_MESSAGE\")\nSQLLAB_TIMEOUT = get_env(\"SQLLAB_TIMEOUT\", default=30, cast=int)\nSQLLAB_VALIDATION_TIMEOUT = get_env(\"SQLLAB_VALIDATION_TIMEOUT\", default=10, cast=int)\nSQLLAB_QUERY_COST_ESTIMATE_TIMEOUT = get_env(\"SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT\", default=10, cast=int)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nTABLE_NAMES_CACHE_CONFIG = get_cache_config(\"TABLE_NAMES_CACHE_CONFIG\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nTALISMAN_ENABLED = get_env(\"TALISMAN_ENABLED\", default=False, cast=bool)\nTALISMAN_CONFIG = {\n \"content_security_policy\": get_env(\"TALISMAN_CONFIG_CONTENT_SECURITY_POLICY\"),\n \"force_https\": get_env(\"TALISMAN_CONFIG_FORCE_HTTPS\", default=True, cast=bool),\n \"force_https_permanent\": get_env(\"TALISMAN_CONFIG_FORCE_HTTPS_PERMANENT\", default=False, cast=bool)\n}\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nTHUMBNAIL_CACHE_CONFIG = get_cache_config(\"THUMBNAIL_CACHE_CONFIG\")\nTHUMBNAIL_SELENIUM_USER = get_env(\"THUMBNAIL_SELENIUM_USER\", default=\"Admin\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nTIME_ROTATE_LOG_LEVEL = get_env(\"TIME_ROTATE_LOG_LEVEL\", default=\"DEBUG\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nTROUBLESHOOTING_LINK = get_env(\"TROUBLESHOOTING_LINK\", default=\"\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nVIZ_TYPE_BLACKLIST = get_env(\"VIZ_TYPE_BLACKLIST\", default=[], cast=list)\nVIZ_ROW_LIMIT = get_env(\"VIZ_ROW_LIMIT\", default=10000, cast=int)\n# ------------------------------------------------------\n\n# 
------------------------------------------------------\nWARNING_MSG = get_env(\"WARNING_MSG\")\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nWEBDRIVER_BASEURL = get_env(\"WEBDRIVER_BASEURL\", default=\"http://0.0.0.0:8080/\")\nWEBDRIVER_WINDOW = {\n \"dashboard\": (get_env(\"WEBDRIVER_WINDOW_DASHBOARD_WIDTH\", default=1600, cast=int),\n get_env(\"WEBDRIVER_WINDOW_DASHBOARD_HEIGHT\", default=2000, cast=int)),\n \"slice\": (get_env(\"WEBDRIVER_WINDOW_SLICE_WIDTH\", default=3000, cast=int),\n get_env(\"WEBDRIVER_WINDOW_SLICE_HEIGHT\", default=1200, cast=int))\n}\n# ------------------------------------------------------\n\n\n# ------------------------------------------------------\nWTF_CSRF_ENABLED = get_env(\"WTF_CSRF_ENABLED\", default=True, cast=bool)\nWTF_CSRF_EXEMPT_LIST = get_env(\"WTF_CSRF_EXEMPT_LIST\", default=[\"superset.views.core.log\"], cast=list)\nWTF_CSRF_TIME_LIMIT = get_env(\"WTF_CSRF_TIME_LIMIT\", default=604800, cast=int)\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nQUERY_SEARCH_LIMIT = get_env(\"QUERY_SEARCH_LIMIT\", default=1000, cast=int)\n# ------------------------------------------------------\n","repo_name":"mpolatcan/superset-docker","sub_path":"src/base/superset_config.py","file_name":"superset_config.py","file_ext":"py","file_size_in_byte":28116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"23742329406","text":"import cv2\r\nimport pandas as pd\r\n\r\nimg_path = 'img1.jpg'\r\ncsv_path = 'colors.csv'\r\n\r\n# reading csv file\r\nindex = ['color', 'color_name', 'hex', 'R', 'G', 'B']\r\ndf = pd.read_csv(csv_path, names=index, header=None)\r\n\r\n# reading image\r\nimg = cv2.imread(img_path)\r\nimg = cv2.resize(img, (800, 600))\r\n\r\n# global variables\r\nclicked = False\r\nr = g = b = xpos = ypos = 0\r\n\r\n# function to get the color name of the most matching color by calculating minimum distance from all colors\r\ndef get_color_name(R, G, B):\r\n global cname\r\n minimum = 1000\r\n for i in range(len(df)):\r\n d = abs(R - int(df.loc[i, 'R'])) + abs(G - int(df.loc[i, 'G'])) + abs(B - int(df.loc[i, 'B']))\r\n if d <= minimum:\r\n minimum = d\r\n cname = df.loc[i, 'color_name']\r\n\r\n return cname\r\n\r\n# function to get x,y coordinates when the mouse is double clicked\r\ndef draw_function(event, x, y, flags, params):\r\n if event == cv2.EVENT_LBUTTONDBLCLK:\r\n global b, g, r, xpos, ypos, clicked\r\n clicked = True\r\n xpos = x\r\n ypos = y\r\n b, g, r = img[y, x]\r\n b = int(b)\r\n g = int(g)\r\n r = int(r)\r\n\r\n# creating window\r\ncv2.namedWindow('image')\r\ncv2.setMouseCallback('image', draw_function)\r\n\r\nwhile True:\r\n cv2.imshow('image', img)\r\n if clicked:\r\n # to create a rectangle with filled up with detected color\r\n cv2.rectangle(img, (20, 20), (600, 60), (b, g, r), -1) # (cv2.rectangle(image, startpoint, endpoint, color, thickness))\r\n\r\n # to create the string with the name of the color along with the RGB values respectively\r\n text = get_color_name(r, g, b) + ' R=' + str(r) + ' G=' + str(g) + ' B=' + str(b)\r\n\r\n cv2.putText(img, text, (50, 50), 2, 0.8, (255, 255, 255), 2, cv2.LINE_AA) # (cv2.putText(img,text,start,font,fontScale,color,thickness,lineType ))\r\n\r\n # to display the text in black if very light colors are detected\r\n if r + g + b >= 600:\r\n cv2.putText(img, text, (50, 50), 2, 0.8, (0, 0, 0), 2, 
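# --- aside -------------------------------------------------------------------
# get_color_name() in the Color_Identificaton.py record above picks the named
# color with the smallest Manhattan (L1) distance in RGB space over a CSV of
# reference colors. A dependency-free sketch of the same lookup; the three-entry
# palette stands in for colors.csv.
PALETTE = {
    "red": (255, 0, 0),
    "green": (0, 128, 0),
    "blue": (0, 0, 255),
}

def nearest_color(r, g, b):
    return min(
        PALETTE,
        key=lambda name: abs(r - PALETTE[name][0])
        + abs(g - PALETTE[name][1])
        + abs(b - PALETTE[name][2]),
    )

assert nearest_color(250, 10, 10) == "red"
assert nearest_color(10, 10, 200) == "blue"
# -------------------------------------------------------------------------------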
cv2.LINE_AA)\r\n\r\n # close the window when the Esc key (27) is pressed\r\n if cv2.waitKey(20) & 0xFF == 27:\r\n break\r\n\r\ncv2.destroyAllWindows()\r\n","repo_name":"nivethitha2468/color_identify","sub_path":"Color_Identification in images/Color_Identificaton.py","file_name":"Color_Identificaton.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15037900915","text":"import numpy as np\nimport pytest\n\nfrom arctic3d.modules.clustering import (\n cluster_similarity_matrix,\n filter_clusters,\n get_clustering_dict,\n get_residue_dict,\n)\n\n\n@pytest.fixture\ndef int_matrix():\n return np.array([0.9972, 0.3742, 0.9736, 0.9996, 0.8841, 0.9991])\n\n\n@pytest.fixture\ndef int_names():\n return [\"int_1\", \"int_2\", \"int_3\", \"int_4\"]\n\n\ndef test_cluster_similarity_matrix(int_matrix, int_names):\n \"\"\"Test correct clustering.\"\"\"\n clusters = cluster_similarity_matrix(int_matrix, int_names)\n expected_clusters = [1, 2, 1, 3]\n assert (clusters == expected_clusters).all()\n\n\ndef test_complete_strategy_clustering(int_matrix, int_names):\n \"\"\"Test clustering with complete strategy.\"\"\"\n clusters = cluster_similarity_matrix(\n int_matrix, int_names, linkage_strategy=\"complete\", threshold=0.9\n )\n expected_clusters = [1, 2, 1, 2]\n assert (clusters == expected_clusters).all()\n\n\ndef test_get_cl_dict():\n \"\"\"Test correct retrieval of cl_dict.\"\"\"\n clusters_list = [1, 1, 2, 3, 3, 4, 2]\n ligands_list = [\"int1\", \"int2\", \"p53\", \"00\", \"int47\", \"antibody\", \"dimer\"]\n expected_cl_dict = {\n 1: [\"int1\", \"int2\"],\n 2: [\"p53\", \"dimer\"],\n 3: [\"00\", \"int47\"],\n 4: [\"antibody\"],\n }\n observed_cl_dict = get_clustering_dict(clusters_list, ligands_list)\n assert expected_cl_dict == observed_cl_dict\n\n\ndef test_get_res_dict():\n \"\"\"Test correct retrieval of res_dict.\"\"\"\n interface_dict = {\n \"int_1\": [1, 2, 3],\n \"int_2\": [3, 4, 5],\n \"int_3\": [27, 28, 29],\n }\n cl_dict = {1: [\"int_1\", \"int_2\"], 2: [\"int_3\"]}\n expected_res_dict = {1: [1, 2, 3, 4, 5], 2: [27, 28, 29]}\n expected_res_probs = {\n 1: {1: 0.5, 2: 0.5, 3: 1.0, 4: 0.5, 5: 0.5},\n 2: {27: 1.0, 28: 1.0, 29: 1.0},\n }\n observed_res_dict, observed_res_probs = get_residue_dict(\n cl_dict, interface_dict\n )\n assert expected_res_dict == observed_res_dict\n assert expected_res_probs == observed_res_probs\n\n\ndef test_filter_clusters():\n \"\"\"Test correct filtering of clusters.\"\"\"\n example_cl_dict = {\n 1: [\"int_1\", \"int_2\"],\n 2: [\"int_3\"],\n }\n example_res_dict = {1: [1, 2, 3, 4, 5], 2: [27, 28, 29]}\n example_res_probs = {\n 1: {1: 0.5, 2: 0.5, 3: 1.0, 4: 0.5, 5: 0.5},\n 2: {27: 1.0, 28: 1.0, 29: 1.0},\n }\n obs_cl_dict, obs_res_dict, obs_res_probs = filter_clusters(\n example_cl_dict, example_res_dict, example_res_probs, 4\n )\n exp_cl_dict = {1: [\"int_1\", \"int_2\"]}\n exp_res_dict = {1: [1, 2, 3, 4, 5]}\n exp_res_probs = {\n 1: {1: 0.5, 2: 0.5, 3: 1.0, 4: 0.5, 5: 0.5},\n }\n assert exp_cl_dict == obs_cl_dict\n assert exp_res_dict == obs_res_dict\n assert exp_res_probs == obs_res_probs\n","repo_name":"haddocking/arctic3d","sub_path":"tests/test_clustering.py","file_name":"test_clustering.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"34753354761","text":"#!/usr/bin/python3\n\"\"\"saving in json format\"\"\"\nimport json\n\n\ndef 
save_to_json_file(my_obj, filename):\n \"\"\"saving in json format\n Args:\n my_obj: python object\n filename: the filename\n Return: None\n \"\"\"\n with open(filename, mode=\"w+\", encoding=\"utf-8\") as myfile:\n json_rep = json.dumps(my_obj)\n myfile.write(json_rep)\n","repo_name":"mosesgitonga/alx-higher_level_programming","sub_path":"0x0B-python-input_output/5-save_to_json_file.py","file_name":"5-save_to_json_file.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"429385227","text":"import argparse\nimport csv\nimport random\nimport sys\nfrom datetime import datetime, timedelta\nfrom operator import itemgetter\nfrom pprint import pprint\n\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom utils import utils_fct\n\n\ndef init():\n scope = [\"https://spreadsheets.google.com/feeds\", \"https://www.googleapis.com/auth/spreadsheets\",\n \"https://www.googleapis.com/auth/drive.file\", \"https://www.googleapis.com/auth/drive\"]\n creds = ServiceAccountCredentials.from_json_keyfile_name(\n \"credentials.json\", scope)\n gspread_client = gspread.authorize(creds)\n return gspread_client\n\n\ndef get_client_data(gspread_client, client_name):\n sheet = gspread_client.open_by_key(\"1ZUzfATNwthThvURTfHStZ7QpkCExmMR1xTs3x2ZPXEw\").sheet1\n client_database = sheet.get_all_records()\n client_data = []\n for client in client_database:\n if client_name == client['customerName']:\n client_data.append(client)\n if len(client_data) == 1:\n return client_data[0]\n elif len(client_data) > 1:\n print(\"Il y a {} sources de lead disponibles :\\n\".format(len(client_data)))\n for i, customer in enumerate(client_data, 1):\n print(\"{} : {}\".format(i, customer['leadSource']))\n leadsource_nb = int(\n input(\"Entrez le numéro de la source que vous souhaitez utiliser:\\n\"))\n try:\n return client_data[leadsource_nb - 1]\n except IndexError as error:\n print(\"La valeur sélectionnée : {} semble incorrecte.\\nErreur {}\".format(\n leadsource_nb, error))\n return 0\n\n\ndef write_lead_API(gspread_client, customer_data, csv_source):\n lead_sheet = gspread_client.open_by_key(\n customer_data['sourceId']).get_worksheet(1)\n with open(csv_source) as csv_file:\n leads_csv = csv.reader(csv_file, delimiter=',')\n leads = [lead for lead in leads_csv]\n utils_fct.insert_lead_in_sheet(lead_sheet, leads)\n\n\ndef write_lead_CSV(customer_data, lead_data, lead_nb, options):\n csv_filename = 'lead-extract_' + \\\n customer_data['customerName'] + '_' + \\\n str(datetime.now().strftime(\"%d_%m_%Y_%H_%M_%S\")) + '.csv'\n with open(csv_filename, 'w') as f:\n fieldnames = ['Date', '1) Isolation pour', '2) Quel(s) type(s) de surface à isoler ?',\n '3) Nom', '4) Prénom', '5) Code postal', '6) Numéro de téléphone', '7) Email', 'sent']\n thewriter = csv.DictWriter(f, fieldnames)\n thewriter.writeheader()\n\n lead_source = utils_fct.convert_date(lead_data)\n if options.premium is True:\n premium_lead_data = sorted(\n lead_source, key=itemgetter('Date'), reverse=True)\n lead_source = premium_lead_data\n elif options.rand is True:\n random.shuffle(lead_data)\n\n assigned_lead = 0\n mails = set()\n for lead in lead_source:\n if lead['7) Email'] in mails:\n print(\"Attention, un doublon a été détecté et ignoré : {}\".format(lead['7) Email']))\n else: \n lead['sent'] = customer_data['customerName']\n lead['6) Numéro de téléphone'] = str(\n lead['6) Numéro de téléphone']).zfill(10)\n 
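# --- aside -------------------------------------------------------------------
# save_to_json_file() above serialises with json.dumps() and then a separate
# write(); json.dump() streams straight into the file handle, which is the
# usual one-step equivalent. A minimal sketch (the demo filename is arbitrary):
import json

def save_to_json_file(my_obj, filename):
    with open(filename, mode="w", encoding="utf-8") as myfile:
        json.dump(my_obj, myfile)

save_to_json_file({"a": [1, 2, 3]}, "demo.json")
# -------------------------------------------------------------------------------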
thewriter.writerow(lead)\n assigned_lead += 1\n mails.add(lead['7) Email'])\n if assigned_lead == lead_nb:\n break\n\n return csv_filename\n\n\ndef get_available_leads(gspread_client, customer_data, options):\n valid_lead = []\n available_leads = 0\n sheet = gspread_client.open_by_key(customer_data['sourceId']).sheet1\n ws = sheet.get_all_records()\n postal_code_set = utils_fct.get_postal_code_set(customer_data)\n for lead in ws:\n if utils_fct.is_valid_lead(lead, customer_data, postal_code_set) == True:\n valid_lead.append(lead)\n available_leads += 1\n print(\"Il y a {} leads disponibles, combien souhaitez-vous en extraire ?\".format(available_leads))\n lead_nb = int(\n input(\"Entrez un nombre (max:{}): \\n\".format(available_leads)))\n if lead_nb > 0:\n lead_csv = write_lead_CSV(customer_data, valid_lead, lead_nb, options)\n if utils_fct.ask_user() is True:\n write_lead_API(gspread_client, customer_data, lead_csv)\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"client_name\", type=str,\n help=\"Nom du client pour qui on veut extraire des leads.\\n\")\n parser.add_argument(\n \"--premium\", help=\"Selectionne les leads les plus récents.\", action=\"store_true\")\n parser.add_argument(\n \"--rand\", help=\"Selectionne les leads aléatoirement parmi ceux disponibles.\", action=\"store_true\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n options = get_args()\n gspread_client = init()\n customer_data = get_client_data(gspread_client, options.client_name)\n if customer_data == 0:\n sys.exit(\"Le client {} est introuvable dans la base de client.\".format(\n options.client_name))\n get_available_leads(gspread_client, customer_data, options)\n\n pass\n","repo_name":"nvergnac/nx_creative","sub_path":"distrib.py","file_name":"distrib.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20511983724","text":"#!/usr/bin/env python\n\n\n# this script finds all nearby SNPs, methylation probes and acetylation peaks within some window of a gene start site\n# the input is a gene name and windows for SNps / peaks. ie. 
python genome_algebra.py LSM1 200000 200000\n# this script outputs data matrices for nearby probes and peaks, as this can be done faster in python\n# it ouputs names of nearby SNPs, for which data matrix will be constructed using R, which is faster than python\n# the rationale being that specific rows and columns can be read from large data matrices faster in python and R (respectively)\n\nimport sys\nimport numpy\nimport csv\n\n# define function for checking min value of windows (relative to gene position)\n\ndef minChecker(min):\n\tif min < 0:\n\t\treturn 0\n\telse:\n\t\treturn(int(min))\n\n\n# file paths for my Broad account directory\n\nfilename1 = \"/home/unix/jtopham/data/ensembl_mapper2.txt\" \t\t # maps gene names to genomic position\nfilename2 = \"/home/unix/jtopham/data/snp_mapper/snpArrayAffy6.txt\" # maps SNP ids to genomic position\nfilename3 = \"/home/unix/jtopham/data/methylation/filtered_meth_mapper.txt\" # maps CpG sites to genomic position\nfilename4 = \"/home/unix/jtopham/data/acetylation/peakInfo.csv\" # maps ChipSeq peaks to genomic position \nfilename5 = \"/home/unix/jtopham/data/methylation/probe_list.txt\" # list of probes, in order, from ill450k array\nfilename6 = \"/home/unix/jtopham/data/methylation/ill450kMeth_all_740_imputed.txt\" # methylation data matrix\nfilename7 = \"/home/unix/jtopham/data/acetylation/peakList.txt\"\t\t # list of peaks, in order, from chipSeq assay\nfilename8 = \"/home/unix/jtopham/data/acetylation/chipSeqResiduals.csv\" # acetylation data matrix\n\n# boiler plate:\n\nargs = sys.argv \t\t # arguments should be: gene name, SNP window, probe window\nthis_gene = args[1]\nnearby_snps = []\nnearby_snps.append(\"SNP\\tLocation\")\nnearby_probes = []\nnearby_peaks = []\n\n\n# get start position of gene\n\ngene_position = -1\nwith open(filename1, \"r\") as ins: \t\t\t \n\tfor line in ins:\n\t\tif line.split()[1] == this_gene:\n\t\t\tgene_position = int(line.split()[3])\n\t\t\tgene_chr = \"chr\" + line.split()[2]\n\t\t\tbreak\n\nif gene_position == -1:\n\tsys.exit('Error: gene maps to no positon')\n\n\n# arrange windows for feature search space\n\ng_window = int(args[2])\nm_window = int(args[3]) # set methylation window here\na_window = 1000000 # set acetylation window here (1mb)\n\nm_min_range = gene_position - m_window/2\nm_max_range = gene_position + m_window/2\n\na_min_range = gene_position - int(a_window)/2\na_max_range = gene_position + int(a_window)/2\n\ng_min_range = gene_position - g_window/2\ng_max_range = gene_position + g_window/2\n\ng_min_range = minChecker(g_min_range)\nm_min_range = minChecker(m_min_range)\na_min_range = minChecker(a_min_range)\n\n\n# find SNPs within window of gene start position\n\nwith open (filename2, \"r\") as ins: \n\tfor line in ins:\n\t\tif line.split()[1] == gene_chr:\n\t\t\tif int(line.split()[2]) >= g_min_range and int(line.split()[2]) <= g_max_range:\n\t\t\t\tnearby_snps.append(line.split()[8] + '\\t' + line.split()[1])\n\nif len(nearby_snps) == 0:\n\tsys.exit('Error: no SNPs found within supplied window')\n\n\n\n# find methylation probes within window of gene start position\n\nwith open(filename3, \"r\") as ins:\n\tfor line in ins:\n\t\tif line.split()[1].isdigit():\n\t\t\tif int(line.split()[1]) >= m_min_range and int(line.split()[1]) <= m_max_range:\n\t\t\t\tnearby_probes.append(line.split()[0])\n\nif len(nearby_probes) == 0:\n\tsys.exit('Error: no methylation probes found within supplied window')\n\n\n\n# find acetyltion peaks within some window of gene start position\n\nwith open(filename4, \"r\") as 
ins:\n\tfor line in ins:\n\t\tif line.split()[1].isdigit():\n\t\t\tthis_mean = (int(line.split()[1]) + int(line.split()[2])) / 2\n\t\t\tif this_mean >= a_min_range and this_mean <= a_max_range: # find acetylation peaks within chosen window of SNP\n\t\t\t\tnearby_peaks.append(line.split()[0])\n\nif len(nearby_peaks) == 0:\n\tsys.exit('Error: no acetylation peaks found within 1mb window')\n\n\n# std.out summary of what was found\n\n\nprint('Window search summary for ', this_gene, ': ')\nprint('Probes found:\\t', len(nearby_probes))\nprint('Peaks found:\\t', len(nearby_peaks))\nprint('SNPs found:\\t', len(nearby_snps))\n\n\n\n# retrieve methylation matrix faster than R\n\n## determine indeces of nearby probes within data matrix\n## read-in only these indeces, write to tsv file\n\n\nmIndeces = []\nfp = open(filename5)\nfor i, line in enumerate(fp):\n\tif line.split()[0] in nearby_probes:\n\t\tmIndeces.append(i)\nfp.close()\n\nif len(mIndeces)<1:\n\tsys.exit('No nearby probes were found in methylation array')\n\nmIndeces.sort()\n\nmeth_matrix = []\nfp = open(filename6)\nfor i, line in enumerate(fp):\n\tif i == 0:\n\t\tmeth_matrix.append(line.split())\n\tif i in mIndeces:\n\t\tmeth_matrix.append(line.split())\n\tif i > mIndeces[-1]:\n\t\tbreak\nfp.close()\n\nmFile = \"z_\" + this_gene + \"_methylation.txt\" # write methylation matrix to file\nwith open(mFile, \"wt\") as f:\n\twriter = csv.writer(f, delimiter = '\\t')\n\twriter.writerows(meth_matrix)\n\n\n\n# retrieve acetylation matrix faster than R\n\n## retrieve indeces of nearby peaks in data matrix\n## read-in only these indeces and write to tsv file\n\n\npIndeces = []\nfp = open (filename7)\nfor i, line in enumerate(fp):\n\tif line.split()[0] in nearby_peaks:\n\t\tpIndeces.append(i)\nfp.close()\n\nif len(pIndeces)<1:\n\tsys.exit('No nearby peaks were found in acetylation array')\n\npIndeces.sort()\n\nacet_matrix = []\nfp = open(filename8)\nfor i, line in enumerate(fp):\n\tif i == 0:\n\t\tacet_matrix.append(line.split())\n\tif i in pIndeces:\n\t\tacet_matrix.append(line.split())\n\tif i > pIndeces[-1]:\n\t\tbreak\nfp.close()\n\naFile = \"z_\" + this_gene + \"_acetylation.txt\"\nwith open(aFile,\"wt\") as f:\n\twriter = csv.writer(f,delimiter = '\\t')\n\twriter.writerows(acet_matrix)\n\n\n# output names of nearby SNPs to file\n# data matrix for these will be retrieved by R (faster)\n\n\ngFile = \"z_\" + this_gene + \"_genotype.txt\" \t\t\t\t \t\t\t\t \n\nwith open(gFile, \"w\") as f:\n\tf.write(\"\\n\".join(nearby_snps))\n\tf.write(\"\\n\")\n\tf.close()\n\n","repo_name":"jtopham/jtopham_dev","sub_path":"projects/SNP_explorer/genome_algebra.py","file_name":"genome_algebra.py","file_ext":"py","file_size_in_byte":6234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14026259157","text":"numero = int(input(\"Digite um número: \"))\nnome = str(input(\"Digite o seu nome: \"))\n\nif numero >= 0 and numero <= 100:\n print(f\"{numero} está no intervalo entre 0 e 100\")\n\nif numero < 0 or numero > 100:\n print(f\"{numero} não está no intervalo entre 0 e 100\")\n\nres_a = \"Existe a letra 'A' em seu nome\" if 'a' in nome else \"Não existe a letra 'A' em seu nome\"\nres_b = \"Não existe a letra 'X' em seu nome\" if 'x' not in nome else \"Existe a letra 'X' em seu 
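# --- aside -------------------------------------------------------------------
# genome_algebra.py above repeats the same arithmetic three times: centre a
# window on the gene start position, then clamp the lower bound at zero
# (minChecker). A small helper capturing that pattern; the position/window
# numbers are illustrative only.
def window_around(position, window):
    low = max(0, position - window // 2)   # clamp at 0, as minChecker() does
    high = position + window // 2
    return low, high

low, high = window_around(position=150000, window=400000)
assert (low, high) == (0, 350000)
assert low <= 120000 <= high               # e.g. testing one SNP coordinate
# -------------------------------------------------------------------------------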
nome\"\n\nprint(res_a)\nprint(res_b)\n","repo_name":"xandaosilva/curso-python","sub_path":"capitulo02/aula11/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36712592116","text":"import numpy as np\r\nimport pandas as pd\r\n\r\n# https://en.wikipedia.org/wiki/Hurst_exponent\r\n__all__ = ['hurst', 'get_hurst_df']\r\n\r\n\r\ndef hurst(norm_spread):\r\n \"\"\"\r\n Calculates Hurst exponent.\r\n https://en.wikipedia.org/wiki/Hurst_exponent\r\n :param norm_spread: An array like object used to calculate half-life.\r\n \"\"\"\r\n # Create the range of lag values\r\n lags = range(2, 100)\r\n\r\n # Calculate the array of the variances of the lagged differences\r\n diffs = [np.subtract(norm_spread[l:], norm_spread[:-l]) for l in lags]\r\n tau = [np.sqrt(np.std(diff)) for diff in diffs]\r\n\r\n # Use a linear fit to estimate the Hurst Exponent\r\n poly = np.polyfit(np.log(lags), np.log(tau), 1)\r\n\r\n # Return the Hurst exponent from the polyfit output\r\n H = poly[0] * 2.0\r\n\r\n return H\r\n\r\ndef get_hurst_df(df):\r\n \"\"\"\r\n :param df: dataframe, 후보들의 spread가 있어야 함.\r\n :return: hurst of each spread\r\n \"\"\"\r\n hurst_df = pd.DataFrame(index=['hurst'])\r\n for i in df.columns:\r\n h = hurst(df[i].values)\r\n hurst_df[i] = h\r\n return hurst_df.T.hurst","repo_name":"jo-cho/copula_practice","sub_path":"pairs_selection/hurst.py","file_name":"hurst.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38485670584","text":"import nbformat\nimport IPython\nimport sys\nfrom io import StringIO\nfrom IPython.utils import io\nfrom bisect import bisect\nimport copy\n\nclass notebook:\n def __init__(self, path, version = 4):\n self.runtime = IPython.InteractiveShell()\n\n with open(path+\".ipynb\", \"r\") as f1: # Open our source file\n self.json = nbformat.read(f1, 4)\n self.total = len(self.json.cells)\n self.text_count = 0\n self.code_count = 0\n self.code_cells = []\n self.text_cells = []\n i = 0\n while i < self.total:\n if self.json.cells[i].cell_type == \"markdown\":\n self.text_cells.append(i)\n self.text_count = self.text_count + 1\n if self.json.cells[i].cell_type == \"code\":\n self.code_cells.append(i)\n self.code_count = self.code_count + 1\n i = i + 1\n \n\n def read_code_cell(self, i):\n if abs(i) >= self.code_count:\n return \"Index out of range\"\n \n return self.json.cells[self.code_cells[i]].source \n def read_text_cell(self, i):\n if abs(i) >= self.text_count:\n return \"Index out of range\"\n return self.json.cells[self.text_cells[i]].source\n\n def read_cell(self, i):\n if abs(i) >= self.total:\n return \"Index out of range\"\n return self.json.cells[i].source\n\n def insert_code_cell(self, source, i):\n i1 = i\n if i < 0:\n i1 = self.total + i\n if i1 > self.total:\n i1 = self.total\n if i1 < 0:\n i1 = 0\n new_cell = nbformat.v4.new_code_cell(source = source)\n self.json.cells.insert(i1, new_cell)\n self.total = self.total + 1\n i2 = bisect(self.code_cells, i1)\n self.code_cells.insert(i2, i1)\n self.code_count = self.code_count + 1\n while i2 < self.code_count:\n self.code_cells[i2] = self.code_cells[i2] + 1\n i2 = i2 + 1\n\n i2 = bisect(self.text_cells, i1)\n while i2 < self.text_count:\n self.text_cells[i2] = self.text_cells[i2] + 1\n i2 = i2 + 1\n\n \n def insert_text_cell(self, source, i):\n i1 = i\n if i < 0:\n i1 = self.total + i\n if i1 > 
self.total:\n i1 = self.total\n if i1 < 0:\n i1 = 0\n new_cell = nbformat.v4.new_markdown_cell(source = source)\n self.json.cells.insert(i1, new_cell)\n self.total = self.total + 1\n i2 = bisect(self.code_cells, i1)\n while i2 < self.code_count:\n self.code_cells[i2] = self.code_cells[i2] + 1\n i2 = i2 + 1\n\n i2 = bisect(self.text_cells, i1)\n self.text_cells.insert(i2, i1)\n self.text_count = self.text_count + 1\n while i2 < self.text_count:\n self.text_cells[i2] = self.text_cells[i2] + 1\n i2 = i2 + 1\n\n def delete_cell(self, i):\n i1 = i\n if i < 0:\n i1 = self.total + i\n if i1 > self.total:\n i1 = self.total\n if i1 < 0:\n i1 = 0\n\n popped = self.json.cells.pop(i1)\n self.total = self.total - 1\n i2 = bisect(self.code_cells, i1)\n if popped.cell_type == \"code\":\n self.code_cells.pop(i2)\n self.code_count = self.code_count - 1\n while i2 < self.code_count:\n self.code_cells[i2] = self.code_cells[i2] - 1\n i2 = i2 + 1\n\n i2 = bisect(self.text_cells, i1)\n if popped.cell_type == \"markdown\":\n self.text_cells.pop(i2)\n self.text_count = self.text_count - 1\n while i2 < self.text_count:\n self.text_cells[i2] = self.text_cells[i2] - 1\n i2 = i2 + 1\n\n def move_cell(self, i1, i2):\n if i1 < 0:\n i1 = self.total + i\n if i1 > self.total:\n i1 = self.total\n if i1 < 0:\n i1 = 0\n if i2 < 0:\n i2 = self.total + i\n if i2 > self.total:\n i2 = self.total\n if i2 < 0:\n i2 = 0\n if i2 > i1:\n i2 = i2 - 1\n\n type = self.get_cell_type(i1)\n source = self.json.cells[i1].source\n if type == \"markdown\":\n self.insert_text_cell(source, i2) \n else:\n self.insert_code_cell(source, i2)\n\n\n\n def run_cell(self, i, relative = False):\n i1 = i\n if relative:\n i1 = self.code_cells[i]\n if self.json.cells[i1].cell_type == \"code\":\n errors = False\n with io.capture_output() as captured:\n info = self.runtime.run_cell(self.json.cells[i1].source)\n if info.error_before_exec != None or info.error_in_exec != None:\n errors = True\n name = \"\"\n type = \"stream\"\n if errors:\n name = \"stderr\"\n else:\n name = \"stdout\"\n self.json.cells[i1].outputs.append({\"name\": name, \"type\": type, \"text\": captured})\n \n return captured\n else:\n return \"Error: Cannot run a markdown cell\"\n\n def save_to_file (self, path):\n with open(path+\".ipynb\", \"w\") as f1: # Open our source file\n nbformat.write(self.json, f1, version = nbformat.current_nbformat)\n\n\n def get_cell_type(self, i):\n i1 = i\n if i < 0:\n i1 = self.total + i\n if i1 > self.total:\n i1 = self.total\n if i1 < 0:\n i1 = 0\n return self.json.cells[i1].cell_type \n\n def reset_runtime(self):\n self.runtime = IPython.InteractiveShell()\n\n def merge_cells(self, i1, i2):\n if i1 < 0:\n i1 = self.total + i\n if i1 > self.total:\n i1 = self.total\n if i1 < 0:\n i1 = 0\n if i2 < 0:\n i2 = self.total + i\n if i2 > self.total:\n i2 = self.total\n if i2 < 0:\n i2 = 0\n if self.get_cell_type(i1) == self.get_cell_type(i2):\n source = self.json.cells[i2].source\n self.json.cells[i1].source = self.json.cells[i1].source + \"\\n\" + source\n self.delete_cell(i2)\n return True\n else:\n return False\n\n def get_cell_outputs(self, i):\n if self.get_cell_type(i) != \"code\":\n return []\n else:\n return self.json.cells[i].outputs","repo_name":"Crimso777/Mercury-Notebooks","sub_path":"Collaborator/colab.py","file_name":"colab.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"70671926653","text":"#!/bin/python3\n\nimport sys\n\nt = 
int(input().strip())\nfor a0 in range(t):\n n = int(input().strip())\n r = n\n # Continue to reduce the remaining number down until it is a multiple of 3\n while(r % 3 != 0): \n r = r - 5 \n\n print(\"-1\") if(r < 0) else print(r*\"5\" + (n-r)*\"3\")\n","repo_name":"engineersamuel/hackerrank","sub_path":"algorithms/implementation/p02_sherlock_and_the_beast/sherlock_and_the_beast.py","file_name":"sherlock_and_the_beast.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14844877269","text":"\"\"\"\nTests for QuantXV2 format\n\"\"\"\nimport pytest\n\nfrom dascore.io.core import read\nfrom dascore.utils.downloader import fetch\nfrom dascore.utils.misc import register_func\n\nPATCH_FIXTURES = []\n\n\n@pytest.fixture(scope=\"session\")\ndef quantx_v2_example_path():\n \"\"\"Return the path to the example QuantXV2 file.\"\"\"\n out = fetch(\"opta_sense_quantx_v2.h5\")\n assert out.exists()\n return out\n\n\n@pytest.fixture(scope=\"session\")\n@register_func(PATCH_FIXTURES)\ndef quantx_v2_das_patch(quantx_v2_example_path):\n \"\"\"Read the QuantXV2 data, return contained DataArray\"\"\"\n out = read(quantx_v2_example_path, \"quantx\")[0]\n attr_time = out.attrs[\"time_max\"]\n coord_time = out.coords[\"time\"].max()\n assert attr_time == coord_time\n return out\n\n\nclass TestReadQuantXV2:\n \"\"\"Tests for reading the QuantXV2 format.\"\"\"\n\n def test_precision_of_time_array(self, quantx_v2_das_patch):\n \"\"\"\n Ensure the time array is in ns, not native ms, in order to\n be consistent with other patches.\n \"\"\"\n time = quantx_v2_das_patch.coords[\"time\"]\n dtype = time.dtype\n assert \"[ns]\" in str(dtype)\n","repo_name":"d-chambers/dascore","sub_path":"tests/test_io/test_quantx/test_quantx_core.py","file_name":"test_quantx_core.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"373301347","text":"n, m = list(map(int, input().split()))\n\nr = 1\n\nfor i in range(0, 7) :\n r = n * r\n if i <= 5 and r >= m :\n print(\"Dangerous\")\n break\n if i >= 5 and r <= m :\n print(\"Safe\")\n break\n\n","repo_name":"jinaur/codeup","sub_path":"2746.py","file_name":"2746.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"28480570661","text":"\"\"\"给定两个大小为 m 和 n 的有序数组 nums1 和 nums2。\n\n请你找出这两个有序数组的中位数,并且要求算法的时间复杂度为 O(log(m + n))。\n\n你可以假设 nums1 和 nums2 不会同时为空。\n\n示例 1:\n\nnums1 = [1, 3]\nnums2 = [2]\n\n则中位数是 2.0\n示例 2:\n\nnums1 = [1, 2]\nnums2 = [3, 4]\n\n则中位数是 (2 + 3)/2 = 2.5\n\"\"\"\ndef jiang(nums1,nums2):\n l = sorted(nums1+nums2)\n if len(l)%2 == 0:\n return (l[int(len(l)/2)-1]+l[int(len(l)/2)])/2\n else:\n return l[len(l)//2]\nprint(jiang([1,3],[2,4]))","repo_name":"gschen/where2go-python-test","sub_path":"1906101038江来洪/day20191129/力扣练习题_01.py","file_name":"力扣练习题_01.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"22614912749","text":"from lettuce import *\nfrom django.contrib.auth.models import User\n\nfrom survey.features.page_objects.accounts import LoginPage\n\nfrom survey.features.page_objects.aggregates import AggregateStatusPage, DownloadExcelPage\nfrom survey.features.page_objects.households import NewHouseholdPage\nfrom survey.features.page_objects.investigators import 
NewInvestigatorPage, InvestigatorsListPage\nfrom survey.features.page_objects.root import HomePage\nfrom survey.features.page_objects.users import NewUserPage\nfrom survey.models.users import UserProfile\n\n\n@step(u'Given I have a user')\ndef given_i_have_a_user(step):\n world.user = User.objects.create_user(\n 'Rajni',\n 'rajni@kant.com',\n 'I_Rock',\n first_name='Rajin',\n last_name=\"Kant\")\n profile = UserProfile.objects.create(\n user=world.user, mobile_number='2222222223')\n\n\n@step(u'And I visit the login page')\ndef and_i_visit_the_login_page(step):\n world.page = LoginPage(world.browser)\n world.page.visit()\n\n\n@step(u'And I login a user')\ndef and_i_login_a_user(step):\n world.page = LoginPage(world.browser)\n world.page.login(world.user)\n\n\n@step(u'Then I should see that I am logged in as given username')\ndef then_i_should_see_that_i_am_logged_in_as_given_username(step):\n world.page.see_home_page_and_logged_in_status(world.user)\n\n\n@step(u'And I am in the home page')\ndef and_i_am_in_the_home_page(step):\n world.page = HomePage(world.browser)\n world.page.visit()\n\n\n@step(u'And I click the login link')\ndef and_i_click_the_login_link(step):\n world.page.click_the_login_link()\n\n\n@step(u'Then I should see new investigator with logout link')\ndef then_i_should_see_new_investigator_with_logout_link(step):\n world.page = NewInvestigatorPage(world.browser)\n world.page.see_username_link()\n\n\n@step(u'Then I should see list investigator with logout link')\ndef then_i_should_see_list_investigator_with_logout_link(step):\n world.page = InvestigatorsListPage(world.browser)\n world.page.see_username_link()\n\n\n@step(u'Then I should see new household page with logout link')\ndef then_i_should_see_new_household_page_with_logout_link(step):\n world.page = NewHouseholdPage(world.browser)\n world.page.see_username_link()\n\n\n@step(u'Then I should see aggregate status page with logout link')\ndef then_i_should_see_aggregate_status_page_with_logout_link(step):\n world.page = AggregateStatusPage(world.browser)\n world.page.see_username_link()\n\n\n@step(u'Then I should see download excel page with logout link')\ndef then_i_should_see_download_excel_page_with_logout_link(step):\n world.page = DownloadExcelPage(world.browser)\n world.page.see_username_link()\n\n\n@step(u'Then I should see new user page with logout link')\ndef then_i_should_see_new_user_page_with_logout_link(step):\n world.page = NewUserPage(world.browser)\n world.page.see_username_link()\n","repo_name":"unicefuganda/uSurvey","sub_path":"survey/features/login-steps.py","file_name":"login-steps.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"25198789557","text":"from django.conf.urls import patterns, url\nfrom pmm_is2.apps.des import views\n\n__author__ = 'Eduardo'\nurlpatterns = patterns('',\n\n url(r'^$', views.index, name='index'),\n url(r'^crear_tipo_item/$', views.crear_tipo_item, name='crear_tipo_item'),\n url(r'^listar_tipo_item/$', views.listar_tipo_item, name='listar_tipo_item'),\n url(r'^editar_tipo_item/(?P\\d+)$', views.editar_tipo_item, name='editar_tipo_item'),\n url(r'^eliminar_tipo_item/(?P\\d+)$', views.eliminar_tipo_item, name='eliminar_tipo_item'),\n url(r'^tipo_item/(?P\\d+)$', views.ver_tipo_item, name='tipo_item'),\n url(r'^suggest_tipo_item/$', views.suggest_tipo_item, name='suggest_tipo_item'),\n\n url(r'^crear_atributo_tipo_item/$', views.crear_atributo_tipo_item, 
name='crear_atributo_tipo_item'),\n url(r'^listar_atributo_tipo_item/$', views.listar_atributo_tipo_item, name='listar_atributo_tipo_item'),\n url(r'^editar_atributo_tipo_item/(?P\\d+)$', views.editar_atributo_tipo_item, name='editar_atributo_tipo_item'),\n url(r'^eliminar_atributo_tipo_item/(?P\\d+)$', views.eliminar_atributo_tipo_item, name='eliminar_atributo_tipo_item'),\n url(r'^atributo_tipo_item/(?P\\d+)$', views.ver_atributo_tipo_item, name='atributo_tipo_item'),\n\n #agregado para probar mi parte\n url(r'^crear_item/(?P\\d+)$', views.crear_item, name='crear_item'),\n url(r'^listar_item/$', views.listar_item, name='listar_item'),\n url(r'^lista_item_revivir/(?P\\d+)$', views.lista_item_revivir, name='lista_item_revivir'),\n url(r'^editar_item/(?P\\d+)$', views.editar_item, name='editar_item'),\n url(r'^eliminar_item/(?P\\d+)$', views.eliminar_item, name='eliminar_item'),\n url(r'^item/(?P\\d+)$', views.ver_item, name='item'),\n url(r'^suggest_tipo_item/$', views.suggest_tipo_item, name='suggest_tipo_item'),\n url(r'^suggest_item/$', views.suggest_item, name='suggest_item'),\n url(r'^adjuntarArchivo_item/(?P\\d+)$', views.archivoadjunto_page, name='adjuntarArchivo_item'),\n\n url(r'^crear_archivoAdjunto/$', views.crear_archivoAdjunto, name='crear_archivoAdjunto'),\n\n url(r'^desasignar/(?P\\d+)$', views.desasignar, name='desasignar'),\n url(r'^eliminar_adjunto/(?P\\d+)$', views.eliminar_adjunto, name='eliminar_adjunto'),\n\n url(r'^project_list/$', views.project_list, name='project_list'),\n url(r'^phases_list/(?P\\d+)$', views.phases_list, name='phases_list'),\n url(r'^historial_item/(?P\\d+)$', views.historial_item, name='historial_item'),\n url(r'^revivir_item/(?P\\d+)$', views.revivir_item, name='revivir_item'),\n url(r'^item_reversion/(?P\\d+)$', views.item_reversion, name='item_reversion'),\n url(r'^item_reversion_list/(?P\\d+)$', views.item_reversion_list, name='item_reversion_list'),\n url(r'^revivir/(?P\\d+)$', views.revivir, name='revivir'),\n url(r'^agregar_relaciones/(?P\\d+)$', views.agregar_relaciones, name='agregar_relaciones'),\n url(r'^relation_fix/(?P\\d+)$', views.relation_fix, name='relation_fix'),\n url(r'^relation_fix_revive/(?P(\\d+))/(?P(\\d+))/$', views.relation_fix_revive, name='relation_fix_revive'),\n url(r'^listar_relaciones/(?P(\\d+))/$', views.listar_relaciones, name='listar_relaciones'),\n url(r'^eliminar_relacion/(?P\\d+)$', views.eliminar_relacion, name='eliminar_relacion'),\n url(r'^project_profile/(?P\\d+)$', views.project_profile, name='project_profile'),\n url(r'^phase_item_list/(?P(\\d+))/(?P(\\d+))/$', views.phase_item_list, name='phase_item_list'),\n\n #imports\n url(r'^import_item/(?P\\d+)$', views.import_item, name='import_item'),\n url(r'^item_import_list/(?P\\d+)$', views.item_import_list, name='item_import_list'),\n url(r'^calcular/(?P\\d+)$', views.calcular_impacto_y_costo_item, name='calcular'),\n url(r'^calcular_costo_total/(?P\\d+)$', views.calcular_costo_total, name='calcular_costo_total'),\n\n\n #url(r'^crear_solicitud/$', views.crear_solicitud, name='crear_solicitud'),\n #url(r'^listar_solicitud/$', views.listar_solicitud, name='listar_solicitud'),\n #url(r'^editar_solicitud/(?P\\d+)$', views.editar_solicitud, name='editar_solicitud'),\n url(r'^crear_solicitud/(?P(\\d+))/(?P(\\d+))/$', views.crear_solicitud, name='crear_solicitud'),\n url(r'^listar_solicitud/(?P(\\d+))/(?P(\\d+))/$', views.listar_solicitud, name='listar_solicitud'),\n\n url(r'^imprimir_solicitud/(?P\\d+)$', views.imprimir_solicitud, name='imprimir_solicitud'),\n 
url(r'^imprimir_listadoitem/(?P\\d+)$', views.imprimir_item, name='imprimir_item'),\n url(r'^enviar_solicitud/(?P\\d+)$', views.enviar_solicitud, name='enviar_solicitud'),\n url(r'^listar_solicitudRecibido/$', views.listar_solicitudRecibido, name='listar_solicitudRecibido'),\n url(r'^editar_solicitudRecibido/(?P\\d+)$', views.editar_solicitudRecibido, name='editar_solicitudRecibido'),\n\n url(r'^get_relation_items/$', views.get_relation_items, name='get_relation_items'),\n url(r'^visualizar_grafico/(?P\\d+)$', views.visualizar_grafico, name='visualizar_grafico'),\n\n\n url(r'^visualizar_grafico/(?P\\d+)$', views.visualizar_grafico, name='visualizar_grafico'),\n\n)","repo_name":"edufd/pmm-is2","sub_path":"pmm_is2/apps/des/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20874358940","text":"# encoding: utf-8 \n# 文件需要以utf-8格式编码\n# 文件名代表因子名称,需满足命名规范\n__author__ = \"陈熙元\" # 这里填下你的名字\ndefault_params = {\"t\":0} # 这里填写因子参数默认值,比如: {\"t1\": 10}\nparams_description = {\"t\":\"无参数\"} # 这里填写因子参数描述信息,比如: {\"t1\": \"并没有用上的参数\"}\n\ndef run_formula(dv, params=default_params):\n \"\"\"\n 超速动比率=(货币资金+交易性金融资产+应收票据+应收账款+其他应收款)/流动负债合计\n 计算方式:Latest\n \"\"\"\n value = dv.add_formula(\"SuperQuickRatio_J\",\n \"(monetary_cap+tradable_assets+notes_rcv+acct_rcv+other_rcv)/tot_cur_liab\",\n is_quarterly=True,\n add_data=True)\n\n return value\n","repo_name":"cbh968130/ShinieHW","sub_path":"8_factors/SuperQuickRatio.py","file_name":"SuperQuickRatio.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29082404429","text":"class Solution(object):\n def mergeAlternately(self, word1, word2):\n \"\"\"\n :type word1: str\n :type word2: str\n :rtype: str\n \"\"\"\n\n # initialize the merged words to an epmty string\n merged_words = \"\"\n\n # store the minimum length in a container\n length = min(len(word1), len(word2))\n\n # loop through the range of length and append the words to the merged words\n for i in range(length):\n merged_words += word1[i] + word2[i]\n\n # Append any remaining characters from the longer word\n if len(word1) > length:\n merged_words += word1[length:]\n elif len(word2) > length:\n merged_words += word2[length:]\n\n # return merged words\n return merged_words\n","repo_name":"Markson17/My-LeetCode-Solution","sub_path":"LeetCode 75/merge-strings-alternatively.py","file_name":"merge-strings-alternatively.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"17391535239","text":"import json\n\nfrom ttdclient.models.base import Base\n\n\nclass SupplySource(Base):\n\n obj_name = \"supplyvendor\"\n\n def get_supply_sources(self, partner_id):\n payload = {\n \"PartnerId\": partner_id,\n \"PageStartIndex\": 0,\n \"PageSize\": None\n }\n method = \"POST\"\n url = '{0}/{1}'.format(self.get_url(), 'query/partner')\n response = self._execute(method, url, json.dumps(payload))\n return self._get_response_objects(response)","repo_name":"emmanueljob/ttd-api","sub_path":"ttdclient/models/supply_source.py","file_name":"supply_source.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"18096083905","text":"import torch.nn.functional as F\nimport torch\n\n\nclass Diffusion():\n def 
__init__(self, timesteps: int, device: str):\n self.device = device\n self.T = timesteps\n self.betas = torch.linspace(0.0001, 0.02, self.T).to(self.device)\n self.alphas = 1. - self.betas.to(self.device)\n self.initialize()\n\n def initialize(self):\n self.alphas_cumprod = torch.cumprod(self.alphas, axis=0)\n self.alphas_cumprod_prev = F.pad(\n self.alphas_cumprod[:-1], (1, 0), value=1.0)\n self.sqrt_recip_alphas = torch.sqrt(1.0 / self.alphas)\n self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)\n self.sqrt_one_minus_alphas_cumprod = torch.sqrt(\n 1. - self.alphas_cumprod)\n self.posterior_variance = self.betas * \\\n (1. - self.alphas_cumprod_prev) / (1. - self.alphas_cumprod)\n\n def forward_diffusion_sample(self, x_0, t, device):\n \"\"\" \n Takes an image and a timestep as input and \n returns the noisy version of it\n \"\"\"\n noise = torch.randn_like(x_0)\n sqrt_alphas_cumprod_t = self.get_index_from_list(\n self.sqrt_alphas_cumprod, t, x_0.shape, device)\n sqrt_one_minus_alphas_cumprod_t = self.get_index_from_list(\n self.sqrt_one_minus_alphas_cumprod, t, x_0.shape, device\n )\n # mean + variance\n return sqrt_alphas_cumprod_t.to(device) * x_0.to(device) \\\n + sqrt_one_minus_alphas_cumprod_t.to(device) * \\\n noise.to(device), noise.to(device)\n\n def get_index_from_list(self, vals, t, x_shape, device):\n \"\"\" \n Returns a specific index t of a passed list of values vals\n while considering the batch dimension.\n \"\"\"\n batch_size = t.shape[0]\n out = vals.gather(-1, t.to(device))\n return out.reshape(batch_size, *((1,) * (len(x_shape) - 1))).to(t.device)\n\n @torch.no_grad()\n def sample_timestep(self, model, x, t):\n \"\"\"\n Calls the model to predict the noise in the image and returns \n the denoised image. \n Applies noise to this image, if we are not in the last step yet.\n \"\"\"\n device = next(model.parameters()).device\n betas_t = self.get_index_from_list(self.betas, t, x.shape, device)\n sqrt_one_minus_alphas_cumprod_t = self.get_index_from_list(\n self.sqrt_one_minus_alphas_cumprod, t, x.shape, device\n )\n sqrt_recip_alphas_t = self.get_index_from_list(\n self.sqrt_recip_alphas, t, x.shape, device)\n\n # Call model (current image - noise prediction)\n model_mean = sqrt_recip_alphas_t * (\n x - betas_t * model(x, t) / sqrt_one_minus_alphas_cumprod_t\n )\n posterior_variance_t = self.get_index_from_list(\n self.posterior_variance, t, x.shape, device)\n\n if t == 0:\n return model_mean\n else:\n noise = torch.randn_like(x)\n return model_mean + torch.sqrt(posterior_variance_t) * noise\n\n @torch.no_grad()\n def sample_image(self, model, T):\n\n # device\n device = next(model.parameters()).device\n\n # Sample noise\n img_size = 128\n img = torch.randn((1, 3, img_size, img_size), device=device)\n imgs = []\n\n # for loop sampling\n for i in reversed(range(0, T)):\n t = torch.full((1,), i, device=device, dtype=torch.long)\n img = self.sample_timestep(model, img, t)\n imgs.append(img)\n if i == 0:\n print(f\"Sampling is successful.\")\n return imgs\n","repo_name":"Rachan-por/EE_Senior_Project","sub_path":"forward.py","file_name":"forward.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27131199414","text":"#!/usr/bin/env python3\n\"\"\"\ncryptovault.py - A very basic tool that encrypts and decrypts text via AES-CBC\nusing 32 byte (256bit) keys.\n\nExample - Encrypting a message:\n(.env)cgleeson@autotron:~/src/crypto$ ./cryptovault.py -k 
'FMcFGpP@A2ygsf#B6oYuTaNuG(4edE8)' -m 'This is a secret demo message'\n\n**********PyCrypto Vault Start**********\nMode: Encryption\nMessage is: This is a secret demo message\nMessage successfully encoded with AES-CBC.\nCiphertext: B/XVAmmcwXOEQ48pFac69Emk97gHQLNicq15YQc5PfEEqTOhF8i938/tGSVudHCu\n\n**********PyCrypto Vault FINISHED**********\n\nExample - Decrypting the same message:\n(.env)cgleeson@autotron:~/src/crypto$ ./cryptovault.py -k 'FMcFGpP@A2ygsf#B6oYuTaNuG(4edE8)' -c 'B/XVAmmcwXOEQ48pFac69Emk97gHQLNicq15YQc5PfEEqTOhF8i938/tGSVudHCu'\n\n**********PyCrypto Vault Start**********\nMode: Decryption\nCiphertext is: B/XVAmmcwXOEQ48pFac69Emk97gHQLNicq15YQc5PfEEqTOhF8i938/tGSVudHCu\nCiphertext successfully decoded with AES-CBC.\nDecrypted message (end-padded with empty space): This is a secret demo message\n\n**********PyCrypto Vault FINISHED**********\n\nAlternately, you can supply the key in a file if you prefer:\n\n(.env)cgleeson@autotron:~/src/crypto$ cat key.json\n{ \"32_byte_key\": \"FMcFGpP@A2ygsf#B6oYuTaNuG(4edE8)\" }\n\n(.env)cgleeson@autotron:~/src/crypto$ ./cryptovault.py -k ./key.json -m 'This is a secret demo message'\n\n\n\nAuthors: Chris Gleeson.\n\"\"\"\nimport os\nimport sys\nimport math\nimport json\nimport base64\nimport datetime\nimport getopt\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\n\n\ndef usage():\n \"\"\"\n Prints usage for nmapper.py\n \"\"\"\n print('Usage: cryptovault.py [-h] -k [-m | -c ]')\n print('Usage: Encrypt mode - Supply a 32 byte key (256bits) and a message of any length')\n print('Usage: Decrypt mode - Supply a 32 byte key (256bits) and a ciphertext from the output of Encrypt Mode.')\n\n\ndef parse_args():\n \"\"\"\n Parses input arguments and returns them to main.\n\n Exits on any raised exception or if any required arguments are missing.\n \"\"\"\n keystring = ''\n message = ''\n ciphertext = ''\n\n #Attempt to parse args\n try:\n opts, args = getopt.getopt(sys.argv[1:],\"hk:m:c:\",[\"help\",\"key=\",\"message=\",\"ciphertext=\"])\n except getopt.GetoptError as err:\n print(err)\n usage()\n sys.exit(2)\n\n #Populate local variables from args\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif opt in (\"-k\", \"--key\"):\n keystring = arg\n elif opt in (\"-m\", \"--message\"):\n message = arg\n elif opt in (\"-c\", \"--ciphertext\"):\n ciphertext = arg\n return (keystring, message, ciphertext)\n\n\ndef create_config_object(filepath):\n \"\"\"\n Takes a string that holds a file path and attempts to read the file and\n parse the file as JSON.\n\n Returns: Parsed json object via json.loads()\n\n Rasies: IOError if the file cannot be read, TypeError on bad Type,\n ValueError on failed parsing.\n \"\"\"\n try:\n json_raw = open(filepath).read()\n json_object = json.loads(json_raw)\n except IOError as err:\n print(\"Error: Failed to open file %s! Exiting...\" % filepath)\n raise\n except TypeError as err:\n print(\"Error: Parsing of file %s failed! Exiting...\" % filepath)\n raise\n except ValueError as err:\n print(\"Error: Parsing of file %s failed! Exiting...\" % filepath)\n raise\n return json_object\n\n\ndef key_in_file(keystring):\n \"\"\"\n Checks if keystring is a valid file (json). 
If not, keystring is treated as a key\n value and returned for validation later.\n\n Returns: The key from a readable file reference, else keystring is returned.\n \"\"\"\n real_key = ''\n #If key is a filepath we can read, lets grab it and return it\n if os.access(keystring, os.R_OK):\n key_json = create_config_object(keystring)\n try:\n real_key = key_json[\"32_byte_key\"]\n except TypeError as err:\n print(\"Error: Parsing of file %s failed! Exiting...\" % keystring)\n raise\n return real_key\n else:\n return keystring\n\n\ndef test_key(key):\n \"\"\"\n Extremely simple, 'is this string 32 bytes' check.\n\n Bails out if the key is of invalid length.\n \"\"\"\n if len(key) != 32:\n print(\"ERROR: The key supplied was not 32 bytes!.\")\n print(\"ERROR: Supplied key has length of:\", len(key))\n print(\"ERROR: Key was:\", key)\n sys.exit(2)\n\n\ndef encode(key,initv,message,algo):\n \"\"\"\n Attempts to encode the message via AES-CBC.\n\n Encoded ciphertext is printed to the screen.\n \"\"\"\n print(\"Message is:\", message)\n\n #If no init vector was supplied, we will create one\n if initv == '':\n initv = Random.new().read(AES.block_size)\n\n try:\n aes = AES.new(key, algo, initv)\n except ValueError as e:\n print(\"Exception caught trying to do 'AES.new(key, algo, initv)'!\")\n raise e\n\n #The input message must be a mulitple of 16 bytes for AES\n #We will only accept a 32 byte key, so a mulitple of 32 is used here.\n block_size = AES.block_size\n multiplier = math.ceil(len(message) / block_size)\n length = block_size * multiplier\n message_padded = message.ljust(length)\n\n try:\n ciphertext = aes.encrypt(message_padded)\n ciphertext = base64.b64encode(initv + ciphertext)\n except ValueError as e:\n print(\"Exception caught trying to do 'aes.encrypt(message)'!\")\n raise e\n\n print(\"Message successfully encoded with AES-CBC.\")\n #print(\"Ciphertext init vector (raw):\", initv) #debug only\n ciphertext = str(ciphertext, \"UTF-8\")\n print(\"Ciphertext:\", ciphertext)\n\n\ndef decode(key,initv,ciphertext,algo):\n \"\"\"\n Attempts to decode the message via AES-CBC.\n\n Decoded message is printed to the screen.\n \"\"\"\n print(\"Ciphertext is:\", ciphertext)\n\n #Decode from base64\n ciphertext = base64.b64decode(ciphertext)\n #Slice the init vector from the ciphertext\n initv = ciphertext[:AES.block_size]\n ciphertext = ciphertext[AES.block_size:]\n\n try:\n aes = AES.new(key, algo, initv)\n except ValueError as e:\n print(\"Exception caught trying to do 'AES.new(key, algo, initv)'!\")\n raise e\n\n try:\n message = aes.decrypt(ciphertext)\n except ValueError as e:\n print(\"Exception caught trying to do 'aes.encrypt(message)'!\")\n raise e\n\n print(\"Ciphertext successfully decoded with AES-CBC.\")\n #print(\"Ciphertext init vector (raw):\", initv) #debug only\n message = str(message, \"UTF-8\")\n print(\"Decrypted message (end-padded with whitespace):\", message)\n\n\ndef main():\n #Parse args\n (keystring, message, ciphertext) = parse_args()\n\n #Check if the key was given via a file or directly as input.\n key = key_in_file(keystring)\n\n #Validate the key length before we begin\n test_key(key)\n\n #Run begins\n print(\"\\n**********PyCrypto Vault Start**********\")\n\n #Pass in an empty init vector, we will create a random one anyway.\n #This is to potentially support a user supplied init vector in the future.\n initv = ''\n\n #Set CBC as the default mode, we could enable other modes like CBF later.\n algo = AES.MODE_CBC\n\n if ciphertext == '':\n print(\"Mode: Encryption\")\n 
encode(key, initv, message, algo)\n elif message == '':\n print(\"Mode: Decryption\")\n decode(key, initv, ciphertext, algo)\n\n #Run is complete\n print(\"\\n**********PyCrypto Vault FINISHED**********\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"randomInteger/cryptovault","sub_path":"cryptovault.py","file_name":"cryptovault.py","file_ext":"py","file_size_in_byte":7653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26088149221","text":"from flask import Flask, jsonify, request\r\n\r\nimport tensorflow as tf\r\n\r\napp = Flask(__name__ )\r\n\r\n@app.route('/')\r\ndef home_page():\r\n return 'Train a linear regression model'\r\n\r\n# Input\r\nW = tf.Variable([.26], dtype=tf.float32)\r\nb = tf.Variable([-.26], dtype=tf.float32)\r\n\r\n# Output\r\ncurr_W, curr_b, curr_loss = 0.0, 0.0, 0.0\r\n\r\n# Initializes model parameters\r\n@app.route('/initialize-model')\r\ndef initialize():\r\n global W, b\r\n W = tf.Variable([.26], dtype=tf.float32)\r\n b = tf.Variable([-.26], dtype=tf.float32)\r\n return 'Initialized model parameters - W: %s b: %s' % (W, b)\r\n\r\n\r\n@app.route('/train-model')\r\ndef train_model():\r\n global W, b, curr_W, curr_b, curr_loss\r\n # Input and Output\r\n x = tf.placeholder(tf.float32)\r\n linear_model = W * x + b\r\n y = tf.placeholder(tf.float32)\r\n\r\n # loss\r\n loss = tf.reduce_sum(tf.square(linear_model - y)) # sum of the squares\r\n # optimizer\r\n optimizer = tf.train.GradientDescentOptimizer(0.01)\r\n train = optimizer.minimize(loss)\r\n\r\n # training data\r\n x_train = [1, 2, 3, 4]\r\n y_train = [0, -1, -2, -3]\r\n # training loop\r\n init = tf.global_variables_initializer()\r\n sess = tf.Session()\r\n sess.run(init) # Reset values\r\n for i in range(1000):\r\n sess.run(train, {x: x_train, y: y_train})\r\n\r\n # Evaluate training accuracy\r\n curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})\r\n return 'Finished training and evaluation'\r\n\r\n\r\n@app.route('/output-result')\r\ndef output_result():\r\n global curr_W, curr_b, curr_loss\r\n return \"Result of training the data - W: %s b: %s loss: %s\" % (curr_W, curr_b, curr_loss)\r\n\r\n# return jsonify({'Result of evaluation': language_list})\r\n\r\nif __name__ == '__main__':\r\n app.run(host = '127.0.0.1', port = 5000, debug = True)\r\n","repo_name":"mahant-ufl/Script_For_Training_Deep_Learning_Model","sub_path":"trainmodel.py","file_name":"trainmodel.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31850193849","text":"def countSetbit(n):\n rsbm=n&-n\n c=0\n while n!=0:\n n-=rsbm\n rsbm=n&-n\n c+=1\n return c\nn=int(input())\nc=0\nsetBitIn_n=countSetbit(n)\nfor i in range(n):\n x=countSetbit(i)\n if x==setBitIn_n:\n c+=1\nprint(c)\n","repo_name":"ommiy2j/Codeforces","sub_path":"Bit_Manipulation/SameNoOfBitsUsingBruteForce.py","file_name":"SameNoOfBitsUsingBruteForce.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4508837412","text":"from Shell import SshShell\nfrom Rsi import Rsi\nfrom Observer import Observer\n\nclass Person(Rsi, Observer):\n shell = SshShell()\n shell.open()\n @classmethod\n def spawn(cls, firstname, lastname, **attributes):\n new_class = type(lastname, (cls,), attributes)\n globals()[lastname] = new_class\n return new_class(firstname)\n\n def __init__(self, firstname):\n 
Rsi.__init__(self, Person.shell)\n Observer.__init__(self)\n print(\"Executing contructor of %s\"%self.__class__.__name__)\n self.firstname = firstname\n\n def wholename(self):\n return \"{} {}\".format(\n self.firstname.capitalize(),\n self.__class__.__name__\n )\n\n def __str__(self):\n return self.firstname\n\ndef punch(self):\n print(\"{} ({} damage with authlevel {} and nickname {})\".format(\n self.wholename(),\n self.punch_damage,\n self.authlevel,\n self.nickname\n ))\n\ndef set_input(self, m_dict):\n for key,value in m_dict.items():\n setattr(self,key,value)\n\nif __name__ == '__main__':\n frank = Person.spawn(\"Frank\", \"Puncherson\",\n punch_damage=10,\n punch=punch,\n set_input=set_input\n )\n\n frank.set_input({\"authlevel\":1, \"nickname\":\"franknick\"})\n frank.punch()\n frank.update(\"ARG\")\n print(frank)\n","repo_name":"Reins981/sandbox","sub_path":"example_scripts/spawn.py","file_name":"spawn.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"10303264193","text":"import json\nimport shutil\nimport os\nimport subprocess\nimport random\nimport statistics\n\nMAP_WIDTHS = [32, 40, 48, 56, 64]\n\ndef getAverageHaliteInMap(\n gameEngineFolder,\n mapWidth,\n dumpDir,\n seed) :\n '''\n Returns a number of the average halite\n in the map generated by seed.\n '''\n gameCommand = gameEngineFolder + '/halite'\n args = [gameCommand,\n '--replay-directory', dumpDir,\n '--width' , str(mapWidth),\n '--height', str(mapWidth),\n '--results-as-json',\n '--seed', str(seed),\n '--no-compression']\n\n args += ['no-bot-command', 'no-bot-command']\n jsonOutputString = subprocess.check_output(args)\n jsonOutput = json.loads(jsonOutputString.decode(\"utf-8\"))\n\n finalSnapshot = jsonOutput['final_snapshot']\n haliteListStr = finalSnapshot.split(';')[2]\n haliteList = haliteListStr.split(',')\n haliteList = list(filter(None, haliteList))\n # print(haliteList)\n return statistics.mean(map(int, haliteList))\n\ndef seed_selector(nSeed, mapSize, gameEngine, dumpDir) :\n '''\n Instead of playing the game with random seeds, we can play\n the game with seeds representative of the general game production\n density. This way, our graph will look cleaner.\n\n This function generates nSeed * 5 seeds, and return a sorted\n array of ints containing nSeed number of seeds. The replay generated\n is dumped in dumpDir. 
The seed selector requires the gameEngine.\n '''\n seeds = []\n randomOffset = random.randrange(100000)\n for seed in range(0, nSeed * 5) :\n avgHalite = getAverageHaliteInMap(\n gameEngineFolder=gameEngine,\n mapWidth=mapSize,\n dumpDir=dumpDir,\n seed=seed + randomOffset)\n seeds.append((seed + randomOffset, avgHalite))\n \n seeds.sort(key=lambda x: x[1])\n seeds = [seed for seed, avgHalite in seeds]\n \n representativeSeeds = []\n for i in range(nSeed) :\n chunkSize = len(seeds)//nSeed\n representativeIndex = i * chunkSize + int(chunkSize/2)\n representativeSeeds.append(seeds[representativeIndex])\n \n return representativeSeeds\n\ndef runHalite(botCallCommandList,\n gameEngineFolder,\n mapWidth = 32,\n replayDirectory = './replays',\n seed = None,\n logging = False) :\n '''\n Run halite on compiled bots.\n This command should return\n (1) the player rank, that is, a dict from player -> ranking 1,2,3\n (2) the list of the scores of players.\n '''\n numBots = len(botCallCommandList)\n assert numBots == 2 or numBots == 4, \\\n \"You are trying to run %d bots on a map\" % numBots\n assert mapWidth in MAP_WIDTHS, 'width mismatch %s' % mapWidth\n gameCommand = gameEngineFolder + '/halite'\n args = [gameCommand,\n '--replay-directory', replayDirectory,\n '-vvv',\n '--width' , str(mapWidth),\n '--height', str(mapWidth),\n '--results-as-json',\n '--no-compression']\n if seed :\n args += ['--seed', str(seed)]\n if not logging :\n args += ['--no-logs']\n args += botCallCommandList\n\n jsonOutputString = subprocess.check_output(args)\n jsonOutput = json.loads(jsonOutputString.decode(\"utf-8\"))\n return readResults(jsonOutput)\n\ndef readResults(jsonResult) :\n nPlayers = len(jsonResult['stats'].keys())\n playerRanks = [-1 for _ in range(nPlayers)]\n scoreList = []\n for player in range(nPlayers) :\n playerRank = jsonResult['stats'][str(player)]['rank']\n playerScore = jsonResult['stats'][str(player)]['score']\n playerRanks.append(playerRank)\n scoreList.append(playerScore)\n return playerRanks, scoreList\n\nif __name__ == '__main__' :\n # We would read the json containing the run info\n with open('run_halite_config.json', 'r') as config_file :\n config = json.loads(config_file.read())\n \n n_players = 2\n n_games = config[\"n_games\"]\n map_sizes = config[\"map_sizes\"]\n game_engine_dir = config[\"game_engine_dir\"]\n replay_dir = config[\"replay_dir\"]\n bot_call_command = config[\"bot_call_command\"]\n empty_bot_call_command = config[\"empty_bot_call_command\"]\n\n # create a dump directory\n dump_dir = os.path.join(replay_dir, 'dump-temp')\n os.makedirs(dump_dir, exist_ok=True)\n\n\n for map_size in map_sizes :\n\n # Then, we call our seed generator to generate the\n # correct seeds for each map we want to run.\n seeds = seed_selector(nSeed=n_games, mapSize=map_size, gameEngine=game_engine_dir, dumpDir=dump_dir)\n\n # We are always running with just one non-trivial players.\n # Basically, we are going to run it with an empty bot.\n\n # Let's say we have n_games = 20, and 5 different map sizes.\n # It will run 100 non-trivial games in total.\n for seed in seeds :\n player_ranks, score_list = runHalite([bot_call_command, empty_bot_call_command],\n game_engine_dir,\n map_size,\n replay_dir,\n seed)\n print('replay mapsize = %d, seed = %d' % (map_size, seed))\n print('result =', score_list)\n\n # remove the dump directory\n 
shutil.rmtree(dump_dir)\n\n","repo_name":"ssantichaivekin/halite3-tools","sub_path":"run_halite_games.py","file_name":"run_halite_games.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22393471885","text":"# • временные метки, конкретные моменты времени\n# • фиксированные периоды(январь 2007г. или весь 2010г)\n# • временные интервалы, обозначаемые метками начала и конца(можно считать частными случаями интервалов)\nimport pandas as pd\nimport numpy as np\nimport matplotlib as plt\n\n\n# Периоды и арифметика периодов\n# Периоды – это промежутки времени: дни, месяцы, кварталы, годы(класс Period)\np = pd.Period(2007, freq='A–DEC') # промежуток времени от 1 января 2007 года до 31 декабря 2007 года включительно.\n# Сложение и вычитание периода и целого числа(сдвиг на величину кратную частоте периода)\np + 5 # Period('2012', 'A–DEC')\np - 2 # Period('2005', 'A–DEC')\n# ///\n# Регулярные диапазоны периодов\nrng = pd.period_range('2000–01–01', '2000–06–30', freq='M') # PeriodIndex(['2000–01', '2000–02', '2000–03', '2000–04', '2000–05', '2000–06'], dtype='period[M]', freq='M')\n# ///\n# Преобразование частоты периода\n# годовой период преобразовать в месячный, начинающийся или заканчивающийся на границе года\np = pd.Period('2007', freq='A–DEC')\np.asfreq('M', how='start')\np.asfreq('M', how='end')\n# ///\n# Для финансового года, заканчивающегося в любом месяце, кроме декабря, месячные подпериоды\np = pd.Period('2007', freq='A–JUN')\np.asfreq('M', 'start')\np.asfreq('M', 'end')\n\n# Квартальная частота периода\n# 12 возможных значений квартальной частоты – от Q–JAN до Q–DEC\np = pd.Period('2012Q4', freq='Q–JAN')\np.asfreq('D', 'start')\np.asfreq('D', 'end')\n# ///\n# временная метка для момента «4 часа пополудни предпоследнего рабочего дня квартала»\np4pm = (p.asfreq('B', 'e') - 1).asfreq('T', 's') + 16 * 60\np4pm.to_timestamp()\n# ///\n# генерация квартальных диапазонов\nrng = pd.period_range('2011Q3', '2012Q4', freq='Q–JAN')\nts = pd.Series(np.arange(len(rng)), index=rng)\nnew_rng = (rng.asfreq('B', 'e') - 1).asfreq('T', 's') + 16 * 60\nts.index = new_rng.to_timestamp()\n\n# Преобразование временных меток в периоды и обратно\n# Объекты Series и DataFrame, индексированные временными метками преобразовать в периоды\nrng = pd.date_range('2000–01–01', periods=3, freq='M')\nts = pd.Series(np.random.randn(3), index=rng)\npts = ts.to_period()\n\n\n# Передискретизация и преобразование частоты\n# процесс изменения частоты временного ряда\nrng = pd.date_range('2000–01–01', periods=100, freq='D')\nts = pd.Series(np.random.randn(len(rng)), index=rng)\nts.resample('M').mean() # ts.resample('M', kind='period').mean()\n# ///\n# Понижающая передискретизация\n# • какой конец интервала будет включаться;\n# • помечать ли агрегированный интервал меткой его начала или конца.\n# данные с частотой одна минута:\nrng = pd.date_range('1/1/2000', periods=12, freq='T')\nts = pd.Series(np.arange(12), index=rng)\n# агрегировать данные в пятиминутные группы, или столбики, вычислив сумму по каждой группе\nts.resample('5min', how='sum')\nts.resample('5min', closed='right').sum() # closed='right' - включается правый конец интервала\n# Результирующий временной ряд помечен временными метками, соответствующими левым концам интервалов\nts.resample('5min', closed='right', label='right').sum()\n# вычесть одну секунду из правого конца, чтобы было понятнее, к какому интервалу относится временная метка\nts.resample('5min', 
closed='right', label='right', loffset='–1s').sum()\n# ///\n# Передискретизация OHLC\n# четыре значения для каждого интервала:\n# первое (открытие – open)\n# последнее (закрытие – close)\n# максимальное (high)\n# минимальное (low)\nts.resample('5min').ohlc()\n\n# Повышающая передискретизация и интерполяция\nframe = pd.DataFrame(np.random.randn(2, 4),\n index=pd.date_range('1/1/2000', periods=2,\n freq='W–WED'),\n columns=['Colorado', 'Texas', 'New York', 'Ohio'])\n# перейти к более высокой частоте без агрегирования\ndf_daily = frame.resample('D').asfreq()\n# ///\n# Передискретизация периодов\nannual_frame = frame.resample('A–DEC').mean()\nannual_frame.resample('Q–DEC').ffill()\nannual_frame.resample('Q–DEC', convention='end').ffill()\n\n\n# Скользящие оконные функции\n# для операций с временными рядами, – статистические и иные функции\n# загрузить временной ряд и передискретизировать на частоту «рабочий день»\nclose_px_all = pd.read_csv('examples/stock_px_2.csv', parse_dates=True, index_col=0)\nclose_px = close_px_all[['AAPL', 'MSFT', 'XOM']]\nclose_px = close_px.resample('B').ffill()\n# rolling(250) - создает объект(допускает группировку по скользящему окну шириной 250 дней), - средние котировки акций Apple\n# в скользящем окне шириной 250 дней.\nappl_std250 = close_px.AAPL.rolling(250, min_periods=10).std()\nappl_std250.plot()\n# Среднее с расширяющимся окном для временного ряда apple_std250\nexpanding_mean = appl_std250.expanding().mean()\n# преобразование применяется к каждому столбцу\nclose_px.rolling(60).mean().plot(logy=True)\n# скользящее среднее за 20 дней\nclose_px.rolling('20D').mean()\n\n# Экспоненциально взвешенные функции\n# постоянный коэффициент затухания, чтобы повысить вес последних наблюдений\n# скользящее среднее котировок акций Apple за 60 дней сравнивается с экспоненциально взвешенным скользящим средним для span=60\naapl_px = close_px.AAPL['2006':'2007']\nma60 = aapl_px.rolling(30, min_periods=20).mean()\newma60 = aapl_px.ewm(span=30).mean()\nma60.plot(style='k--', label='Simple MA')\newma60.plot(style='k-', label='EW MA')\nplt.legend()\n\n# Бинарные скользящие оконные функции\n# корреляции и ковариации, необходимы два временных ряда\n# вычислить относительные изменения в процентах для всего нашего временного ряда\nspx_px = close_px_all['SPX']\nspx_rets = spx_px.pct_change()\nreturns = close_px.pct_change()\ncorr = returns.AAPL.rolling(125, min_periods=100).corr(spx_rets)\ncorr.plot()\n# вычислить корреляцию индекса S&P 500\ncorr = returns.rolling(125, min_periods=100).corr(spx_rets)\ncorr.plot()\n\n# Скользящие оконные функции, определенные пользователем\nfrom scipy.stats import percentileofscore\nscore_at_2percent = lambda x: percentileofscore(x, 0.02)\nresult = returns.AAPL.rolling(250).apply(score_at_2percent)\nresult.plot()\n\n","repo_name":"sheviv/mirrors","sub_path":"pandas/time_data_2.py","file_name":"time_data_2.py","file_ext":"py","file_size_in_byte":8382,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36130352439","text":"import sys, os, math,numpy as np\nfrom PyQt5.QtWidgets import QWidget, QDesktopWidget, QApplication,QMainWindow, QAction,QSplitter,\\\n QGraphicsDropShadowEffect,QSizePolicy,QScrollArea,QFrame,QSizePolicy\nfrom PyQt5 import QtWidgets,QtGui\nfrom PyQt5.QtCore import Qt,QSize,QMargins\n#import first.py\nimport pymysql\nfrom PyQt5.QtGui import QImage, QPalette, QBrush, QColor\nimport urllib.request\nimport sip\nfrom functools import partial\nfrom ratingWidget import 
RatingWidget\n\ndef getLogin(UserID):\n connection = pymysql.connect(host='localhost', user='root', passwd='', db='movies')\n cursor = connection.cursor()\n sql=\"SELECT `Name` FROM `users` WHERE `UserID`=%s\"\n cursor.execute(sql, (int(UserID),))\n name = cursor.fetchone()[0]\n return name\n\ndef getFilms(mode,ganre,film=None):\n connection = pymysql.connect(host='localhost', user='root', passwd='', db='movies')\n cursor = connection.cursor()\n if film is None:\n if ganre=='all':\n if mode=='normal':\n sql = \"SELECT `links`.`Image`,`links`.`ImbdRate`,`movies`.`Title`,`movies`.`Year`,`movies`.`MovieID`, `movies`.`AllGanres` \" \\\n \"FROM `links` JOIN `movies` ON `links`.`MovieID`=`movies`.`MovieID`\"\n if mode=='rate':\n sql = \"SELECT `links`.`Image`,`links`.`ImbdRate`,`movies`.`Title`,`movies`.`Year`,`movies`.`MovieID`, `movies`.`AllGanres` \" \\\n \"FROM `links` JOIN `movies` ON `links`.`MovieID`=`movies`.`MovieID` ORDER BY `ImbdRate` DESC\"\n if mode=='year':\n sql = \"SELECT `links`.`Image`,`links`.`ImbdRate`,`movies`.`Title`,`movies`.`Year`,`movies`.`MovieID`, `movies`.`AllGanres` \" \\\n \"FROM `links` JOIN `movies` ON `links`.`MovieID`=`movies`.`MovieID`\" \\\n \"ORDER BY `Year` DESC\"\n cursor.execute(sql)\n else:\n if mode == 'normal':\n sql = \"SELECT `links`.`Image`,`links`.`ImbdRate`,`movies`.`Title`,`movies`.`Year`,`movies`.`MovieID`, `movies`.`AllGanres` \" \\\n \"FROM `links` JOIN `movies` ON `links`.`MovieID`=`movies`.`MovieID` \" \\\n \"WHERE `movies`.`\"+str(ganre)+\"`=1\"\n if mode == 'rate':\n sql = \"SELECT `links`.`Image`,`links`.`ImbdRate`,`movies`.`Title`,`movies`.`Year`,`movies`.`MovieID`, `movies`.`AllGanres` \" \\\n \"FROM `links` JOIN `movies` ON `links`.`MovieID`=`movies`.`MovieID` \" \\\n \"WHERE `movies`.`\" + str(ganre) + \"`=1 ORDER BY `ImbdRate` DESC\"\n if mode=='year':\n sql = \"SELECT `links`.`Image`,`links`.`ImbdRate`,`movies`.`Title`,`movies`.`Year`,`movies`.`MovieID`, `movies`.`AllGanres` \" \\\n \"FROM `links` JOIN `movies` ON `links`.`MovieID`=`movies`.`MovieID` \" \\\n \"WHERE `movies`.`\" + str(ganre) + \"`=1 ORDER BY `Year` DESC\"\n cursor.execute(sql)\n else:\n sql = \"SELECT `links`.`Image`,`links`.`ImbdRate`,`movies`.`Title`,`movies`.`Year`,`movies`.`MovieID`, `movies`.`AllGanres` \" \\\n \"FROM `links` JOIN `movies` ON `links`.`MovieID`=`movies`.`MovieID` WHERE `Title` LIKE %s\"\n cursor.execute(sql,('%'+film+'%',))\n if cursor.rowcount==0:\n return None,0\n movies=[]\n num=cursor.rowcount\n i=0\n for im in cursor:\n movies.append([])\n #Title,Year,Image,ImdbRate,Ganres\n movies[i].append(im[2])\n movies[i].append(im[3])\n movies[i].append(im[0])\n movies[i].append(im[1])\n movies[i].append(im[4])\n movies[i].append(im[5])\n i=i+1\n cursor.close()\n connection.close()\n return movies, num\n\ndef getRates(id,films):\n connection = pymysql.connect(host='localhost', user='root', passwd='', db='movies')\n cursor = connection.cursor()\n rates=[]\n for i in range(len(films)):\n filmId=films[i][4]\n sql=\"SELECT `Rating` FROM `ratings` WHERE `UserID`=%s AND `MovieID`=%s\"\n cursor.execute(sql,(id,filmId))\n if cursor.rowcount==0:\n rates.append(0)\n else:\n rate = cursor.fetchone()[0]\n rates.append(rate)\n return rates\n\ndef changeMatrix(numCol,numRow,newVal,filename):\n numCol=int(numCol)\n numRow=int(numRow)\n f = open(filename, 'r')\n for i, line in enumerate(f):\n if i == numRow - 1:\n l = line\n vals = l.split()\n vals[numCol - 1] = newVal\n newLine = \" \".join(str(x) for x in vals)\n break\n f.close()\n o = open('output.txt', 'w') # 
open for append\n i = 0\n for line in open(filename):\n i = i + 1\n if i == numRow:\n line = newLine + \"\\n\"\n o.write(line)\n o.close()\n os.remove(filename)\n os.rename(\"output.txt\", filename)\n\ndef changeYmean(numCol,numRow,rate):\n numCol = int(numCol)\n numRow = int(numRow)\n f = open('R.txt', 'r')\n for i, line in enumerate(f):\n if i == numRow - 1:\n R=line.split()\n R[numCol - 1] = 1\n break\n f.close()\n f = open('Y.txt', 'r')\n for i, line in enumerate(f):\n if i == numRow - 1:\n Y = line.split()\n Y[numCol - 1] = rate\n break\n f.close()\n R=[int(s) for s in R]\n Y=[int(s) for s in Y]\n arrayR=np.array(R)\n arrayY=np.array(Y)\n idx = np.where(arrayR == 1)\n YmeanNew=np.mean(arrayY[idx])\n changeMatrix(1,numRow,YmeanNew,'Ymean.txt')\n\ndef insertRate(idUser, idMovie, rate):\n connection = pymysql.connect(host='localhost', user='root', passwd='', db='movies')\n cursor = connection.cursor()\n sql=\"SELECT `links`.`Id` FROM `links` WHERE `links`.`MovieID`=%s\"\n cursor.execute(sql,(int(idMovie),))\n idLink = cursor.fetchone()[0]\n cursor.close()\n row=idLink\n col=idUser\n cursor = connection.cursor()\n cursor1 = connection.cursor()\n sql1 = \"SELECT * FROM `ratings` WHERE `UserID`=%s AND `MovieID`=%s\"\n cursor.execute(sql1, (int(idUser), int(idMovie)))\n if cursor.rowcount == 0:\n #print(\"not exist\")\n sql = \"INSERT INTO `ratings` (`UserID`, `MovieID`,`Rating`) VALUES (%s, %s, %s)\"\n cursor1.execute(sql, (int(idUser), int(idMovie), int(rate)))\n else:\n #print(\"exist\")\n sql = \"UPDATE `ratings` SET `Rating`=%s WHERE `UserID`=%s AND `MovieID`=%s\"\n cursor1.execute(sql, (int(rate), int(idUser), int(idMovie)))\n changeMatrix(col,row,rate,'Y.txt')\n changeMatrix(col,row,1,'R.txt')\n changeYmean(col,row,rate)\n connection.commit()\n cursor.close()\n cursor1.close()\n connection.close()\n\nclass Films(QWidget):\n def __init__(self,id,page,films,selected):\n super().__init__()\n self.UserID=id\n self.current_page=page\n self.films=films\n self.selected=selected\n self.initUI()\n\n def initUI(self):\n self.grid=QtWidgets.QGridLayout(self)\n self.setMinimumWidth(800)\n finish=self.current_page*20\n start=finish-20\n films = self.films[start:finish]\n self.myRates=getRates(self.UserID, films)\n self.print_imgs(films)\n self.grid.setSpacing(5)\n\n def print_imgs(self,films):\n j=-2\n self.bool = []\n for i in range(len(films)):\n ost=i%4\n if (ost==0):\n j=j+2\n w=QWidget()\n layout=QtWidgets.QGridLayout(w)\n layout.setSpacing(0)\n title=QtWidgets.QLabel(films[i][0]+\" (\"+str(films[i][1])+\")\")\n #title.setWordWrap(True)\n imdb=QtWidgets.QLabel(\"Imdb: \"+str(films[i][3]))\n ganres = QtWidgets.QLabel(films[i][5].replace(\"|\", \",\"))\n title.setWordWrap(True)\n ganres.setWordWrap(True)\n im = QtWidgets.QLabel()\n if(not self.selected):\n data = urllib.request.urlopen(films[i][2]).read()\n image = QtGui.QImage()\n image.loadFromData(data)\n wid = image.width()\n hei = image.height()\n pixmap = QtGui.QPixmap(image)\n #pixmap = pixmap.scaled(wid, hei)\n im.setPixmap(pixmap)\n im.setFixedSize(wid, hei)\n self.width = pixmap.width()\n else:\n self.width=182\n\n yourRating = QWidget()\n yourRating_layout = QtWidgets.QHBoxLayout(yourRating)\n rating_label = QtWidgets.QLabel('Your rate:')\n rating_value = QtWidgets.QLabel(str(self.myRates[i]))\n yourRating_layout.addWidget(rating_label)\n yourRating_layout.addWidget(rating_value)\n\n ratesImdb = QWidget()\n rates_layout = QtWidgets.QHBoxLayout(ratesImdb)\n rates_layout.addWidget(imdb)\n button = QtWidgets.QPushButton(\"Estimate\")\n 
button.setFixedSize(70, 20)\n button.setStyleSheet(\"background-color:rgba(5,232,217,0.3); color:black; font:15px;\")\n self.bool.append(False)\n button.clicked.connect(partial(self.estimate, films[i][4], j, ost, i,rating_value))\n rates_layout.addWidget(button)\n\n inform = QWidget()\n inform_layout = QtWidgets.QVBoxLayout(inform)\n layout.addWidget(im,0,0)\n inform_layout.addWidget(title)\n inform_layout.addWidget(ganres)\n inform_layout.addWidget(ratesImdb)\n inform_layout.addWidget(yourRating)\n layout.addWidget(inform,1,0)\n inform.setMaximumWidth(self.width)\n #inform.setMaximumHeight(150)\n inform.setObjectName(\"inform\")\n self.grid.addWidget(w, j, ost)\n self.setStyleSheet(\"\"\"\n QLabel{color:white;\n font:14px;}\n #inform{background-color:rgba(0,0,0,0.3);}\n \"\"\")\n\n def estimate(self,movieID,row,col,j,rating_value):\n stars = QWidget()\n # print(b, j, self.bool)\n stars.setMinimumWidth(self.width)\n stars_layout = QtWidgets.QHBoxLayout(stars)\n stars_layout.setSpacing(0)\n for i in range(5):\n im = QtWidgets.QLabel()\n pixmap = QtGui.QPixmap(\"rating2.png\")\n im.setPixmap(pixmap)\n stars_layout.addWidget(im)\n #rating_value_widget = QtWidgets.QLabel('0')\n rating_widget = RatingWidget(num_icons=5)\n rating_widget.value_updated.connect(\n lambda value: self.setRating(str(value), movieID, rating_value)\n )\n rate_layout = QtWidgets.QHBoxLayout()\n rate_layout.addWidget(rating_widget)\n rate_widget = QtWidgets.QWidget()\n rate_widget.setMinimumWidth(self.width)\n rate_widget.setLayout(rate_layout)\n if self.bool[j] == False:\n self.grid.addWidget(stars, row + 1, col)\n self.bool[j] = True\n # layout.addWidget(rating_value_widget,3,0)\n self.grid.addWidget(rate_widget, row + 1, col)\n\n def setRating(self,value,movieID,rating_value):\n rating_value.setText(str(value))\n insertRate(self.UserID,movieID,value)\n\nclass AllFilms(QMainWindow):\n def __init__(self,id,selected):\n super().__init__()\n self.UserID=id\n self.selected=selected\n self.initUI()\n\n def initUI(self):\n self.screenShape = QtWidgets.QDesktopWidget().screenGeometry()\n self.resize(self.screenShape.width()-10, self.screenShape.height()-100)\n self.setMinimumWidth(1300)\n\n self.top_layout = QtWidgets.QHBoxLayout()\n self.top_layout.setStretch(0,1)\n\n self.notSortButton = QtWidgets.QPushButton(\"All movies\")\n self.sortRateButton=QtWidgets.QPushButton(\"Sort by rating↓\")\n self.sortYearButton = QtWidgets.QPushButton(\"Sort by year↓\")\n self.notSortButton.setFixedSize(150,20)\n self.sortRateButton.setFixedSize(150,20)\n self.sortYearButton.setFixedSize(150,20)\n self.notSortButton.clicked.connect(self.change_mode)\n self.sortRateButton.clicked.connect(self.change_mode)\n self.sortYearButton.clicked.connect(self.change_mode)\n self.top_layout.addStretch(1)\n self.top_layout.insertWidget(0,self.notSortButton)\n self.top_layout.insertWidget(1,self.sortRateButton)\n self.top_layout.insertWidget(2,self.sortYearButton)\n\n self.searchEdit=QtWidgets.QLineEdit()\n self.searchEdit.setMinimumSize(500,30)\n self.findButton=QtWidgets.QPushButton(\"Find\")\n self.findButton.setFixedSize(150,20)\n self.findButton.clicked.connect(self.btn_find)\n self.top_layout.insertWidget(3,self.searchEdit)\n self.top_layout.insertWidget(4,self.findButton)\n\n #self.splitterV = QSplitter(Qt.Vertical)\n self.splitter=QWidget()\n self.splitterV=QtWidgets.QVBoxLayout(self.splitter)\n\n self.currentPage = 1\n self.mode='normal'\n self.ganre='all'\n\n self.imgs, self.numFilms = getFilms(self.mode, self.ganre)\n\n 
self.central=Films(self.UserID,self.currentPage,self.imgs,self.selected)\n self.top=QWidget()\n self.top.setLayout(self.top_layout)\n self.left=QWidget()\n self.bottom = QWidget()\n self.bottom.setObjectName(\"bottom\")\n\n self.main_widget = QWidget()\n self.main_layout = QtWidgets.QGridLayout(self.main_widget)\n self.main_widget.setObjectName('main')\n\n self.scrollWidget = QScrollArea()\n self.scrollWidget.setStyleSheet(\"background-color:transparent;\")\n self.scrollWidget.setWidget(self.splitter)\n self.scrollWidget.setWidgetResizable(True)\n self.scrollWidget.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\n self.scrollWidget.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.scrollWidget.setFrameShape(QFrame.NoFrame)\n\n self.splitterV.insertWidget(0,self.central)\n self.splitterV.insertWidget(1,self.bottom)\n self.ganres()\n self.main_layout.addWidget(self.top,0,1)\n self.main_layout.addWidget(self.scrollWidget,1,1,21,1)\n\n self.setCentralWidget(self.main_widget)\n\n self.numPages = math.ceil(self.numFilms / 20)\n self.pages(self.numPages)\n\n self.setStyleSheet(\"\"\"\n #main {background-image: url(images/back1.jpg);background-attachment: fixed;}\n QSplitter::handle{background-color: transparent;}\n QPushButton {font:15px;\n width: 55px;\n height: 18px;\n color:white;\n background-color:rgba(0,0,0,0.3);\n border:1px solid rgba(0,0,0, 0.8);}\n QLineEdit{border-radius: 2px;\n background-color:rgba(0,0,0,0.3);\n border: 1px solid rgba(255,255,255, 0.3);\n color:white;\n font:20px;}\n\n QScrollBar:vertical {\n border: 1px solid #999999;\n background:white;\n width:10px;\n margin: 15px 0px 15px 0px;\n }\n\n QScrollBar::handle:vertical {\n background: qlineargradient(x1:0, y1:0, x2:1, y2:0,\n stop: 0 rgb(69,81,104), stop:1 rgb(2,19,25));\n min-height: 10px;\n }\n\n QScrollBar::add-line:vertical {\n background: none;\n height:15 px;\n subcontrol-position: bottom;\n subcontrol-origin: margin;\n }\n\n QScrollBar::sub-line:vertical {\n background: none;\n height: 15px;\n subcontrol-position: top;\n subcontrol-origin: margin;\n }\n\n QScrollBar::up-arrow:vertical {\n image:url('images/up.png');\n height: 15px;\n width: 15px\n }\n\n QScrollBar::down-arrow:vertical {\n image:url('images/down.png');\n height: 15px;\n width: 15px\n }\n \"\"\")\n\n def pages(self,num_pages):\n self.bottom_layout = QtWidgets.QGridLayout(self.bottom)\n self.bottom_layout.setSpacing(0)\n self.bottom.setStyleSheet(\"\"\"\n QPushButton{font:15px;\n width: 20px;\n height: 18px;\n color:white;\n background-color:rgba(0,0,0,0.3);\n border:1px solid rgba(255,255,255, 0.3);}\n QLabel{color:white;\n font:20px;\n width:10px;}\n \"\"\")\n\n if self.currentPage<5:\n for i in range(5):\n button = QtWidgets.QPushButton(str(i + 1))\n if i+1==self.currentPage:\n button.setStyleSheet(\"background-color:white; color:black;\")\n button.clicked.connect(self.change_page)\n self.bottom_layout.addWidget(button, 0, i)\n label=QtWidgets.QLabel('...')\n self.bottom_layout.addWidget(label, 0, 5)\n\n button = QtWidgets.QPushButton(str(num_pages))\n self.bottom_layout.addWidget(button, 0, 6)\n button.clicked.connect(self.change_page)\n\n elif self.currentPage>(num_pages-4):\n button = QtWidgets.QPushButton(str(1))\n self.bottom_layout.addWidget(button, 0, 0)\n button.clicked.connect(self.change_page)\n\n label = QtWidgets.QLabel('...')\n self.bottom_layout.addWidget(label, 0, 1)\n pgs=num_pages-4\n j=2\n while pgs<=num_pages:\n button = QtWidgets.QPushButton(str(pgs))\n if pgs==self.currentPage:\n 
button.setStyleSheet(\"background-color:white; color:black;\")\n button.clicked.connect(self.change_page)\n self.bottom_layout.addWidget(button, 0, j)\n pgs=pgs+1\n j=j+1\n\n else:\n button = QtWidgets.QPushButton(str(1))\n button.clicked.connect(self.change_page)\n self.bottom_layout.addWidget(button, 0, 0)\n\n label = QtWidgets.QLabel('...')\n self.bottom_layout.addWidget(label, 0, 1)\n\n button = QtWidgets.QPushButton(str(self.currentPage-1))\n button.clicked.connect(self.change_page)\n self.bottom_layout.addWidget(button, 0, 2)\n\n button = QtWidgets.QPushButton(str(self.currentPage))\n button.setStyleSheet(\"background-color:white; color:black;\")\n button.clicked.connect(self.change_page)\n self.bottom_layout.addWidget(button, 0, 3)\n\n button = QtWidgets.QPushButton(str(self.currentPage+1))\n button.clicked.connect(self.change_page)\n self.bottom_layout.addWidget(button, 0, 4)\n\n label = QtWidgets.QLabel('...')\n self.bottom_layout.addWidget(label, 0, 5)\n\n button = QtWidgets.QPushButton(str(num_pages))\n button.clicked.connect(self.change_page)\n self.bottom_layout.addWidget(button, 0, 6)\n\n def btn_find(self):\n film=self.searchEdit.text()\n self.ganre=\"all\"\n self.ganresL.setText(\"All ganres\")\n if hasattr(self, 'bottom'):\n sip.delete(self.bottom)\n del self.bottom\n sip.delete(self.central)\n del self.central\n\n if hasattr(self, 'sortRateButton'):\n sip.delete(self.sortRateButton)\n del self.sortRateButton\n sip.delete(self.sortYearButton)\n del self.sortYearButton\n\n self.currentPage=1\n self.imgs, self.numFilms = getFilms(self.mode, self.ganre,film)\n if self.numFilms==0:\n self.central = QWidget()\n layout=QtWidgets.QHBoxLayout(self.central)\n message = QtWidgets.QLabel('There are no matches.')\n layout.addWidget(message)\n self.splitterV.insertWidget(0, self.central)\n else:\n self.numPages = math.ceil(self.numFilms / 20)\n self.central = Films(self.UserID, self.currentPage, self.imgs,self.selected)\n self.splitterV.insertWidget(0, self.central)\n if self.numPages>1:\n self.bottom = QWidget()\n self.splitterV.insertWidget(1, self.bottom)\n self.pages(self.numPages)\n\n def change_page(self):\n sender=self.sender()\n click_btn=sender.text()\n self.currentPage=int(click_btn)\n self.central.hide()\n self.central.deleteLater()\n del self.central\n self.central=Films(self.UserID,self.currentPage,self.imgs,self.selected)\n self.splitterV.insertWidget(0,self.central)\n sip.delete(self.bottom)\n del self.bottom\n self.bottom=QWidget()\n self.splitterV.insertWidget(1, self.bottom)\n self.pages(self.numPages)\n\n def change_mode(self):\n self.searchEdit.setText('')\n if not hasattr(self, 'sortRateButton'):\n self.sortRateButton = QtWidgets.QPushButton(\"Sort by rating↓\")\n self.sortYearButton = QtWidgets.QPushButton(\"Sort by year↓\")\n self.sortRateButton.clicked.connect(self.change_mode)\n self.sortYearButton.clicked.connect(self.change_mode)\n self.top_layout.insertWidget(1,self.sortRateButton)\n self.top_layout.insertWidget(2,self.sortYearButton)\n\n sender = self.sender()\n text = sender.text()\n self.currentPage=1\n if text=='All movies':\n self.mode='normal'\n self.ganresL.setText(\"All ganres\")\n self.ganre=\"all\"\n if text==\"Sort by rating↓\":\n self.mode='rate'\n if text==\"Sort by year↓\":\n self.mode='year'\n\n del self.imgs\n self.imgs, self.numFilms = getFilms(self.mode, self.ganre)\n self.numPages = math.ceil(self.numFilms / 20)\n\n #self.central.hide()\n #self.central.deleteLater()\n #del self.central\n sip.delete(self.central)\n self.central = 
Films(self.UserID, self.currentPage,self.imgs,self.selected)\n self.splitterV.insertWidget(0, self.central)\n\n if hasattr(self, 'bottom'):\n sip.delete(self.bottom)\n self.bottom = QWidget()\n self.splitterV.insertWidget(1, self.bottom)\n self.pages(self.numPages)\n\n def ganres(self):\n self.yourID = QtWidgets.QLabel(\"Your login: \" + getLogin(self.UserID))\n self.yourID.setWordWrap(True)\n self.yourID.setFixedSize(100, 46)\n self.yourID.setObjectName(\"id\")\n\n self.main_layout.addWidget(self.yourID, 0, 0)\n self.ganresL = QtWidgets.QLabel(\"All ganres\")\n self.ganresL.setObjectName(\"ganresL\")\n self.main_layout.addWidget(self.ganresL,1,0)\n allGanres=['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy','Film-Noir', 'Horror', 'IMAX', 'Musical', 'Mystery','Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']\n allGanres.sort()\n\n for i in range(len(allGanres)):\n ganre=QtWidgets.QPushButton(allGanres[i])\n self.main_layout.addWidget(ganre,i+2,0)\n ganre.setFixedSize(150,20)\n ganre.clicked.connect(self.set_ganre)\n self.ganresL.setStyleSheet(\"\"\"color:white;font:20px;\"\"\")\n self.yourID.setStyleSheet(\"\"\"color:white;font:16px;\"\"\")\n\n def set_ganre(self):\n if not hasattr(self, 'sortRateButton'):\n self.sortRateButton = QtWidgets.QPushButton(\"Sort by rating↓\")\n self.sortYearButton = QtWidgets.QPushButton(\"Sort by year↓\")\n self.sortRateButton.clicked.connect(self.change_mode)\n self.sortYearButton.clicked.connect(self.change_mode)\n self.top_layout.insertWidget(1,self.sortRateButton)\n self.top_layout.insertWidget(2,self.sortYearButton)\n self.searchEdit.setText('')\n sender = self.sender()\n ganre = sender.text()\n self.ganresL.setText(ganre)\n\n self.currentPage = 1\n self.ganre=ganre\n\n self.imgs, self.numFilms = getFilms(self.mode, self.ganre)\n self.numPages = math.ceil(self.numFilms / 20)\n\n sip.delete(self.central)\n del self.central\n self.central = Films(self.UserID, self.currentPage,self.imgs,self.selected)\n self.splitterV.insertWidget(0, self.central)\n\n if hasattr(self, 'bottom'):\n sip.delete(self.bottom)\n self.bottom = QWidget()\n self.splitterV.insertWidget(1, self.bottom)\n self.pages(self.numPages)","repo_name":"sweetdream779/Recommend_System","sub_path":"gui3.py","file_name":"gui3.py","file_ext":"py","file_size_in_byte":25615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29680539314","text":"\"\"\"\nRuns LDA to create doc-topic and topic-word distributions.\nCode is taken from C410 MP3\n\"\"\"\nimport sys\nimport metapy\nif __name__ == '__main__':\n\n _config = 'config.toml'\n _output_prefix = 'output'\n _num_topics = 6\n\n metapy.log_to_stderr()\n fidx = metapy.index.make_forward_index(_config)\n dset = metapy.learn.Dataset(fidx)\n lda_inf = metapy.topics.LDAGibbs(dset, num_topics=_num_topics, alpha=0.1, beta=0.1)\n lda_inf.run(num_iters=500)\n lda_inf.save(_output_prefix)\n\n model = metapy.topics.TopicModel(_output_prefix)\n\n with open(_output_prefix+'-topic.txt','w+') as topic:\n for topic_id in range(_num_topics):\n print('Topic ' + str(topic_id))\n print([(fidx.term_text(pr[0]), pr[1]) for pr in model.top_k(tid=topic_id, k = 20)])\n topic.write('Topic ' + str(topic_id) + '\\n')\n topic.write(str([(fidx.term_text(pr[0]), pr[1]) for pr in model.top_k(tid=topic_id, k = 20)]))\n topic.write('\\n')\n\n target_doc_count = 5\n with open(_output_prefix+'-document.txt','w+') as doc:\n for d_id in range(target_doc_count):\n print('Document 
' + str(d_id))\n print(model.topic_distribution(d_id))\n doc.write('Document ' + str(d_id) + '\\n')\n doc.write(str(model.topic_distribution(d_id)))\n doc.write('\\n')","repo_name":"KaiwenXue/MAL-Analyzer","sub_path":"topic_model.py","file_name":"topic_model.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32179229271","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\n# создаем датасет\nfilePath_train_label = 'data/train-labels-idx1-ubyte'\nfilePath_train_set = 'data/train-images-idx3-ubyte'\n\nfilePath_test_label = 'data/t10k-labels-idx1-ubyte'\nfilePath_test_set = 'data/t10k-images-idx3-ubyte'\n\n\nwith open(filePath_train_label, 'rb') as trainLbpath:\n y_train = np.frombuffer(trainLbpath.read(), np.uint8, offset=8)\n\nwith open(filePath_train_set, 'rb') as trainSetpath:\n x_train = np.frombuffer(trainSetpath.read(), np.uint8, offset=16).reshape(\n len(y_train), 28, 28\n )\n\nwith open(filePath_test_label, 'rb') as testLbpath:\n y_test = np.frombuffer(testLbpath.read(), np.uint8, offset=8)\n\nwith open(filePath_test_set, 'rb') as testSetpath:\n x_test = np.frombuffer(testSetpath.read(), np.uint8, offset=16).reshape(\n len(y_test), 28, 28\n )\n\n# нормализация данных\nx_train = x_train / 255\nx_test = x_test / 255\n\n# классы/назчания обьектов\nclasses = np.unique(y_train)\n#print('Output classes : ', classes)\n# классы товаров человеческим языком\nclass_names = ['T-shirt', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n#print('Output classes : ', class_names)\n\n# plt.figure(figsize=(10,10))\n# for i in range(25):\n# plt.subplot(5,5,i+1)\n# plt.title(\"No.\" + str(i))\n# plt.imshow(x_train[i,:],cmap='Greys')\n\n# создание модели нейронной сети\nmodel = tf.keras.Sequential([\n tf.keras.layers.Flatten(input_shape=(28,28)),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax'),\n])\n\n# компиляция модели\nmodel.compile(optimizer=tf.keras.optimizers.SGD(), loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n# вывод параметров модели\nmodel.summary()\n# обучение модели\nmodel.fit(x_train, y_train, epochs=10)\n# сохраняем модель в файле\nmodel.save('models/shop_model')\n# открываем модель из файла\n#model = tf.keras.models.load_model('models/shop_model')\n\n# проверка точности предсказания\ntest_loss, test_acc = model.evaluate(x_test, y_test)\nprint('Test:', test_acc)\n\ncheck = 12\n# предсказываем\npredictions = model.predict(x_train)\npredictions[check]\n\n# получить результат\nresult = np.argmax(predictions[12])\n\n#выведем картинку\n# plt.figure()\n# plt.imshow(x_train[12])\n# plt.colorbar()\n# plt.grid(False)\n\n# класс обьекта\nprint(class_names[result])","repo_name":"roganovich/fashion-mnist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"48475910","text":"def calculate_mandatory_skills(candidate_skills,mandatory):\n mandatory_porcentage = 0\n mandatory_skills = 0\n if len(mandatory) == 0:\n return mandatory_porcentage\n for skill in mandatory:\n if skill in candidate_skills:\n mandatory_skills = mandatory_skills + 1\n mandatory_porcentage = (mandatory_skills*100)/len(mandatory) \n return mandatory_porcentage\n\ndef calculate_optional_skills(candidate_skills,optional):\n optional_porcentage = 0\n 
optional_skills = 0\n if len(optional) == 0:\n return optional_porcentage\n for skill in optional:\n if skill in candidate_skills:\n optional_skills = optional_skills + 1\n optional_porcentage = (optional_skills*100)/len(optional) \n return optional_porcentage\n\n\"\"\"def calculate_total_percentage(vacant,wheights): \n sum = (vacant[0]*wheights[0]) + (vacant[1]*wheights[1]) + (vacant[2]*wheights[2]) + (vacant[3]*wheights[3]) + (vacant[4]*wheights[4])\n total_max_percentage = wheights[0]+wheights[1]+wheights[2]+wheights[3]+wheights[4]\n print(total_max_percentage)\n percentage = sum*100/310\n print(percentage)\n return percentage\"\"\"\n\n\ndef calculate_languages(candidate_languages,vacant_languages):\n languages_count = 0\n porcentage = 0\n if len(vacant_languages) == 0:\n return porcentage\n for language in vacant_languages: \n if language in candidate_languages:\n languages_count = languages_count + 1\n porcentage = (languages_count*100)/len(vacant_languages)\n return porcentage\n\ndef calculate_salary(salary_objetive,min_salary,max_salary):\n porcentage = 0\n if min_salary < salary_objetive and salary_objetive < max_salary or salary_objetive == max_salary or min_salary == salary_objetive:\n porcentage = 100\n return porcentage\n if min_salary > salary_objetive:\n porcentage = 100\n return porcentage\n if salary_objetive > max_salary:\n porcentage = (max_salary*100)/salary_objetive\n return porcentage\n return porcentage\n\ndef calculate_experience(candidate_exp,required_exp):\n porcentage = 0\n if required_exp <= candidate_exp:\n porcentage = 100 \n return porcentage\n else:\n section = 100/required_exp\n porcentage = section * candidate_exp\n return porcentage\n return porcentage\n\ndef calculate_total_percentage(similarity,wheights): \n sum = (similarity[0]*wheights[0]) + (similarity[1]*wheights[1]) + (similarity[2]*wheights[2]) + (similarity[3]*wheights[3]) + (similarity[4]*wheights[4])\n total_max_percentage = wheights[0]+wheights[1]+wheights[2]+wheights[3]+wheights[4]\n print(\"porcentaje maximo\",total_max_percentage)\n percentage = sum/total_max_percentage\n #print(percentage)\n return percentage\n\ndef calculate_vacants_similarity(similarity):\n sum = similarity[0]+similarity[1]+similarity[2]+similarity[3] \n percentage = sum/4\n return percentage","repo_name":"enriqueSFranco/TT-ESCOM","sub_path":"backend/api/apps/recommendations/algorithms/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"11797949308","text":"\nimport ctypes\n\n\nwin_main = '浙江宏睿通信技术有限公司互联互通抄表系统'\nwin_addnode = '[CLASS:#32770]'\n\n\n\ndll = ctypes.WinDLL('AutoItX3_x64.dll')\ndll.AU3_Init()\n\n\nif dll.AU3_WinExists(win_main, ''):\n dll.AU3_WinActivate(win_main, '')\n dll.AU3_WinActivate(win_addnode, '')\n #dll.AU3_ControlClick(win_addnode, '',\n # '[CLASS:Button; INSTANCE:1]',\n # 'left', 1, 0, 0)\n dll.AU3_Send('{ENTER}', 0)\n #dll.AU3_Send('{ENTER}{TAB 2}%s{ENTER}' % node, 0)\n## with open('nodes.txt') as f:\n## for line in f:\n## node = line.strip()\n## if not node:\n## continue\n## if dll.AU3_WinExists(win_addnode, '从节点信息管理'):\n## dll.AU3_ControlClick(win_addnode, '从节点信息管理',\n## '[CLASS:Button; INSTANCE:1]',\n## 'left', 1, 5, 5)\n## dll.AU3_Send('{TAB 2}%s{ENTER}' % node, 0)\n## elif dll.AU3_WinExists(win_addnode, '档案确认'):\n## dll.AU3_ControlClick(win_addnode, '',\n## '[CLASS:Button; TEXT:手动添加]',\n## 'left', 1, 0, 0)\n## dll.AU3_Send('{TAB 2}%s{ENTER}' % node, 
0)\n","repo_name":"zaazbb/YM001NetworkAnalyzer","sub_path":"add_nodes/add_nodes.py","file_name":"add_nodes.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11279500229","text":"\n \ndef intcode_program(intcode):\n i = 0 # start of the next optcode position \n # instruction pointer\n while True:\n instruction = intcode[i]\n if instruction == 99:\n return intcode\n elif instruction == 1:\n intcode[intcode[i+3]] = intcode[intcode[i+1]] + intcode[intcode[i+2]]\n elif instruction == 2:\n intcode[intcode[i+3]] = intcode[intcode[i+1]] * intcode[intcode[i+2]]\n else:\n return [0]\n i = i + 4\n\n\nassert intcode_program([1,0,0,0,99]) == [2,0,0,0,99]\nassert intcode_program([2,3,0,3,99]) == [2,3,0,6,99]\nassert intcode_program([2,4,4,5,99,0]) == [2,4,4,5,99,9801]\nassert intcode_program([1,1,1,4,99,5,6,0,99]) == [30,1,1,4,2,5,6,0,99]\n\n\ninput = []\ninput[1] = 12\ninput[2] = 2\noutput = intcode_program(input)\nprint(output[0])\n\n\nfrom copy import copy\n\nfor noun in range(99):\n for verb in range(99):\n input = copy(original_input)\n input[1] = noun\n input[2] = verb\n if intcode_program(input)[0] == 19690720:\n print(noun, verb)\n print(100*noun + verb)\n","repo_name":"roman-89/advent_of_code","sub_path":"2019/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25735420414","text":"from .template import SnippetParsingError\n\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nfrom email.mime.audio import MIMEAudio\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication\n\nimport re\nfrom csv import DictReader\n\n\nclass Binder:\n \"\"\"\n This class uses a mailtorpedo.reader.Reader object and a mailtorpedo.template.Template \n object and creates an iterable object containing tuples, each having an email addresses \n and a email.mime.multipart.MimeMultipart object when told.\n \"\"\"\n def __init__(self, reader, template):\n \"\"\"\n reader: mailtorpedo.reader.CSVReader or mailtorpedo.reader.ExcelReader instance\n template: mailtorpedo.template.Teamplate instance\n \"\"\"\n self.reader = reader\n self.template = template\n self._set_type()\n\n def _set_type(self):\n \"\"\"\n Decides whether the mailbody needs to be modified for each individual or not.\n\n Returns None\n \"\"\"\n for snippet in self.template.snippets:\n if snippet.type in (\"plain\", \"html\"):\n if self.reader.check_template(snippet):\n break\n\n if self.reader.is_required():\n self.type = \"solo\" # for individually modified\n else:\n self.type = \"bulk\" # for unmodified\n\n def parse(self):\n \"\"\"\n Creates a email.mime.multipart.MimeMultipart instance for each associated email\n address in self.reader object.\n \n Returns iterable containing tuples each having an email address and assosicated\n mime object.\n \"\"\"\n reader = self.reader.parsed_dict()\n for row in reader:\n mime = MIMEMultipart()\n for snippet in self.template.snippets:\n if snippet.type in (\"plain\", \"html\"):\n snippet_content = snippet.content\n if self.type == 'solo':\n for header in self.reader.headers:\n snippet_content = re.sub(\n f\"{{{{ {header} }}}}\", row[header], snippet_content\n )\n content = MIMEText(snippet_content, snippet.type)\n mime.attach(content)\n mime[\"Subject\"] = self.template.subject\n mime[\"To\"] = 
row[self.reader.email_field]\n yield (row[self.reader.email_field], mime)\n\n def add_attachments(self, mime):\n for snippet in self.template.snippets:\n if snippet.type in (\"plain\", \"html\"):\n continue\n if snippet.type == \"image\":\n with open(snippet.content, \"rb\") as img:\n content = MIMEImage(img.read())\n content.add_header(\n \"Content-Disposition\",\n \"attachment\",\n filename=snippet.content.split(\"/\")[-1],\n )\n elif snippet.type == \"audio\":\n with open(snippet.content, \"rb\") as audio:\n content = MIMEAudio(audio.read())\n content.add_header(\n \"Content-Disposition\",\n \"attachment\",\n filename=snippet.content.split(\"/\")[-1],\n )\n elif snippet.type == \"bin\":\n with open(snippet.content, \"rb\") as binary:\n content = MIMEApplication(binary.read())\n content.add_header(\n \"Content-Disposition\",\n \"attachment\",\n filename=snippet.content.split(\"/\")[-1],\n )\n else:\n raise SnippetParsingError(\"Unidentified Snippet\")\n\n mime.attach(content)\n return mime","repo_name":"pptx704/torpedo","sub_path":"src/mailtorpedo/binder.py","file_name":"binder.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"78"} +{"seq_id":"25272046331","text":"import sys\nsys.path.append('../yolov3-diploma')\n\nimport os\nimport time\nimport cv2 as cv\nimport tensorflow as tf\nimport numpy as np\nfrom core.utils import get_example\nfrom absl import app, flags\nfrom absl.flags import FLAGS\nfrom azure.cognitiveservices.vision.face import FaceClient\nfrom msrest.authentication import CognitiveServicesCredentials\nfrom tqdm import tqdm\n\nKEY = 'e7d85f97770144068b8cb0135d96f54c'\nENDPOINT = 'https://westcentralus.api.cognitive.microsoft.com'\n\nflags.DEFINE_string('input', './data/FDDB-folds', 'Path to initial dataset')\nflags.DEFINE_string('output', './data/fddb_dataset2.tfrecord', 'Path to preprocessed dataset')\n\n\ndef main(args):\n face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))\n contents = []\n lengths = []\n\n for i in range(1, 11):\n with open(os.path.join(FLAGS.input, f'FDDB-fold-{i:02d}-ellipseList.txt')) as file:\n line = file.readline().rstrip()\n while line:\n if not line.endswith(' 1'): # Mode originalPics folder to fddb-folds!\n contents.append(os.path.join(FLAGS.input, 'originalPics', line) + '.jpg')\n lengths.append(int(file.readline().rstrip()))\n\n line = file.readline().rstrip()\n\n\n writer = tf.io.TFRecordWriter(FLAGS.output)\n\n for image_p, length in tqdm(zip(contents, lengths), total=len(contents)):\n try:\n with open(image_p, 'rb') as img:\n detected_faces = face_client.face.detect_with_stream(image=img,\n return_face_attributes=['emotion'])\n except Exception:\n time.sleep(10)\n detected_faces = None\n\n if not detected_faces or len(detected_faces) != length:\n continue\n\n emotion_list, coordinates, class_labels = [], [], []\n\n image = cv.imread(image_p)\n h, w = image.shape[:2]\n\n for face in detected_faces:\n face_rect = face.face_rectangle\n x1 = face_rect.left\n y1 = face_rect.top\n x2 = x1 + face_rect.width\n y2 = y1 + face_rect.height\n\n # Add emotion with highest probability\n emotions = face.face_attributes.emotion.__dict__\n del emotions['additional_properties']\n emotions = np.array([[k, v] for k, v in emotions.items()])\n\n coordinates.append([x1 / w, y1 / h, x2 / w, y2 / h])\n class_labels.append(np.argmax(emotions[:, 1]))\n emotion_list.append(emotions[np.argmax(emotions[:, 1]), 0])\n\n example = get_example(image_p, coordinates, 
class_labels)\n writer.write(example.SerializeToString())\n\n writer.close()\n\n\nif __name__ == '__main__':\n app.run(main)\n\n#FDDB dataset","repo_name":"rudimytro/yolov3-emotions","sub_path":"preprocessors/preprocess_fddb.py","file_name":"preprocess_fddb.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28651087141","text":"# Código feito por Heloisa Cavalcanti Oliveira\n\n# Se achar necessario, faça import de outras bibliotecas\n\n# Crie a função que será avaliada no exercício aqui\ndef soma_dos_aninhados(lista_de_listas):\n soma = 0\n\n for lista in lista_de_listas:\n for i in lista:\n soma += i\n\n return soma\n\n# Teste a sua função aqui (caso ache necessário)\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Modulo3-Inteli-2023-1/lista-semana1-Heloisa-Oliveira","sub_path":"exercicio3.py","file_name":"exercicio3.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25863609670","text":"import logging\nfrom logging.handlers import TimedRotatingFileHandler\nimport time\n\nfrom volta_plus.models import VoltaNetwork\n\n\nlogging.basicConfig(\n level=logging.WARNING,\n format='[%(levelname)s][%(asctime)s] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n handlers=[TimedRotatingFileHandler('volta_plus.log', when='midnight', backupCount=3, utc=True)]\n)\n\n\nif __name__ == '__main__':\n volta_network = VoltaNetwork(poor=True)\n\n while True:\n try:\n volta_network.update()\n logging.debug(\"updated Volta Network\")\n time.sleep(15)\n except Exception as e:\n logging.exception(e)\n time.sleep(30)\n","repo_name":"rsjudka/volta-plus","sub_path":"volta_plus.py","file_name":"volta_plus.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13357732355","text":"import RPi.GPIO as GPIO\nimport smbus\nimport time\n\nbus = smbus.SMBus(1)\naddress = 0x48\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(14,GPIO.OUT)\n\nbuz = GPIO.PWM(14,100)\nbuz.start(100)\nbuz.ChangeDutyCycle(80)\n\ndef readLight() :\n adc = bus.read_byte(0x48)\n return adc\n\nwhile True:\n bus.write_byte(address,0)\n time.sleep(0.1)\n an0 = readLight()\n print (an0)\n \n if an0 < 100 :\n buz.ChangeFrequency(an0)\n \nGPIO.cleanup()\n","repo_name":"Hennakk/raspberry-Pi","sub_path":"Light_detection_ADC_control/Light_detection.py","file_name":"Light_detection.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23834572195","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution(object):\n def mergeTwoLists(self, list1, list2):\n \"\"\"\n :type list1: Optional[ListNode]\n :type list2: Optional[ListNode]\n :rtype: Optional[ListNode]\n \"\"\"\n if(list1 is None and list2 is None):\n return list1\n elif(list1 is None and list2 is not None):\n return list2\n elif(list1 is not None and list2 is None):\n return list1\n\n if (list1.val < list2.val):\n currNode = list1\n list1 = list1.next\n else:\n currNode = list2\n list2 = list2.next\n\n head = currNode\n \n while(list1 is not None and list2 is not None):\n if (list1.val < list2.val):\n currNode.next = list1\n currNode = currNode.next\n list1 = list1.next\n else:\n currNode.next = 
list2\n currNode = currNode.next\n list2 = list2.next\n\n if(list1 is None):\n currNode.next = list2\n elif(list2 is None):\n currNode.next = list1\n\n return head\n","repo_name":"matchasaur/NeetCode-150","sub_path":"Easy: Merge Two Sorted Lists.py","file_name":"Easy: Merge Two Sorted Lists.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5373061910","text":"\nimport json\nimport re\nfrom flask import Blueprint, request, g, current_app\n\nfrom app.code import ReturnCode\n\nfrom app.views.utils import json_response, jwt_authorize_required\n\nfrom .utils import BaseDBManager\n\nbase_dbs = Blueprint('base_dbs', __name__)\n\n\n@base_dbs.route('/base_dbs', methods=['GET'])\n@base_dbs.route('/base_dbs/', methods=['GET'])\ndef base_dbs_operate(db_name: str = None):\n if db_name is None:\n return_code, data = BaseDBManager.get_base_dbs()\n return json_response(200, code=return_code.SUCCESS.value, data=data)\n else:\n args = request.args\n kwargs = dict()\n for k, v in args.items():\n if isinstance(v, (str,)) and re.match(\"^\\[(.*)\\]$\", v):\n kwargs[k] = json.loads(v)\n return_code, data = BaseDBManager.get_db_datas(db_name, **kwargs)\n return json_response(200, code=return_code.value, data = data)\n\n\n@base_dbs.route('/base_dbs/valve_series//brief', methods=['GET'])\ndef valve_series(series_id: int):\n return_code, data = BaseDBManager.get_valve_series_breif(series_id)\n return json_response(200, code=return_code.value, data=data)\n","repo_name":"ypgsh/test","sub_path":"app/views/base_dbs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"497035970","text":"import numpy as np\nimport yfinance as yf\nimport plotly\nimport plotly.graph_objects as go\nfrom flask import Flask, render_template\nfrom flask import request\nimport json\n\ndef get_yf_tickers(tickers):\n return [yf.Ticker(t) for t in tickers]\n\ndef get_closing_prices(yf_tickers, period, interval='1d'):\n cps = []\n for t in yf_tickers:\n yf_history = t.history(period=period, interval=interval)\n cps.append(yf_history['Close'].values)\n dates = list(yf_history.index)\n return dates, cps\n\ndef calc_ratios(closing_prices):\n xlnx_cp, amd_cp = closing_prices\n return np.divide(xlnx_cp, amd_cp)\n\ndef calc_gains(mr_cps, cps, r):\n xlnx_mr, _ = mr_cps\n xlnx_cp, amd_cp = cps\n diff = xlnx_mr-xlnx_cp\n xnlx_rise_gains = diff+r*amd_cp-xlnx_cp\n amd_drop_gains = diff+amd_cp-(xlnx_cp/r)\n mm_gains = diff+(2/(r+1))*(r*amd_cp-xlnx_cp)\n return xnlx_rise_gains, amd_drop_gains, mm_gains\n\ndef fig_update_layout(fig, legend, xaxis, yaxis):\n fig.update_layout(\n template=\"simple_white\",\n showlegend=legend, \n hovermode=\"x unified\", \n xaxis_title=xaxis,\n yaxis_title=yaxis,\n margin=dict(l=20, r=20, t=20, b=20),\n legend=dict(yanchor=\"top\",y=0.99,xanchor=\"right\",x=0.99))\n return fig\n\ndef graph_ratios(dates, ratios):\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=dates, y=ratios, mode='lines', name='Ratio', hovertemplate='Ratio: %{y:.3f}', line_color=\"blue\")) \n return fig_update_layout(fig, False, \"Date\", \"XNLX/AMD Ratio\")\n\ndef graph_prices(dates, cps):\n xlnx_cp, amd_cp = cps\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=dates, y=xlnx_cp, mode='lines', name='XNLX', line_color=\"purple\", hovertemplate='XNLX: %{y:.2f}'))\n fig.add_trace(go.Scatter(x=dates, y=amd_cp, mode='lines', 
name='AMD', line_color=\"rgb(237, 28, 36)\", hovertemplate='AMD: %{y:.2f}'))\n return fig_update_layout(fig, True, \"Date\", \"Stock Price ($)\")\n\ndef graph_gains(dates, gains):\n xnlx_rise_gains, amd_drop_gains, mm_gains = gains\n fig = go.Figure(go.Scatter(x=dates, y=mm_gains, hovertemplate='Meet in Middle: %{y:.2f}'))\n fig.add_hline(y=1, line_color='rgb(191, 52, 52)', line_width=1, line_dash='dash')\n fig.add_trace(go.Scatter(\n x=np.concatenate([dates, dates[::-1]]),\n y=np.concatenate([xnlx_rise_gains, amd_drop_gains[::-1]]),\n customdata=np.concatenate([amd_drop_gains, xnlx_rise_gains[::-1]]),\n line_color='rgb(205, 209, 228)',\n fill='toself',\n hovertemplate='Range: %{y:.2f} to %{customdata:.2f}'\n ))\n fig.update_traces(hoverinfo='skip')\n fig.for_each_trace(lambda t: t.update(hoveron='points'))\n return fig_update_layout(fig, False, \"Purchase Date\", \"Gain/Loss ($)\")\n\ndef get_data(duration):\n mr = 1.7234\n tickers = ['XLNX','AMD']\n yft = get_yf_tickers(tickers)\n dates, cps = get_closing_prices(yft, '1d', '5m') # today's data\n mr_cps = [cp[-1] for cp in cps] # most recent prices\n\n if duration == \"Month\":\n dates, cps = get_closing_prices(yft, '1mo') \n if duration == \"Year\":\n dates, cps = get_closing_prices(yft, '1y')\n if duration == \"Three_Months\":\n dates, cps = get_closing_prices(yft, '3mo')\n\n fig1 = graph_prices(dates, cps)\n fig2 = graph_ratios(dates, calc_ratios(cps))\n fig3 = graph_gains(dates, calc_gains(mr_cps, cps, mr))\n graph1JSON = json.dumps(fig1, cls=plotly.utils.PlotlyJSONEncoder)\n graph2JSON = json.dumps(fig2, cls=plotly.utils.PlotlyJSONEncoder)\n graph3JSON = json.dumps(fig3, cls=plotly.utils.PlotlyJSONEncoder)\n return graph1JSON, graph2JSON, graph3JSON\n\napp = Flask(__name__)\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef view():\n if request.method == 'POST':\n duration = request.form.get('duration')\n duration = duration.replace(\" \", \"_\")\n else: \n duration = 'Three_Months'\n graph1JSON, graph2JSON, graph3JSON = get_data(duration)\n return render_template(\"index.html\", graph1JSON=graph1JSON, graph2JSON=graph2JSON, graph3JSON=graph3JSON, duration=duration)\n","repo_name":"HilarieSit/amd-xlnx-dashboard","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32965857961","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n'''\r\nCreated on 9/5/2016\r\n\r\n@author: Dell\r\n'''\r\nfrom Billetera import * \r\nfrom datetime import date \r\nimport unittest\r\n\r\n\r\nclass BilleteraTester(unittest.TestCase):\r\n \r\n def testCrearBilletera(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"nombre\", \"apellido\", 23711366, \"4561\")\r\n self.assertTrue(miBilletera._BilleteraElectronica__Id==\"1234\")\r\n self.assertTrue(miBilletera.saldo()==0)\r\n self.assertTrue(miBilletera._BilleteraElectronica__PIN==\"4561\")\r\n self.assertTrue(miBilletera._BilleteraElectronica__recargas==[])\r\n self.assertTrue(miBilletera._BilleteraElectronica__consumos==[])\r\n self.assertTrue(miBilletera.nombres==\"nombre\")\r\n self.assertTrue(miBilletera.apellidos==\"apellido\")\r\n self.assertTrue(miBilletera.CI==23711366)\r\n \r\n def testCrearBilleteraFalloId(self):\r\n try:\r\n miBilletera = BilleteraElectronica(\"1234.3213\", \"nombre\", \"apellido\", 23711366, \"4561\")\r\n print(\"Fallo de deteccion de error de tipo al construir\")\r\n except:\r\n print(\"Exito al detectar error de tipo al 
construir\")\r\n\r\n def testCrearBilleteraFalloNombre(self):\r\n try:\r\n miBilletera = BilleteraElectronica(\"1234\", 1273676, \"apellido\", 23711366, \"4561\")\r\n print(\"Fallo de deteccion de error de tipo al construir\")\r\n except:\r\n print(\"Exito al detectar error de tipo al construir\")\r\n\r\n def testCrearBilleteraFalloApellid(self):\r\n try:\r\n miBilletera = BilleteraElectronica(\"1234.3213\", \"nombre\", 123478628, 23711366, \"4561\")\r\n print(\"Fallo de deteccion de error de tipo al construir\")\r\n except:\r\n print(\"Exito al detectar error de tipo al construir\")\r\n\r\n def testCrearBilleteraFalloId(self):\r\n try:\r\n miBilletera = BilleteraElectronica(\"1234.3213\", \"nombre\", \"apellido\", 23711366, \"asd4561\")\r\n print(\"Fallo de deteccion de error de tipo al construir\")\r\n except:\r\n print(\"Exito al detectar error de tipo al construir\")\r\n\r\n def testUsuarioConNombreEspecial(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Ñángara\", \"Diaz\", 23711366, \"4561\")\r\n self.assertEquals(miBilletera.nombres,\"Ñángara\")\r\n \r\n def testUsuarioConApellidoEspecial(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"David\", \"Ñángara\", 23711366, \"4561\")\r\n self.assertEquals(miBilletera.apellidos,\"Ñángara\")\r\n \r\n def testUsuarioConNombreSimple(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Eliot\", \"Hernandez\", 23711366, \"4561\")\r\n self.assertEquals(miBilletera.nombres,\"Eliot\")\r\n \r\n def testUsuarioConNombreMultiple(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Eliot David\", \"Hernandez\", 23711366, \"4561\")\r\n self.assertEquals(miBilletera.nombres,\"Eliot David\")\r\n \r\n def testUsuarioConApellidoSimple(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Eliot\", \"Hernandez\", 23711366, \"4561\")\r\n self.assertEquals(miBilletera.apellidos,\"Hernandez\")\r\n \r\n def testUsuarioConApellidoMultiple(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Eliot\", \"Hernandez Diaz\", 23711366, \"4561\")\r\n self.assertEquals(miBilletera.apellidos,\"Hernandez Diaz\")\r\n \r\n def testRecargaSimple(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Eliot\", \"Hernandez\", 23711366, \"4561\")\r\n credito = Transaccion(123, 117)\r\n miBilletera.recargar(credito)\r\n self.assertEquals(miBilletera.saldo(), 123)\r\n \r\n\r\n def testConsumirConPINCorrecto(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Eliot\", \"Hernandez\", 23711366, \"4561\")\r\n credito = Transaccion(234, \"Casa\")\r\n miBilletera.recargar(credito)\r\n debito = Transaccion(122, \"Restaurant El Paso\")\r\n miBilletera.consumir(\"4561\", debito)\r\n self.assertEquals(miBilletera.saldo(), 234-122)\r\n\r\n def testConsumirSinPINCorrecto(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Eliot\", \"Hernandez\", 23711366, \"4561\")\r\n credito = Transaccion(234, \"Casa\")\r\n miBilletera.recargar(credito)\r\n debito = Transaccion(122, \"Restaurant El Paso\")\r\n miBilletera.consumir(\"4562\", debito)\r\n self.assertEquals(miBilletera.saldo(), 234)\r\n\r\n def testConsumirSinSaldoSuficiente(self):\r\n try:\r\n miBilletera = BilleteraElectronica(\"1234\", \"Eliot\", \"Hernandez\", 23711366, \"4561\")\r\n credito = Transaccion(100, \"Casa\")\r\n miBilletera.recargar(credito)\r\n debito = Transaccion(122, \"Restaurant El Paso\")\r\n miBilletera.consumir(\"4561\", debito)\r\n print(\"Fallo de encuentro de Error\")\r\n except:\r\n self.assertEquals(miBilletera.saldo(),100)\r\n \r\n def 
testRecargaNegativa(self):\r\n try:\r\n miBilletera = BilleteraElectronica(\"1234\", \"Eliot\", \"Hernandez\", 23711366, \"4561\")\r\n credito = Transaccion(25680, \"Casa\")\r\n miBilletera.recargar(credito)\r\n credito2 = Transaccion(-680, \"Casa\")\r\n miBilletera.recargar(credito2)\r\n except:\r\n self.assertEquals(miBilletera.saldo(), 25680)\r\n\r\n def testConsumoNegativo(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Eliot\", \"Hernandez\", 23711366, \"4561\")\r\n credito = Transaccion(970680, \"Casa\")\r\n miBilletera.recargar(credito)\r\n debito = Transaccion(-680, \"Casa\")\r\n miBilletera.consumir(\"4561\", debito)\r\n self.assertEquals(miBilletera.saldo(), 970680)\r\n \r\n def testRecargaYConsumoDecimal(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Eliot\", \"Hernandez\", 23711366, \"4561\")\r\n credito = Transaccion(25.97068085, \"Casa\")\r\n miBilletera.recargar(credito)\r\n debito = Transaccion(20.987654321, \"Casa\")\r\n miBilletera.consumir(\"4561\", debito)\r\n self.assertEquals(miBilletera.saldo(), (Decimal(25.97068085)-Decimal(20.987654321)))\r\n\r\n def testConsumoExacto(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Eliot\", \"Hernandez\", 23711366, \"4561\")\r\n credito = Transaccion(\"25.97068085\", \"Casa\")\r\n miBilletera.recargar(credito)\r\n debito = Transaccion(\"25.97068085\", \"Casa\")\r\n miBilletera.consumir(\"4561\", debito)\r\n self.assertEquals(miBilletera.saldo(), (Decimal(25.97068085)-Decimal(25.97068085))) \r\n \r\n def testRecargaCero(self):\r\n try:\r\n miBilletera = BilleteraElectronica(\"1234\", \"Meñique\", \"Stark\", 23711366, \"4561\")\r\n credito2 = Transaccion(123,\"Invernalia\")\r\n miBilletera.recargar(credito2)\r\n credito = Transaccion(0,\"Samoa\")\r\n miBilletera.recargar(credito)\r\n print(\"Fallo al no permitir recargas de 0\")\r\n except:\r\n self.assertEquals(miBilletera.saldo(),123)\r\n\r\n def testConsumoCero(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Meñique\", \"Stark\", 23711366, \"4561\")\r\n credito2 = Transaccion(123,\"Invernalia\")\r\n miBilletera.recargar(credito2)\r\n consumo = Transaccion(0,\"Samoa\")\r\n miBilletera.consumir(\"4561\",consumo)\r\n self.assertEquals(miBilletera.saldo(),123)\r\n \r\n def testConsumoEnteroRecargaDecimal(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Meñique\", \"Stark\", 23711366, \"4561\")\r\n credito = Transaccion(123.123,\"Invernalia\")\r\n miBilletera.recargar(credito)\r\n consumo = Transaccion(123,\"Samoa\")\r\n miBilletera.consumir(\"4561\",consumo)\r\n self.assertEquals(miBilletera.saldo(),Decimal(123.123)-consumo.monto)\r\n\r\n def testConsumoDecimalRecargaEntero(self):\r\n miBilletera = BilleteraElectronica(\"1234\", \"Meñique\", \"Stark\", 23711366, \"4561\")\r\n credito = Transaccion(123,\"Invernalia\")\r\n miBilletera.recargar(credito)\r\n consumo = Transaccion(12.12,\"Samoa\")\r\n miBilletera.consumir(\"4561\",consumo)\r\n self.assertEquals(miBilletera.saldo(),Decimal(123)-consumo.monto)\r\n\r\nif __name__ == \"__main__\":\r\n #import sys;sys.argv = ['', 'Test.testName']\r\n unittest.main()\r\n","repo_name":"Davidhervil/Ing_Soft_Tarea3","sub_path":"pruebasBilletera.py","file_name":"pruebasBilletera.py","file_ext":"py","file_size_in_byte":8252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9818561290","text":"coded = \"pbatenghyngvbaf, lbh fbyirq zl yhvtv chmmyr. @ zr ba gjvggre jvgu lbhe snibhevgr qrffreg gb trg lbhe erjneq. 
Vg'f yvxr, abg n irel tbbq erjneq fb hu\"\n\nbase = ord(\"a\")\nfor i in range(1, 26):\n out = \"\"\n for char in coded:\n # only transpose alphanumeric characters\n if 97 <= ord(char) <= 122:\n out += chr(base + (ord(char) + i - base) % 26)\n else:\n out += char\n print(i, out)\n","repo_name":"psm2303/Python-projects","sub_path":"cesar.py","file_name":"cesar.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29014291003","text":"''' Getting user input\n\nWe can get input from users by using the input() function.\nWhat I can do is get the input fron the users by using the input() function.\nAnd the store that info in a variable and then I can interact with that information the way I want.\n \n'''\n\n\n\n# person = input(\"Enter your name: \") # Carl\n# print(\"Hello \", person) # Hello Carl\n\n# x = input(\"Enter a number: \") # 5\n# y = input(\"Enter another number: \") # 6\n# z = x + y\n\n# print(z) # 56\n\n\n\n\n\nx = input(\"Enter a number: \") # 4\ny = input(\"Enter another number: \") # 8\nprint(int(x) + int(y)) # 12\n","repo_name":"minefarmer/Python-CompLearn","sub_path":"my_notes/Interacting_with_python/getting_user_input.py","file_name":"getting_user_input.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3263469703","text":"import dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nimport plotly_express as px\r\nimport pandas as pd\r\n\r\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\r\n\r\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\n\r\nserver = app.server\r\n\r\ndf = pd.read_excel(\"https://github.com/chris1610/pbpython/blob/master/data/salesfunnel.xlsx?raw=True\", engine=\"openpyxl\")\r\nmgr = df.Manager.unique()\r\norders_all = (df\r\n .groupby(['Name','Status'])\r\n .Quantity.sum()\r\n .reset_index()\r\n )\r\n \r\nfig_all = px.bar(orders_all, \r\n x='Name', \r\n y='Quantity',\r\n color='Status',\r\n title='Order Status by Customer')\r\n\r\napp.layout = html.Div([\r\n html.H2('This is my first plotly Dash'),\r\n dcc.Dropdown(id = 'manager-dropdown',\r\n options = [{'label': i, 'value' : i} for i in mgr],\r\n value=None),\r\n html.Div(id='selected-manager-div', children='selected manager is: '),\r\n dcc.Graph(id='sales-funnel-bar-graph', figure=fig_all)\r\n])\r\n\r\n\r\n### add interactiveness\r\n@app.callback(Output(component_id='selected-manager-div',\r\n component_property='children'),\r\n Input(component_id='manager-dropdown', \r\n component_property='value'))\r\ndef update_manager_selection(manager_dropdown_value):\r\n return 'selected manager is: {}'.format(manager_dropdown_value)\r\n\r\n\r\n@app.callback(Output(component_id='sales-funnel-bar-graph',\r\n component_property='figure'),\r\n Input(component_id='manager-dropdown',\r\n component_property='value'))\r\ndef update_graph(manager_dropdown_value):\r\n \r\n if manager_dropdown_value is None:\r\n return fig_all\r\n else:\r\n df_plot = df.query('Manager == \"{}\"'.format(manager_dropdown_value))\r\n title = 'Sales funnel of {}'.format(manager_dropdown_value)\r\n \r\n orders = (df_plot\r\n .groupby(['Name','Status'])\r\n .Quantity.sum()\r\n .reset_index()\r\n )\r\n \r\n fig = px.bar(orders, \r\n x='Name', \r\n y='Quantity',\r\n color='Status',\r\n title=title)\r\n \r\n return 
fig\r\n\r\nif __name__ == '__main__':\r\n app.run_server()","repo_name":"maksteel/ga-dash-heroku","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70581562492","text":"import torch\nfrom torch_geometric.nn import GATConv, to_hetero\n\n\nclass GNNEncoder(torch.nn.Module):\n def __init__(self,\n hidden_channels=128,\n num_layers=2,\n activation=torch.nn.ReLU(),\n drop_rate=0.4):\n super().__init__()\n self.convs = torch.nn.ModuleList()\n self.hidden_channels = hidden_channels\n self.activation = activation\n self.drop_rate = drop_rate\n\n for _ in range(num_layers):\n conv = GATConv((-1, -1), hidden_channels, dropout=drop_rate, add_self_loops=False)\n self.convs.append(conv)\n\n def forward(self, x, edge_index, edge_attr):\n for conv in self.convs:\n x = conv(x, edge_index, edge_attr)\n return x\n\n\n","repo_name":"Faiail/ArtRecSys","sub_path":"src/models/graph/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42844931316","text":"x = int(input())\n\nif x%2!=0:\n str =input()\n lst = str.split()\n for i in range(0, len(lst)):\n lst[i] = int(lst[i])\n\n lst.sort()\n l = int(len(lst)/2)\n print(lst)\n print(lst[l])","repo_name":"CodeZoneTech/bleedcode","sub_path":"bleedcode 1.0/Hunting the Middle Value.java/HuntingtheMiddleValue.py","file_name":"HuntingtheMiddleValue.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26651986737","text":"import os\nfrom datetime import datetime\n\ndesired_date = datetime.strptime('2020-3-2T08:00:00', '%Y-%m-%dT%H:%M:%S')\n\nreport = 'ID;Backend;Circuit;Date;Results\\n'\nmitigation_report = 'ID;Backend;Circuit;Date;Results\\n'\n\n\ndef get_report_string_from_file(file_path):\n opened_file = open(file_path)\n\n line = opened_file.readline() # Omit first line of report\n line = opened_file.readline()\n\n report_str = ''\n\n while line:\n job_date = datetime.strptime(line.split(';')[3], '%Y-%m-%dT%H:%M:%S.%fZ')\n if job_date > desired_date:\n report_str += line\n line = opened_file.readline()\n\n opened_file.close()\n return report_str\n\n\nfor file in os.listdir(os.getcwd()):\n if file.startswith('raw_jobs'):\n report += get_report_string_from_file(file)\n elif file.startswith('raw_mitigation'):\n mitigation_report += get_report_string_from_file(file)\n else:\n print(f'Ignoring: {file}.')\n\nfile = open('raw_jobs_report.csv', 'w')\nfile.write(report)\nfile.close()\n\nfile = open('raw_mitigation_jobs_report.csv', 'w')\nfile.write(mitigation_report)\nfile.close()\n\nprint('Launching aReportConverter.')\n\nos.system('python aReportConverter.py')\n\nprint('Finished.')\n","repo_name":"Tomev/QEL","sub_path":"Tools/report_integrator.py","file_name":"report_integrator.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"5857557753","text":"#!/usr/bin/env python3\n\"\"\"\nCount the polytomy sizes for each node, and output along with the number of leaves descending from each, and\nwhether it is a plant, animal, fungus, or (eu)bacterium.\n\nAfter running `polytomy_analysis.py OpenTreevX.tre > results.out`, the data can be visualized e.g. 
in R using\n\ndata <- read.delim('results.out', header=T, stringsAsFactors=FALSE)\nc('Other'= '#00000011', 'Chloroplastida'='#00FF0033', 'Metazoa'='#FF000033', 'Fungi'='#0000FF33', 'Bacteria'='#FF00FF33') -> cols\nplot(polytomy.size ~ num.spp, data=data, log='xy', pch=20, col=cols[type], cex=2)\n\n#individual points can then be identified by\n\nidentify(data$num.spp, data$polytomy.size, labels = data$name, cex=0.6)\n\n########\n\nTo get specific results for a subclade, e.g. the lepidosaurs, find the OTT number (e.g. 35881 for lepidosaurs, 186816 for serpentes, etc), and extract that clade from labelled_supertree_simplified_ottnames.tre using my subtree_extract.pl script.\n\ne.g.\n\nServerScripts/TreeBuild/subtree_extract.pl data/OpenTree/opentree7.0_tree/labelled_supertree/labelled_supertree_simplified_ottnames.tre 35881\nServerScripts/Analysis/polytomy_analysis.py 35881.nwk > 35881.out\n\n#then in R\n\ndata <- read.delim('35881.out', header=T, stringsAsFactors=FALSE)\nc('Other'= '#00000011', 'Chloroplastida'='#00FF0033', 'Metazoa'='#FF000033', 'Fungi'='#0000FF33', 'Bacteria'='#FF00FF33') -> cols\nplot(polytomy.size ~ num.spp, data=data, log='xy', pch=20, col=cols[type], cex=2)\nidentify(data$num.spp, data$polytomy.size, labels = data$name, cex=0.6)\n\n\"\"\"\nimport argparse\nimport re\nfrom dendropy import Tree\n\n\nlabel_nodes = {'Other':0, 'Chloroplastida_ott361838':1, 'Metazoa_ott691846':2, 'Fungi_ott352914':3, 'Bacteria_ott844192':4}\ntarget_nodes = {}\nnames = {index:re.sub(\"_ott\\d+\", \"\", k) for k, index in label_nodes.items()}\n\nparser = argparse.ArgumentParser(description='Count the number of unnamed nodes in a tree')\nparser.add_argument('treefile', type=argparse.FileType('r'), help='A newick-format tree')\n\nargs = parser.parse_args()\n\ndef warn(*objs):\n print(*objs, file=sys.stderr)\n\ntree = Tree.get(file=args.treefile, schema='newick', preserve_underscores=True, suppress_leaf_node_taxa=True)\n\n#set edge length to number of leaves\nfor node in tree.postorder_node_iter():\n if node.is_leaf():\n node.n_leaves = 1\n else:\n if node.label in label_nodes:\n target_nodes[node.label] = node\n try:\n node._parent_node.n_leaves += node.n_leaves\n except:\n try:\n node._parent_node.n_leaves = node.n_leaves\n except:\n pass #the root\n\n\n \n#set all descendants of each type\nfor nm, root in target_nodes.items(): \n for nd in root.postorder_internal_node_iter():\n nd.type = label_nodes[nm]\n\nprint(\"\\t\".join([\"polytomy.size\", \"num.spp\", \"type\", \"name\"]))\nfor n in tree.postorder_internal_node_iter():\n try:\n leaves = n.n_leaves\n except:\n leaves = None\n try:\n i = n.type\n except:\n i = None\n \n print(\"\\t\".join([str(n.num_child_nodes()),str(leaves), names.get(i or 0) or \"\", n.label or \"\"]))","repo_name":"OneZoom/OZtree","sub_path":"OZprivate/ServerScripts/Analysis/polytomy_analysis.py","file_name":"polytomy_analysis.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"78"} +{"seq_id":"24181124829","text":"import sys\r\nimport xml.etree.ElementTree as ET\r\n\r\n'''\r\n根据函数体源码生成的xml提取statement子树序列\r\n'''\r\n\r\n\r\n# 带双亲节点的树节点\r\nclass treeNode:\r\n def __init__(self, parent, ele):\r\n if parent != None:\r\n self.parent = parent\r\n self.ele = ele\r\n else:\r\n self.parent = parent\r\n self.ele = ele\r\n\r\n\r\n# 根据根节点提取AST\r\ndef extractSTBaseRoot(root):\r\n # 添加AST叶子节点\r\n def transform(root):\r\n if root.text != None:\r\n root.append(ET.Element(root.text))\r\n for child 
in root:\r\n transform(child)\r\n return root\r\n\r\n # 深度优先遍历树\r\n def traverse(node):\r\n print(node.tag)\r\n for childNode in node:\r\n traverse(childNode)\r\n\r\n # 根据深度优先遍历得到的列表,提取statement子树\r\n def extractStatement(tree):\r\n statementList = []\r\n for node in tree:\r\n if node.ele.tag in statemnentTag:\r\n statementList.append(node.ele)\r\n if node.parent != None:\r\n node.parent.remove(node.ele)\r\n return statementList\r\n\r\n # 深度优先遍历树,树的节点为带双亲节点的结构\r\n def createTreeDeepFirst(root, list, parent):\r\n list.append(treeNode(parent, root))\r\n for node in root:\r\n createTreeDeepFirst(node, list, root)\r\n\r\n statemnentTag = {\"if\", \"while\", \"for\", \"unit\", \"switch\"}\r\n treeDeepFirstList = []\r\n # root = transform(root)\r\n createTreeDeepFirst(root, treeDeepFirstList, None)\r\n statementList = extractStatement(treeDeepFirstList)\r\n return statementList\r\n","repo_name":"zhichengshi/codeClassificationAndClustering","sub_path":"cvrnn/extractStatement.py","file_name":"extractStatement.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73428611131","text":"with open(\"day13_input.txt\", \"r\") as f:\n lines = f.read().splitlines()\n\n\ndots = []\nactions = []\nfor line in lines:\n if not line:\n continue\n if \",\" in line:\n x, y = line.split(\",\")\n dots.append((int(x), int(y)))\n else:\n actions.append(line.split()[-1].split(\"=\"))\n\nprevious = dots[:]\n\nfor axis, value in actions:\n result = []\n value = int(value)\n if axis == \"x\":\n for dot in previous:\n if dot[0] < value:\n result.append((dot[0], dot[1]))\n else:\n result.append((2 * value - dot[0], dot[1]))\n\n elif axis == \"y\":\n for dot in previous:\n if dot[1] < value:\n result.append((dot[0], dot[1]))\n else:\n result.append((dot[0], 2 * value - dot[1]))\n\n print(len(set(result)))\n\n previous = result[:]\n\nmax_x = max(result, key=lambda x: x[0])[0]\nmax_y = max(result, key=lambda x: x[1])[1]\nprint(max_x, max_y)\n\nfor y in range(max_y + 1):\n for x in range(max_x + 1):\n if (x, y) in result:\n print(\"#\", end=\"\")\n else:\n print(\" \", end=\"\")\n print()\n\nfrom PIL import Image\n\nimg = Image.new(\"RGB\", (max_x + 1, max_y + 1), \"black\")\npixels = img.load()\nfor dot in set(result):\n pixels[dot[0], dot[1]] = (255, 255, 255)\nimg.show()\n","repo_name":"coding-armadillo/advent-of-code","sub_path":"src/2021/day13_part2.py","file_name":"day13_part2.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"18129784287","text":"class Description:\n \"\"\"Satisfied Control Implementation Responsibility Description\n\n An implementation statement that describes the aspects of a control or\ncontrol statement implementation that a leveraging system is\nimplementing based on a requirement from a leveraged system.\n\n Attributes:\n prose (str):Default value holder for raw data in texts\n\n \"\"\"\n\n contexts = [\n \"oscal-ssp\",\n \"oscal-metadata\",\n \"oscal-implementation-common\",\n ]\n parameters = [\n ]\n subcomponents = [\n \"prose\",\n ]\n\n def __init__(\n self,\n use_name='description',\n prose=None,\n ):\n self._prose = None\n self.prose = \\\n prose\n self.use_name = use_name\n\n def __str__(self):\n\n return str(self.prose)\n\n @classmethod\n def fromDict(cls, obj):\n newcls = cls(\n prose=obj.get(\n 'prose',\n None),\n )\n return newcls\n\n @property\n def prose(self):\n \"\"\"Default 
value holder for raw data in texts\n \"\"\"\n return self._prose\n\n @prose.setter\n def prose(self, x):\n self._prose = x\n","repo_name":"SHRGroup/pyoscal","sub_path":"pyoscal/core/oscal_ssp/Description.py","file_name":"Description.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39226297588","text":"\r\nn = int(input())\r\nans = []\r\n\r\ndef check():\r\n\r\n if len(ans) == n:\r\n print(*ans, sep=\" \")\r\n return\r\n\r\n for i in range(1, n + 1):\r\n if i not in ans:\r\n ans.append(i)\r\n check()\r\n ans.pop()\r\n\r\ncheck()","repo_name":"unboxing96/ALGO","sub_path":"백준/Silver/10974. 모든 순열/모든 순열.py","file_name":"모든 순열.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70221922813","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 29 11:55:40 2016\n\n@author: pedromachado\n\"\"\"\n\nimport serial\nimport time\nimport sys\nimport os\nfrom subprocess import call\nimport struct\nimport numpy as np1\nimport matplotlib.pyplot as plt\nimport matplotlib.cbook as cbook\nimport struct\nimport random\nimport binascii\nimport numpy as np\n\ncycles=3500\nVm=-60.0\nC=1400.0\nstep=0.005\n\n\n\n#-------------------------------------------------------------Main Part of the Code---------------------------------------\n# DO NOT CHANGE\nstByte = 255\nreset = 238\n# Simulation and model config/initialisation\n#cyclesMSB=23\n#cyclesLSB=112\ncycles=struct.pack('>H',cycles)\nVm=struct.pack('>f',Vm)\nC=struct.pack('>f',C)\nstep=struct.pack('>f',step)\n\nport = serial.Serial(\"/dev/ttyUSB0\", baudrate=115200, timeout=0)\nprint (\"connected to: \" + port.portstr)\nfp=open('ASER.hex', 'wb+')\nfp.close()\nport.write(bytes([reset]))\ntime.sleep(1);\ntxTime=time.time()\n#start byte\nport.write(bytes([stByte]))\nport.write(cycles)\nport.write(Vm)\nport.write(C)\nport.write(step)\nnpi=True\nlast_time=time.time()\nprint(\"Receiving data from the FPGA...\")\ntxTime=time.time()\nwhile npi:\n line =port.read(7) # should block, not take anything less than 1500 bytes\n if line:\n fp=open('ASER.hex', 'ab+') \n #print (str(count)+\" \"+str(binascii.hexlify(line))) \n fp.write(line)\n fp.close()\n last_time=time.time()\n now=time.time()\n if now-last_time>1.0:\n npi=False \nport.close()\ntxTime=round(time.time()-txTime,4)\nprint(\"Received in \", str(txTime), \"s\")\nprint(\"All Data was received with success!\")\nprint (\"Port closed with success!\") \n\nfile1 = open('ASER.hex',\"rb\") \nblocksize = 450000000 #56.25 MB\n#reads from that block from the file and concatenates the values into string\nwith file1:\n block = file1.read(blocksize)\n rawHex = \"\"\n for ch in block:\n # [2:] helps get rid of 0x and zfill(2) makes sure that the hex are represented into two hex numbers\n rawHex += hex(ch)[2:].zfill(2)\nrawHex=binascii.a2b_hex(rawHex)\n#print(rawHex)\n#counter to take note of number of iterations\ncounter = 0\n#opening a file to write the output\nfile2 = open('ASER.csv',\"w+\")\nfile2.close()\n#file2.write('----Timestamp---- ')\n#file2.write(' ----Value----------- ')\n#file2.write(' ----Spike------\\n')\n#there might be more than one set of hex information\n#so it iterates as it breaks the numbers into a group of 7 bytes\nfile2 = open('ASER.csv',\"a+\")\nfor i in range (0,len(rawHex),7):\n timestamp=struct.unpack('>H',rawHex[i:i+2])[0]\n value=struct.unpack('>f',rawHex[i+2:i+6])[0]\n 
file2.write('%s'%timestamp)\n file2.write(';')\n file2.write('%s'%value)\n file2.write(';')\n file2.write('\\n')\nfile2.close()\n#once the file is completely read, the output file is closed and the opeartion is finished\nprint(\"Data anaysed.\") \n#print(\"Check log.txt file to see the output\")\n#call([\"cat\", \"log.csv\"])\n\n\nstim = 4\nstim_start1 = 100\nstim_end1 = 600\nstim_start2 = 1200\nstim_end2 = 1700\nstim_start3 = 2300\nstim_end3 = 2800\nstimulus=np.zeros(cycles-1)\nfor i in range(0,cycles-1,1):\n if (i>=stim_start1 and i<=stim_end1) or (i>=stim_start2 and i<=stim_end2) or (i>=stim_start3 and i<=stim_end3) :\n stimulus[i]=stim\n\ndata = np1.genfromtxt('ASER.csv', delimiter=';', names=['timestamp', 'Ap'])\nplt.figure(2)\nplt.suptitle(\"C elegans neuron model - ASER\", fontsize=16)\nplt.subplot(211)\nplt.show()\nplt.title(\"Action potential\")\nplt.xlabel('Time [ms]')\nplt.ylabel('Action potential [mV]')\nplt.plot(data['timestamp'], data['Ap'], color='r', label='Forces')\naxes = plt.gca()\naxes.set_xlim([0,3500])\naxes.set_ylim([-60,20])\n\nplt.subplot(212)\nplt.title(\"Stimulus\")\nplt.xlabel('Time [ms]')\nplt.ylabel('Current in [mA]')\nplt.plot(data['timestamp'], stimulus, color='b', label='Spikes')\naxes = plt.gca()\naxes.set_xlim([0,3500])\naxes.set_ylim([0,5])\nplt.show()\nprint(\"Data ploted\")","repo_name":"CNCR-NTU/NMModels","sub_path":"Results/ASER/receive_data_ASER.py","file_name":"receive_data_ASER.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40322451131","text":"from flask import Flask, session,request, render_template, redirect, url_for\nimport sqlite3\nfrom datetime import datetime\n\ndef review(clubname):\n if(request.method == 'POST'):\n conn = sqlite3.connect('database.db')\n c = conn.cursor()\n review = request.form['review']\n\n time = datetime.now().strftime(\"%B %d, %Y %I:%M%p\")\n \n username = session['username']\n c.execute('INSERT INTO reviews(username,review,time,clubname) VALUES(?,?,?,?)',(username,review,time,clubname))\n conn.commit()\n return redirect(url_for('index'))\n\n if('username' in session):\n conn = sqlite3.connect(\"database.db\")\n c = conn.cursor()\n able = c.execute('SELECT * FROM users WHERE username=?',(session['username'],)).fetchone()[3]\n if(able == \"False\"):\n return render_template(\"blog/review.html\",username=session[\"username\"])\n else:\n return redirect(url_for(\"index\"))\n else:\n return redirect(url_for(\"index\"))\n\n","repo_name":"naveenr414/Local-Hack-Day","sub_path":"reviewApp.py","file_name":"reviewApp.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27143253433","text":"import csv\nimport pickle\n\nimport pandas as pd\n\nfrom os.path import exists\n\nwork_folder = '/home/charan/workspace/bcr_analysis/'\ninput_folder = work_folder + 'input_files/'\nremoval_ratios_folder = work_folder + 'removal_ratios/'\n\nremoval_ratio_files = {}\ncity_rows = {}\nwith open(input_folder + 'cities.csv', 'r') as c:\n cities_reader = csv.reader(c, delimiter=';')\n next(cities_reader)\n header_row = next(cities_reader)\n for row in cities_reader:\n city_rows[row[0]] = row\n removal_ratio_files[row[0]] = removal_ratios_folder + row[0] + '.pckl'\n\narea_ratios = []\nblock_ratios = []\nmissing_cities = []\nbuilt_city_areas = {}\nfor city in removal_ratio_files:\n if not exists(removal_ratio_files[city]):\n print('Skipping ' 
+ city)\n missing_cities.append(city)\n continue\n\n with open(removal_ratio_files[city], 'rb') as i:\n [area_retain_ratio, block_retain_ratio, built_city_area] = pickle.load(i)\n\n area_ratios.append(area_retain_ratio)\n block_ratios.append(block_retain_ratio)\n built_city_areas[city] = built_city_area\n\ncity_rows = {k: v for k, v in city_rows.items() if k not in missing_cities}\n\n# Create a dataframe to store the ratios.\nblock_stats_df = pd.DataFrame(index=city_rows.keys())\nblock_stats_df['area_retain_ratio'] = area_ratios\nblock_stats_df['blocks_built_ratio'] = block_ratios\n\n# Threshold values for blocks built ratio and area retain ratio\nthreshold = [0.5, 0.5]\n\n# Filter the dataframe based on the threshold values.\ntemp_df_1 = block_stats_df[(block_stats_df['area_retain_ratio'] < threshold[0]) & (block_stats_df['blocks_built_ratio'] < threshold[1])]\ntemp_df_2 = block_stats_df[(block_stats_df['blocks_built_ratio'] < threshold[0]) & (block_stats_df['area_retain_ratio'] > threshold[1])]\n\nfiltered_cities_df = pd.concat([temp_df_1, temp_df_2], axis=0)\n\n# Write a new CSV with cities that satisfy the filtering criteria.\nwith open(input_folder + 'cities_filtered.csv', 'w') as o:\n csv_writer = csv.writer(o, delimiter=';')\n header_row.append('built_area')\n csv_writer.writerow(header_row)\n\n # Iterate through the cities and write the rows by appending city area to it.\n for city in filtered_cities_df.index:\n city_rows[city].append(built_city_areas[city])\n csv_writer.writerow(city_rows[city])\n\n# Write the dataframe to reuse later as an input for plotting Fig5.\nwith open(input_folder + 'block_stats.pckl', 'wb') as o:\n pickle.dump(block_stats_df, o)\n","repo_name":"nagacharan-tangirala/bcr_analysis","sub_path":"src/preprocess/filter_using_removal_ratios.py","file_name":"filter_using_removal_ratios.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32792339510","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\n\"\"\"\r\n The sim is a 3dof point mass rocket simulation. 
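Thrust, aerodynamic drag and gravity act on the vehicle, and the state is integrated forward in time with a simple Euler scheme.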
The state is expressed in the NED frame.\r\n\"\"\"\r\n# Earth constants\r\ng_0 = 9.80665 # m/s^2\r\nearth_mean_radius = 6378000 # m\r\n\r\n# atmospheric parameters\r\nR_specific = 287.053 # J/kg-K\r\nspecific_heat_ratio = 1.4\r\n\r\ndefault_atmospheric_params = {'height': [0.0, 11000.0, 20000.0, 32000.0, 47000.0, 51000.0, 71000.0], # m\r\n # Pa\r\n 'pressure': [101325.0, 22632.10, 5474.89, 868.02, 110.91, 66.94, 3.96],\r\n # K\r\n 'temperature': [288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 214.65],\r\n 'lapse_rate': [-0.0065, 0.0, 0.001, 0.0028, 0.0, -0.0028, -0.002]} # K/m\r\n\r\n\r\ndef atmospheric_model(height, use_standard=True, base_height=default_atmospheric_params['height'][0],\r\n base_temp=default_atmospheric_params['temperature'][0], base_pressure=default_atmospheric_params['pressure'][0]):\r\n if height < base_height:\r\n return {'success': False}\r\n\r\n if height >= 85.0e3:\r\n return _thermosphere(height)\r\n\r\n atmospheric_params = default_atmospheric_params\r\n\r\n # find the section of the atmospheric table that applies\r\n base_heights = atmospheric_params['height']\r\n index = np.nonzero(np.array(base_heights) <= height)[0][-1]\r\n\r\n base_height = base_heights[index]\r\n base_pressure = atmospheric_params['pressure'][index]\r\n base_temp = atmospheric_params['temperature'][index]\r\n lapse_rate = atmospheric_params['lapse_rate'][index]\r\n\r\n temperature, pressure = get_temperature_pressure_at_height(\r\n height, lapse_rate, base_height, base_temp, base_pressure)\r\n\r\n density = pressure / (R_specific * temperature)\r\n speed_of_sound = np.sqrt(\r\n specific_heat_ratio * R_specific * temperature)\r\n\r\n dynamic_viscosity = calculate_dynamic_viscosity_air(temperature)\r\n\r\n output = {'success': True, 'temperature': temperature,\r\n 'pressure': pressure, 'density': density,\r\n 'speed_of_sound': speed_of_sound, 'dynamic_viscosity': dynamic_viscosity}\r\n return output\r\n\r\n\r\ndef calculate_dynamic_viscosity_air(temperature):\r\n\r\n reference_viscosity = 1.458e-6\r\n reference_temperature = 110.4\r\n\r\n return (reference_viscosity * temperature ** 1.5) / (temperature + reference_temperature)\r\n\r\n\r\ndef get_temperature_pressure_at_height(height, lapse_rate, base_height, base_temp, base_pressure):\r\n if lapse_rate == 0.0:\r\n temperature = base_temp\r\n pressure = base_pressure * \\\r\n np.exp(-g_0 * (height - base_height) /\r\n (R_specific * base_temp))\r\n else:\r\n temperature = base_temp + lapse_rate * (height - base_height)\r\n pressure = base_pressure * \\\r\n (base_temp / temperature)**(g_0 /\r\n (R_specific * lapse_rate))\r\n\r\n return temperature, pressure\r\n\r\n\r\ndef _thermosphere(height):\r\n base_heights = [85000, 91000, 110000, 120000]\r\n\r\n if height < base_heights[0]:\r\n print(\"Should not call this function for altitudes lower than 85 km! 
 Using 'atmosphere(height)'.\")\r\n        return atmospheric_model(height)\r\n\r\n    if height >= base_heights[3]:\r\n        output = {'success': True, 'temperature': 0.0, 'pressure': 0.0,\r\n                  'density': 0.0, 'speed_of_sound': 1.0e-6, 'dynamic_viscosity': 0.0}\r\n        return output\r\n\r\n    if height < base_heights[1]:\r\n        temperature = 186.8673\r\n    elif height < base_heights[2]:\r\n        temperature = 263.1905 - 76.3232 * \\\r\n            np.sqrt(1.0 - ((height - base_heights[1])/-19.9429e3)**2)\r\n    else:\r\n        temperature = 240.0 + 12.0e-3 * (height - base_heights[2])\r\n\r\n    # pressure follows the same pattern in the entire thermosphere\r\n    height_in_km = height / 1.0e3\r\n    pressure = np.exp(-0.0000000422012 * (height_in_km)**5 + 0.0000213489 * (height_in_km)**4 -\r\n                      0.00426388 * (height_in_km)**3 + 0.421404 * (height_in_km)**2 - 20.8270 * (height_in_km) + 416.225)\r\n\r\n    density = pressure / (R_specific * temperature)\r\n    speed_of_sound = np.sqrt(\r\n        specific_heat_ratio * R_specific * temperature)\r\n    dynamic_viscosity = calculate_dynamic_viscosity_air(temperature)\r\n\r\n    output = {'success': True, 'temperature': temperature,\r\n              'pressure': pressure, 'density': density,\r\n              'speed_of_sound': speed_of_sound, 'dynamic_viscosity': dynamic_viscosity}\r\n\r\n    return output\r\n\r\n\r\ndef rot_x(theta):\r\n    return np.array([[1., 0., 0.], [0., np.cos(theta), np.sin(theta)], [0., -np.sin(theta), np.cos(theta)]])\r\n\r\n\r\ndef rot_y(theta):\r\n    return np.array([[np.cos(theta), 0., -np.sin(theta)], [0., 1., 0.], [np.sin(theta), 0., np.cos(theta)]])\r\n\r\n\r\ndef rot_z(theta):\r\n    return np.array([[np.cos(theta), np.sin(theta), 0.], [-np.sin(theta), np.cos(theta), 0.], [0., 0., 1.]])\r\n\r\n\r\nclass RocketSim:\r\n    def __init__(self):\r\n        self.time_final = 200 # seconds\r\n        self.dt = 0.01\r\n        self.no_of_points = int(np.floor(self.time_final/self.dt))\r\n        self.time = np.linspace(0, self.time_final, self.no_of_points)\r\n\r\n        # x, y, z, x_dot, y_dot, z_dot, m. 
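Units are metres, metres per second and kilograms.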
This state is expressed in the NED frame.\r\n self.state = np.zeros((self.no_of_points, 7))\r\n self.theta = np.zeros((self.no_of_points, 1))\r\n self.thrust = np.zeros((self.no_of_points, 3))\r\n self.gravity = np.zeros((self.no_of_points, 3))\r\n self.drag = np.zeros((self.no_of_points, 3))\r\n self.alpha = np.zeros((self.no_of_points, 1))\r\n\r\n self.theta_start = 89 * np.pi/180\r\n self.theta_end = 0 * np.pi/180\r\n\r\n # Engine parameters\r\n self.Isp = 290 # seconds\r\n self.m_dot = 1 # kg/s\r\n self.no_of_engines = 1\r\n\r\n # Rocket aerodynamic properties\r\n self.cd = 0.5\r\n self.area = 1 # m^2 (cross sectional area)\r\n\r\n # Rocket mass properties\r\n self.dry_mass = 10 # kg\r\n self.fuel = 90 # kg\r\n self.state[0, 6] = self.dry_mass + self.fuel\r\n self.time_final_burn = self.fuel/self.m_dot\r\n\r\n def get_thrust_per_engine(self, m_dot):\r\n # to do with engine parameters\r\n return self.Isp * g_0 * m_dot\r\n\r\n def get_thrust_force(self, m_dot, C_B2N, switch_on):\r\n if switch_on:\r\n thrust = self.get_thrust_per_engine(m_dot)\r\n return np.matmul(C_B2N, np.array([self.no_of_engines * thrust, 0., 0.]))\r\n else:\r\n return np.array([0., 0., 0.])\r\n\r\n def get_drag(self, timestep):\r\n velocity = np.sqrt(self.state[timestep, 3]\r\n ** 2 + self.state[timestep, 5]**2)\r\n alt = -self.state[timestep, 2]\r\n atm_model = atmospheric_model(alt)\r\n\r\n drag = self.cd * self.area * atm_model['density'] * velocity**2\r\n return drag\r\n\r\n def get_drag_force(self, timestep, C_B2N, C_V2B, switch_on):\r\n if switch_on:\r\n drag = self.get_drag(timestep)\r\n return np.matmul(C_B2N, np.matmul(C_V2B, np.array([-drag, 0, 0])))\r\n else:\r\n return np.array([0., 0., 0.])\r\n\r\n def get_gravity(self, timestep):\r\n alt = -self.state[timestep, 2]\r\n return g_0 * (earth_mean_radius/(earth_mean_radius + alt))**2\r\n\r\n def get_gravity_force(self, timestep, switch_on):\r\n if switch_on:\r\n gravity = self.get_gravity(timestep)\r\n return np.array([0., 0., self.state[timestep, 6]*gravity])\r\n else:\r\n return np.array([0., 0., 0.])\r\n\r\n def get_C_N2B(self, theta):\r\n return rot_y(theta)\r\n\r\n def get_beta(self, timestep):\r\n return np.arctan2(-self.state[timestep, 5], self.state[timestep, 3])\r\n\r\n def get_theta(self, timestep):\r\n # Using linear tangent law\r\n cur_time = self.time[timestep]\r\n if cur_time <= self.time_final_burn:\r\n # print(np.tan(self.theta_start) - (np.tan(self.theta_start) -\r\n # np.tan(self.theta_end))/self.time_final_burn * cur_time)\r\n # print(cur_time, self.time_final_burn)\r\n\r\n return np.arctan2(np.tan(self.theta_start) - (np.tan(self.theta_start) - np.tan(self.theta_end))/self.time_final_burn * cur_time, 1)\r\n else:\r\n return self.theta_end\r\n\r\n def get_alpha(self, theta, timestep):\r\n beta = self.get_beta(timestep)\r\n return theta - beta\r\n\r\n def get_C_V2B(self, theta, timestep):\r\n alpha = self.get_alpha(theta, timestep)\r\n self.alpha[timestep] = alpha\r\n return rot_y(alpha)\r\n\r\n def propogate_state(self, timestep):\r\n self.theta[timestep] = self.get_theta(timestep)\r\n\r\n if self.state[timestep, 6] <= self.dry_mass:\r\n # Exhausted the fuel\r\n m_dot = 0.0\r\n else:\r\n m_dot = -self.m_dot\r\n\r\n # Testing\r\n # print(self.theta[timestep])\r\n C_B2N = self.get_C_N2B(self.theta[timestep, 0]).T\r\n\r\n C_V2B = self.get_C_V2B(self.theta[timestep], timestep)\r\n self.drag[timestep] = self.get_drag_force(\r\n timestep, C_B2N, C_V2B, True)\r\n # print(self.drag[timestep])\r\n self.gravity[timestep] = 
self.get_gravity_force(timestep, True)\r\n\r\n        self.thrust[timestep] = self.get_thrust_force(-m_dot, C_B2N, True)\r\n\r\n        acc = (self.drag[timestep] + self.gravity[timestep] + self.thrust[timestep]) / \\\r\n            self.state[timestep, 6]\r\n\r\n        return np.array([self.state[timestep, 3], self.state[timestep, 4], self.state[timestep, 5], acc[0], acc[1], acc[2], m_dot])\r\n\r\n    def plot_figs(self):\r\n        # X plotting\r\n        plt.figure()\r\n\r\n        plt.subplot(1, 2, 1)\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"North (m)\")\r\n        plt.plot(self.time, self.state[:, 0])\r\n\r\n        # Altitude plotting\r\n\r\n        plt.subplot(1, 2, 2)\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"Altitude (m)\")\r\n        plt.plot(self.time, -self.state[:, 2])\r\n\r\n        # X vs altitude\r\n        plt.figure()\r\n        plt.xlabel(\"X (m)\")\r\n        plt.ylabel(\"Altitude (m)\")\r\n        plt.plot(self.state[:, 0], -self.state[:, 2])\r\n\r\n        # Theta plotting\r\n        plt.figure()\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"Theta (deg)\")\r\n        plt.plot(self.time, self.theta*180/np.pi)\r\n\r\n        # Vel north plotting\r\n        plt.figure()\r\n        plt.subplot(1, 2, 1)\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"North vel(m/s)\")\r\n        plt.plot(self.time, self.state[:, 3])\r\n\r\n        # Vel down plotting\r\n        plt.subplot(1, 2, 2)\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"Down vel (m/s)\")\r\n        plt.plot(self.time, self.state[:, 5])\r\n\r\n        # Mass plotting\r\n        plt.figure()\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"Mass (kg)\")\r\n        plt.plot(self.time, self.state[:, 6])\r\n\r\n        # Thrust\r\n        plt.figure()\r\n        plt.subplot(1, 3, 1)\r\n        plt.plot(self.time, self.thrust[:, 0])\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"Thrust x (N)\")\r\n\r\n        plt.subplot(1, 3, 2)\r\n        plt.plot(self.time, self.thrust[:, 1])\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"Thrust y (N)\")\r\n\r\n        plt.subplot(1, 3, 3)\r\n        plt.plot(self.time, self.thrust[:, 2])\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"Thrust z (N)\")\r\n\r\n        # Gravity\r\n        plt.figure()\r\n        plt.subplot(1, 3, 1)\r\n        plt.plot(self.time, self.gravity[:, 0])\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"Gravity x (N)\")\r\n\r\n        plt.subplot(1, 3, 2)\r\n        plt.plot(self.time, self.gravity[:, 1])\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"Gravity y (N)\")\r\n\r\n        plt.subplot(1, 3, 3)\r\n        plt.plot(self.time, self.gravity[:, 2])\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"Gravity z (N)\")\r\n\r\n        # Drag\r\n        plt.figure()\r\n        plt.subplot(1, 3, 1)\r\n        plt.plot(self.time, self.drag[:, 0])\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"Drag x (N)\")\r\n\r\n        plt.subplot(1, 3, 2)\r\n        plt.plot(self.time, self.drag[:, 1])\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"Drag y (N)\")\r\n\r\n        plt.subplot(1, 3, 3)\r\n        plt.plot(self.time, self.drag[:, 2])\r\n        plt.xlabel(\"Time (s)\")\r\n        plt.ylabel(\"Drag z (N)\")\r\n\r\n        # angle of attack\r\n        plt.figure()\r\n        plt.plot(self.time, self.alpha*180/np.pi)\r\n        plt.xlabel('Time (s)')\r\n        plt.ylabel(\"Angle of attack (deg)\")\r\n        plt.show()\r\n\r\n    def run_sim(self):\r\n        timestep = 0\r\n        while timestep < self.no_of_points-1:\r\n            x_dot = self.propogate_state(timestep)\r\n            # print(\"X dot\", x_dot)\r\n            self.state[timestep+1] = self.state[timestep] + \\\r\n                x_dot*self.dt\r\n            # print(\"state\", self.state[timestep])\r\n            timestep = timestep + 1\r\n\r\n        self.plot_figs()\r\n\r\n\r\ndef main():\r\n    rsim = RocketSim()\r\n    rsim.run_sim()\r\n\r\n\r\nif __name__ == '__main__':\r\n    
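# Running this module directly builds a RocketSim, runs the full simulation and shows the result plots.\r\n    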
main()","repo_name":"anirudhtorres/rocket_sim","sub_path":"rocket_sim.py","file_name":"rocket_sim.py","file_ext":"py","file_size_in_byte":13035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30757854717","text":"import unreal_engine as ue\nfrom unreal_engine import SWindow, SBox, SPythonComboBox, STextBlock, SBorder, SVerticalBox, SEditableTextBox, SHorizontalBox, SButton, SPythonListView\nfrom unreal_engine.enums import EHorizontalAlignment, EVerticalAlignment\nfrom unreal_engine import FLinearColor\nfrom unreal_engine.structs import SlateColor, Margin\nimport xmlrpc.client\nfrom unreal_engine.classes import ActorComponent\nfrom collections import Counter\nfrom unreal_engine.classes import Material\nfrom ast import literal_eval\n\n\nimport sys\n\nprint('version :', sys.version)\nworld = ue.get_editor_world()\n\n\ndef get_mbu_product():\n\tprint(\"get_mbu_product\")\n\tactors = world.all_actors()\n\t\n\t#print(\"\",[ a.get_actor_component_by_type('StaticMesh') for a in actors])\n\tmbu_actors = filter(lambda a : len(a.tags)>1 and a.tags[0]=='mbu', actors)\n\tprint(\"=====>\",mbu_actors)\n\treturn [a.get_property('ref') for a in mbu_actors]\n\n\nurl = 'http://pos.agiloc.org:9090'\ndb = '00mbu_test'\nusername = 'admin'\npassword = 'MBU@ao2017'\n \ncommon = xmlrpc.client.ServerProxy('{}/xmlrpc/2/common'.format(url))\nprint('version',common.version())\n\nuid = common.authenticate(db, username, password, {})\nmodels = xmlrpc.client.ServerProxy('{}/xmlrpc/2/object'.format(url))\n\n\ndef get_projects():\n\timported= False \n\tleads= models.execute_kw(\n\tdb, uid,password, 'crm.lead', 'search_read',\n\t[[['is_blender_lead','=',True]]], {'fields': ['id','name','create_date']})\n\treturn leads\n\n\nprojects= [str((p['id'],p['name'])) for p in get_projects()]\n\n\nclass DynamicComboBox:\n\n    def __init__(self, items):\n        self.box = SBox(height_override=100, min_desired_width=400)\n        self.items = items\n        self.build_combo_box()\n        self.combo_box.set_selected_item(self.items[0])\n\n    def get_widget(self):\n        return self.box\n\n    def generate_combo_box_widget(self, item):\n        return STextBlock(text=item)\n\n    def append(self, item, committed):\n        if item and item not in self.items:\n            self.items.append(item)\n            self.build_combo_box()\n            self.combo_box.set_selected_item(item)\n\n    def get_current_item(self):\n        return self.combo_box.get_selected_item()\n\n    def build_combo_box(self):\n        self.combo_box = SPythonComboBox(options_source=self.items, on_generate_widget=self.generate_combo_box_widget, content=STextBlock(text=self.get_current_item))\n        self.box.set_content(self.combo_box)\n\n\nclass ListView:\n\n    def __init__(self, items):\n        self.box = SBox(height_override=100, min_desired_width=400)\n        self.items = items\n        self.build_list_view()\n\n    def get_widget(self):\n        return self.box\n\n    def generate_list_view_widget(self, item):\n        return STextBlock(text=item)\n\n    def append(self, item, committed):\n        if item and item not in self.items:\n            self.items.append(item)\n            self.build_list_view()\n\n    def build_list_view(self):\n        self.list_view = SPythonListView(list_items=self.items, on_generate_widget=self.generate_list_view_widget)\n        self.box.set_content(self.list_view)\n\ndynamic_combo_box = DynamicComboBox(projects)\n\nlist_view = ListView(projects)\n\ndef update_prd_odoo():\n\tlead_id = 
literal_eval(dynamic_combo_box.get_current_item())\n\tprint('dynamic_combo_box',lead_id[0])\n\tprint('products :', dict(Counter(get_mbu_product())))\n\todoo_context={'blender_select':{'name':'blabla','blender_file':'test','products':dict(Counter(get_mbu_product()))}}\n\tprint(odoo_context) \n\tleads= models.execute_kw(db, uid, password, 'crm.lead.blender.model', 'save_last_blender_model',[lead_id[0], odoo_context])\n\n\n\n# the final backslash is required for the 'pretty syntax'\nSWindow(client_size=(1024, 576), title='DynamicComboBox')\\\n(\n    SBorder(color_and_opacity=FLinearColor(1, 1, 1, 1), border_background_color=SlateColor(SpecifiedColor=FLinearColor(1, 1, 1, 1)))\n    (\n        SBox(h_align=EHorizontalAlignment.HAlign_Center, v_align=EVerticalAlignment.VAlign_Center)\n        (\n            SBorder(color_and_opacity=FLinearColor(1, 1, 1, 1), border_background_color=SlateColor(SpecifiedColor=FLinearColor(1, 1, 1, 1)))\n            (\n\t\t\t\tSVerticalBox()\n                (\n                    dynamic_combo_box.get_widget()\n                )\n(list_view.get_widget())\n                (\n                    SHorizontalBox()(\n                        SButton(text='Upload', on_clicked=update_prd_odoo),\n                        fill_width=0.2\n                    )\n                )\n            )\n        )\n    )\n)\n","repo_name":"AyoubZahid/MyProject","sub_path":"Plugins/MBU_CATALOGUE/Content/Scripts/odoo.py","file_name":"odoo.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33353174926","text":"#!/usr/bin/python\nimport sys\nimport hashlib\n\nif (len(sys.argv) != 3):\n    print(\"Wrong number of arguments. Please provide two input PDG files.\")\n    print(\"Usage: ./comparator.py <pdg_file_1> <pdg_file_2>\")\n    sys.exit(0)\n\nnodes1 = []\nnodes2 = []\n\ndef nonblank(f):\n    for l in f:\n        line = l.rstrip()\n        if line:\n            yield line\n\nclass pdgnode:\n    def __init__(self, idval):\n        self.idh = hashlib.md5(idval.encode()).hexdigest()\n        self.name = idval\n        self.alias_depend = []\n        self.data_depend = []\n        self.ctrl_depend = []\n\n    def add_alias_depend(self, node):\n        self.alias_depend.append(node)\n\n    def add_ctrl_depend(self, node):\n        self.ctrl_depend.append(node)\n\n    def add_data_depend(self, node):\n        self.data_depend.append(node)\n\n    def __repr__(self, l=0):\n        retstr = \"\"\n        for i in range(l): retstr += \"\\t\"\n        retstr += \"{ %s : %s\\n\" % (self.idh, self.name)\n        for i in range(l): retstr += \"\\t\"\n        retstr += \"alias:\\n\"\n        for a in self.alias_depend:\n            for i in range(l): retstr += \"\\t\"\n            retstr += a.__repr__() + \"\\n\"\n        for i in range(l): retstr += \"\\t\"\n        retstr += \"data:\\n\"\n        for d in self.data_depend:\n            for i in range(l): retstr += \"\\t\"\n            retstr += d.__repr__() + \"\\n\"\n        for i in range(l): retstr += \"\\t\"\n        retstr += \"ctrl:\\n\"\n        for c in self.ctrl_depend:\n            for i in range(l): retstr += \"\\t\"\n            retstr += c.__repr__() + \"\\n\"\n        for i in range(l): retstr += \"\\t\"\n        retstr += \"}\"\n        return retstr\n\nfname = sys.argv[1]\nfname2 = sys.argv[2]\ncur_node = 0\nstart = False\n\nwith open(fname) as infile:\n    for line in nonblank(infile):\n        if (line == \"[pdg] ====== PDG GRAPH COMPUTED ======\"):\n            start = True\n\n        if (start == True):\n            elements = line.split()\n            if (elements[0] == \"[Elem]\"):\n                nodes1.append(pdgnode(elements[1]))\n                cur_node = nodes1[-1]\n            elif (elements[0][2] == 'a'):\n                cur_node.add_alias_depend(elements[1])\n            elif (elements[0][3] == 'c'):\n                cur_node.add_ctrl_depend(elements[1])\n            elif (elements[0][4] == 'd'):\n                cur_node.add_data_depend(elements[1])\n            else:\n                continue\n\nstart = False\ncur_node = 0\n\nwith open(fname2) as infile:\n    for line in nonblank(infile):\n        if (line == \"[pdg] ====== PDG
GRAPH COMPUTED ======\"):\n start = True\n\n if (start == True):\n elements = line.split()\n if (len(elements[0]) < 5):\n continue\n if (elements[0] == \"[Elem]\"):\n nodes2.append(pdgnode(elements[1]))\n cur_node = nodes2[-1]\n elif (elements[0][2] == 'a'):\n cur_node.add_alias_depend(elements[1])\n elif (elements[0][3] == 'c'):\n cur_node.add_ctrl_depend(elements[1])\n elif (elements[0][4] == 'd'):\n cur_node.add_data_depend(elements[1])\n else:\n continue\n\nfor node1 in nodes1:\n for node2 in nodes2:\n if (node1.name == node2.name and node1.alias_depend == node2.alias_depend and\n node1.ctrl_depend == node2.ctrl_depend and node1.data_depend == node2.data_depend):\n print(\"Match found!\\nNode 1: \")\n print(node1)\n print(\"\\nNode 2: \")\n print(node2)\n","repo_name":"mosqutip/EECS395-27","sub_path":"Matlack+Swiech_Code/comparator.py","file_name":"comparator.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"272071341","text":"import copy\nimport itertools\nimport unittest\n\nfrom ConfigSpace import Configuration\n\nimport numpy as np\n\nimport pandas as pd\n\nimport torch\n\nfrom autoPyTorch.constants import TASK_TYPES_TO_STRING, TIMESERIES_FORECASTING\nfrom autoPyTorch.pipeline.components.setup.network_backbone.forecasting_backbone import ForecastingNetworkChoice\nfrom autoPyTorch.pipeline.components.setup.network_backbone.forecasting_backbone.components_util import NetworkStructure\nfrom autoPyTorch.pipeline.components.setup.network_backbone.forecasting_backbone.forecasting_decoder.MLPDecoder import (\n ForecastingMLPDecoder\n)\nfrom autoPyTorch.pipeline.components.setup.network_backbone.forecasting_backbone.forecasting_decoder.components import (\n DecoderBlockInfo\n)\nfrom autoPyTorch.pipeline.components.setup.network_backbone.forecasting_backbone.forecasting_encoder.\\\n base_forecasting_encoder import BaseForecastingEncoder\nfrom autoPyTorch.pipeline.components.setup.network_backbone.forecasting_backbone.forecasting_encoder.components import (\n EncoderBlockInfo,\n EncoderNetwork\n)\nfrom autoPyTorch.pipeline.components.setup.network_head.forecasting_network_head.distribution import (\n ALL_DISTRIBUTIONS,\n DisForecastingStrategy\n)\nfrom autoPyTorch.pipeline.components.setup.network_head.forecasting_network_head.forecasting_head import ForecastingHead\nfrom autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdate\n\n\nclass DummyEmbedding(torch.nn.Module):\n def forward(self, x):\n if x.shape[-1] > 10:\n return x[..., :-10]\n return x\n\n\nclass DummyEncoderNetwork(EncoderNetwork):\n def forward(self, x, output_seq=False):\n if output_seq:\n return torch.ones((*x.shape[:-1], 10))\n return torch.ones((*x.shape[:-2], 1, 10))\n\n\nclass DummyForecastingEncoder(BaseForecastingEncoder):\n def n_encoder_output_feature(self):\n return 10\n\n def build_encoder(self, input_shape):\n return DummyEncoderNetwork()\n\n\nclass DummyTranformers():\n def __call__(self, x):\n return x[..., :(x.shape[-1] // 2)]\n\n\ndef generate_fit_dict_and_dataset_property():\n embedding = DummyEmbedding()\n\n transformation = [DummyTranformers()]\n n_prediction_steps = 3\n input_shape = (100, 50)\n output_shape = (n_prediction_steps, 1)\n time_feature_transform = [1, 2]\n\n feature_names = ('f1', 'f2', 'f3', 'f4', 'f5')\n feature_shapes = {'f1': 10, 'f2': 10, 'f3': 10, 'f4': 10, 'f5': 10}\n known_future_features = ('f1', 'f2', 'f3', 'f4', 'f5')\n\n dataset_properties = 
dict(input_shape=input_shape,\n output_shape=output_shape,\n transform_time_features=True,\n time_feature_transform=time_feature_transform,\n feature_shapes=feature_shapes,\n known_future_features=known_future_features,\n n_prediction_steps=n_prediction_steps,\n encoder_can_be_auto_regressive=True,\n feature_names=feature_names,\n is_small_preprocess=True,\n task_type=TASK_TYPES_TO_STRING[TIMESERIES_FORECASTING],\n uni_variant=False,\n future_feature_shapes=(n_prediction_steps, 50),\n )\n\n fit_dictionary = dict(X_train=pd.DataFrame(np.random.randn(*input_shape)),\n y_train=pd.DataFrame(np.random.randn(*output_shape)),\n network_embedding=embedding,\n preprocess_transforms=transformation,\n transform_time_features=True,\n window_size=5\n )\n\n return dataset_properties, fit_dictionary\n\n\nclass TestForecastingNetworkBases(unittest.TestCase):\n def setUp(self) -> None:\n self.dataset_properties, self.fit_dictionary = generate_fit_dict_and_dataset_property()\n\n self.encoder = DummyForecastingEncoder()\n\n mlp_cs = ForecastingMLPDecoder.get_hyperparameter_search_space(self.dataset_properties,\n can_be_auto_regressive=True)\n mlp_cfg_non_ar_w_local = mlp_cs.get_default_configuration()\n mlp_cfg_non_ar_wo_local = copy.copy(mlp_cfg_non_ar_w_local.get_dictionary())\n\n mlp_cfg_non_ar_wo_local['has_local_layer'] = False\n mlp_cfg_non_ar_wo_local.pop('units_local_layer')\n\n mlp_cfg_ar = copy.copy(mlp_cfg_non_ar_wo_local)\n mlp_cfg_ar.pop('has_local_layer')\n mlp_cfg_ar['auto_regressive'] = True\n\n mlp_cfg_non_ar_wo_local = Configuration(mlp_cs, values=mlp_cfg_non_ar_wo_local)\n mlp_cfg_ar = Configuration(mlp_cs, values=mlp_cfg_ar)\n\n self.decoder_ar = ForecastingMLPDecoder(**mlp_cfg_ar)\n self.decoder_w_local = ForecastingMLPDecoder(**mlp_cfg_non_ar_w_local)\n self.decoder_wo_local = ForecastingMLPDecoder(**mlp_cfg_non_ar_wo_local)\n\n self.decoders = {\"non_ar_w_local\": self.decoder_w_local,\n \"non_ar_wo_local\": self.decoder_wo_local,\n \"ar\": self.decoder_ar}\n\n def test_encoder_choices(self):\n dataset_properties = {'task_type': TASK_TYPES_TO_STRING[TIMESERIES_FORECASTING]}\n encoder_choices = ForecastingNetworkChoice(dataset_properties)\n cs = encoder_choices.get_hyperparameter_search_space(dataset_properties)\n self.assertListEqual(list(cs.get_hyperparameter('__choice__').choices), ['flat_encoder', 'seq_encoder'])\n\n cs_only_flat = encoder_choices.get_hyperparameter_search_space(dataset_properties, include=['flat_encoder'])\n for hp_name in cs_only_flat.get_hyperparameter_names():\n self.assertFalse(hp_name.startswith('seq_encoder'))\n\n cs_only_flat = encoder_choices.get_hyperparameter_search_space(dataset_properties, include=['flat_encoder'])\n for hp_name in cs_only_flat.get_hyperparameter_names():\n self.assertFalse(hp_name.startswith('seq_encoder'))\n\n cs_only_rnn = encoder_choices.get_hyperparameter_search_space(dataset_properties,\n include=['seq_encoder:RNNEncoder'])\n\n self.assertListEqual(list(cs_only_rnn.get_hyperparameter('__choice__').choices), ['seq_encoder'])\n self.assertListEqual(list(cs_only_rnn.get_hyperparameter('seq_encoder:block_1:__choice__').choices),\n ['RNNEncoder'])\n\n cs_no_rnn = encoder_choices.get_hyperparameter_search_space(dataset_properties,\n exclude=['seq_encoder:RNNEncoder'])\n for hp_name in cs_no_rnn.get_hyperparameter_names():\n self.assertFalse('RNNEncoder' in hp_name)\n\n sample = cs.sample_configuration()\n\n encoder_choices = encoder_choices.set_hyperparameters(sample)\n self.assertIsInstance(encoder_choices.choice.choice, 
BaseForecastingEncoder)\n\n encoder_choices = ForecastingNetworkChoice(dataset_properties)\n\n update_seq = HyperparameterSearchSpaceUpdate(node_name=\"network_backbone\",\n hyperparameter='__choice__',\n value_range=('seq_encoder',),\n default_value='seq_encoder', )\n\n encoder_choices._apply_search_space_update(update_seq)\n cs_seq = encoder_choices.get_hyperparameter_search_space(dataset_properties)\n self.assertListEqual(list(cs_seq.get_hyperparameter('__choice__').choices), ['seq_encoder'])\n\n encoder_choices = ForecastingNetworkChoice(dataset_properties)\n update_rnn_decoder_type = HyperparameterSearchSpaceUpdate(\n node_name=\"network_backbone\",\n hyperparameter='seq_encoder:block_1:RNNEncoder:decoder_type',\n value_range=('MLPDecoder',),\n default_value='MLPDecoder', )\n encoder_choices._apply_search_space_update(update_rnn_decoder_type)\n cs_seq = encoder_choices.get_hyperparameter_search_space(dataset_properties)\n hp_rnn_decoder_type = cs_seq.get_hyperparameter(update_rnn_decoder_type.hyperparameter)\n self.assertListEqual(list(hp_rnn_decoder_type.choices), ['MLPDecoder'])\n\n def test_base_encoder(self):\n window_size = self.fit_dictionary['window_size']\n all_settings = [(True, False)] * 4\n for hp_values in itertools.product(*all_settings):\n uni_variant = hp_values[0]\n variable_selection = hp_values[1]\n transform_time_features = hp_values[2]\n is_small_preprocess = hp_values[3]\n with self.subTest(uni_variant=uni_variant,\n variable_selection=variable_selection,\n transform_time_features=transform_time_features,\n is_small_preprocess=is_small_preprocess):\n network_structure = NetworkStructure(variable_selection=variable_selection)\n\n dataset_properties = copy.copy(self.dataset_properties)\n fit_dictionary = copy.copy(self.fit_dictionary)\n\n dataset_properties['is_small_preprocess'] = is_small_preprocess\n dataset_properties['uni_variant'] = uni_variant\n\n fit_dictionary['dataset_properties'] = self.dataset_properties\n fit_dictionary['network_structure'] = network_structure\n fit_dictionary['transform_time_features'] = transform_time_features\n fit_dictionary['dataset_properties'] = dataset_properties\n\n encoder_block_1 = copy.deepcopy(self.encoder)\n\n encoder_block_2 = copy.deepcopy(self.encoder)\n encoder_block_2.block_number = 2\n\n encoder_block_1 = encoder_block_1.fit(fit_dictionary)\n fit_dictionary = encoder_block_1.transform(fit_dictionary)\n network_encoder = fit_dictionary['network_encoder']\n self.assertIsInstance(network_encoder['block_1'], EncoderBlockInfo)\n self.assertEqual(network_encoder['block_1'].encoder_output_shape, (1, 10))\n\n if variable_selection:\n self.assertEqual(network_encoder['block_1'].encoder_input_shape, (window_size, 10))\n else:\n if uni_variant:\n n_input_features = 0\n else:\n if is_small_preprocess:\n n_input_features = 40\n else:\n n_input_features = 15\n\n if transform_time_features:\n n_input_features += len(dataset_properties['time_feature_transform'])\n\n n_input_features += dataset_properties['output_shape'][-1]\n self.assertEqual(network_encoder['block_1'].encoder_input_shape, (window_size,\n n_input_features))\n\n encoder_block_2 = encoder_block_2.fit(fit_dictionary)\n fit_dictionary = encoder_block_2.transform(fit_dictionary)\n\n network_encoder = fit_dictionary['network_encoder']\n self.assertIsInstance(network_encoder['block_2'], EncoderBlockInfo)\n self.assertEqual(network_encoder['block_2'].encoder_output_shape, (1, 10))\n self.assertEqual(network_encoder['block_2'].encoder_input_shape, (1, 10))\n\n def 
test_base_decoder(self):\n n_prediction_steps = self.dataset_properties['n_prediction_steps']\n for variable_selection in (True, False):\n with self.subTest(variable_selection=variable_selection):\n network_structure = NetworkStructure(variable_selection=variable_selection, num_blocks=2)\n dataset_properties = copy.copy(self.dataset_properties)\n fit_dictionary = copy.copy(self.fit_dictionary)\n\n fit_dictionary['network_structure'] = network_structure\n fit_dictionary['dataset_properties'] = dataset_properties\n\n encoder_block_1 = copy.deepcopy(self.encoder)\n encoder_block_2 = copy.deepcopy(self.encoder)\n encoder_block_2.block_number = 2\n\n encoder_block_1 = encoder_block_1.fit(fit_dictionary)\n fit_dictionary = encoder_block_1.transform(fit_dictionary)\n encoder_block_2 = encoder_block_2.fit(fit_dictionary)\n fit_dictionary = encoder_block_2.transform(fit_dictionary)\n\n decoder1 = copy.deepcopy(self.decoder_w_local)\n decoder1 = decoder1.fit(fit_dictionary)\n self.assertEqual(decoder1.n_prediction_heads, n_prediction_steps)\n fit_dictionary = decoder1.transform(fit_dictionary)\n\n network_decoder = fit_dictionary['network_decoder']\n self.assertIsInstance(network_decoder['block_1'], DecoderBlockInfo)\n if variable_selection:\n self.assertEqual(network_decoder['block_1'].decoder_input_shape,\n (n_prediction_steps, 10)) # Pure variable selection\n self.assertEqual(network_decoder['block_1'].decoder_output_shape,\n (n_prediction_steps, 26)) # 10 (input features) + 16 (n_output_dims)\n else:\n self.assertEqual(network_decoder['block_1'].decoder_input_shape,\n (n_prediction_steps, 52)) # 50 (input features) + 2 (time_transforms)\n self.assertEqual(network_decoder['block_1'].decoder_output_shape,\n (n_prediction_steps, 68)) # 52 (input features) + 16 (n_out_dims)\n\n for name, decoder in self.decoders.items():\n with self.subTest(decoder_name=name):\n fit_dictionary_ = copy.deepcopy(fit_dictionary)\n decoder2 = copy.deepcopy(decoder)\n decoder2.block_number = 2\n decoder2 = decoder2.fit(fit_dictionary_)\n fit_dictionary_ = decoder2.transform(fit_dictionary_)\n self.assertTrue(decoder2.is_last_decoder)\n if name == 'ar':\n self.assertEqual(fit_dictionary_['n_prediction_heads'], 1)\n else:\n self.assertEqual(fit_dictionary_['n_prediction_heads'], n_prediction_steps)\n n_prediction_heads = fit_dictionary_['n_prediction_heads']\n\n network_decoder = fit_dictionary_['network_decoder']['block_2']\n self.assertIsInstance(network_decoder, DecoderBlockInfo)\n if variable_selection:\n self.assertEqual(network_decoder.decoder_input_shape, (n_prediction_heads, 26))\n\n if name == 'non_ar_w_local':\n # 26+16\n self.assertEqual(network_decoder.decoder_output_shape, (n_prediction_heads, 42))\n elif name == 'non_ar_wo_local':\n # num_global\n self.assertEqual(network_decoder.decoder_output_shape, (n_prediction_heads, 32))\n elif name == 'ar':\n self.assertEqual(network_decoder.decoder_output_shape, (n_prediction_heads, 32)) # 32\n else:\n self.assertEqual(network_decoder.decoder_input_shape, (n_prediction_heads, 68))\n\n if name == 'non_ar_w_local':\n # 26+16\n self.assertEqual(network_decoder.decoder_output_shape, (n_prediction_heads, 84))\n elif name == 'non_ar_wo_local':\n # num_global\n self.assertEqual(network_decoder.decoder_output_shape, (n_prediction_heads, 32))\n elif name == 'ar':\n self.assertEqual(network_decoder.decoder_output_shape, (n_prediction_heads, 32)) # 32\n\n def test_forecasting_heads(self):\n variable_selection = False\n n_prediction_steps = 
self.dataset_properties[\"n_prediction_steps\"]\n\n network_structure = NetworkStructure(variable_selection=variable_selection, num_blocks=1)\n\n dataset_properties = copy.copy(self.dataset_properties)\n fit_dictionary = copy.copy(self.fit_dictionary)\n\n input_tensor = torch.randn([10, 20, 3 + fit_dictionary['X_train'].shape[-1]])\n input_tensor_future = torch.randn([10, n_prediction_steps, 2 + fit_dictionary['X_train'].shape[-1]])\n\n network_embedding = self.fit_dictionary['network_embedding']\n input_tensor = network_embedding(input_tensor)\n\n fit_dictionary['dataset_properties'] = self.dataset_properties\n fit_dictionary['network_structure'] = network_structure\n fit_dictionary['transform_time_features'] = True\n fit_dictionary['dataset_properties'] = dataset_properties\n encoder = copy.deepcopy(self.encoder)\n encoder = encoder.fit(fit_dictionary)\n fit_dictionary = encoder.transform(fit_dictionary)\n\n quantiles = [0.5, 0.1, 0.9]\n for name, decoder in self.decoders.items():\n with self.subTest(decoder_name=name):\n fit_dictionary_ = copy.deepcopy(fit_dictionary)\n decoder = decoder.fit(fit_dictionary_)\n fit_dictionary_ = decoder.transform(fit_dictionary_)\n\n for net_output_type in ['regression', 'distribution', 'quantile']:\n def eval_heads_output(fit_dict):\n head = ForecastingHead()\n head = head.fit(fit_dict)\n fit_dictionary_copy = head.transform(fit_dict)\n\n encoder = fit_dictionary_copy['network_encoder']['block_1'].encoder\n decoder = fit_dictionary_copy['network_decoder']['block_1'].decoder\n\n head = fit_dictionary_copy['network_head']\n output = head(decoder(input_tensor_future, encoder(input_tensor, output_seq=False)))\n if name != \"ar\":\n if net_output_type == 'regression':\n self.assertListEqual(list(output.shape), [10, n_prediction_steps, 1])\n elif net_output_type == 'distribution':\n self.assertListEqual(list(output.sample().shape), [10, n_prediction_steps, 1])\n elif net_output_type == 'quantile':\n self.assertEqual(len(output), len(quantiles))\n for output_quantile in output:\n self.assertListEqual(list(output_quantile.shape), [10, n_prediction_steps, 1])\n else:\n if net_output_type == 'regression':\n self.assertListEqual(list(output.shape), [10, 1, 1])\n elif net_output_type == 'distribution':\n self.assertListEqual(list(output.sample().shape), [10, 1, 1])\n elif net_output_type == 'quantile':\n self.assertEqual(len(output), len(quantiles))\n for output_quantile in output:\n self.assertListEqual(list(output_quantile.shape), [10, 1, 1])\n with self.subTest(net_output_type=net_output_type):\n fit_dictionary_copy = copy.deepcopy(fit_dictionary_)\n fit_dictionary_copy['net_output_type'] = net_output_type\n\n if net_output_type == 'distribution':\n for dist in ALL_DISTRIBUTIONS.keys():\n fit_dictionary_copy['dist_forecasting_strategy'] = DisForecastingStrategy(dist_cls=dist)\n eval_heads_output(fit_dictionary_copy)\n elif net_output_type == 'quantile':\n fit_dictionary_copy['quantile_values'] = quantiles\n eval_heads_output(fit_dictionary_copy)\n else:\n eval_heads_output(fit_dictionary_copy)\n","repo_name":"automl/Auto-PyTorch","sub_path":"test/test_pipeline/components/setup/forecasting/forecasting_networks/test_base_components.py","file_name":"test_base_components.py","file_ext":"py","file_size_in_byte":20825,"program_lang":"python","lang":"en","doc_type":"code","stars":2173,"dataset":"github-code","pt":"78"} +{"seq_id":"74612509371","text":"# scan from left to right, find the first i that s[i] < s[i-1]\n# reduce s[i-1] by 1 and then change all the numbers 
 after to 9\n# watch out for N containing consecutive duplicates, such as N=14423 (res = 13999)\nclass Solution(object):\n    def monotoneIncreasingDigits(self, N):\n        \"\"\"\n        :type N: int\n        :rtype: int\n        \"\"\"\n        s = str(N)\n        j = -1 # don't forget\n        for i in range(1, len(s)):\n            if s[i] < s[i-1]:\n                j = i-1\n                while j-1 >= 0 and s[j] == s[j-1]:\n                    j -= 1\n                break\n        \n        if j == -1: # when the number N itself is good\n            return N\n        \n        res = s[:j] + str(int(s[j])-1) + \"9\"*(len(s)-j-1)\n        return int(res)\n        \n","repo_name":"CaizhiXu/LeetCode-Solutions-Python-Weimin","sub_path":"0738. Monotone Increasing Digits.py","file_name":"0738. Monotone Increasing Digits.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74450748092","text":"import pandas as pd\nimport numpy as np\nimport os\nimport anndata\nimport sys\nimport scanpy as sc\nfrom scvi.dataset import GeneExpressionDataset\n\nWORKING_DIR = \"/data/leslie/bplee/scBatch_project\"\n# adding the project dir to the path to import relevant modules below\nif WORKING_DIR not in sys.path:\n    print(\"________CHANGING PATH_________\")\n    sys.path.append(WORKING_DIR)\n    print(\"\\tWorking dir appended to Sys path.\")\nfrom Step0_Data.code.starter import *\nfrom scBatch.dataprep import set_adata_train_test_batches\nfrom scBatch.main import *\n\ndata_dir = \"../data/\"\nTIM_DATA_FILEPATH = \"/data/leslie/bplee/scBatch_project/pan_cancer_TIMs/quickload_data/TIMs_all_data.h5ad\"\n\n\ndef read_dataset(name):\n    data = pd.read_csv(name+\"_normalized_expression.csv\")\n    counts = data.drop(['index'], axis=1)\n    metadata = pd.read_csv(name+\"_metadata.csv\")\n    adata = anndata.AnnData(X=counts, obs=metadata)\n    return adata\n\n\ndef get_valid_datasets(dir_path):\n    files = os.listdir(dir_path)\n    lst = []\n    rtn = []\n    for f in files:\n        if suffix(f, suf='_metadata.csv'):\n            lst.append(suffix(f, suf='_metadata.csv'))\n        if suffix(f, suf=\"_normalized_expression.csv\"):\n            lst.append(suffix(f, suf=\"_normalized_expression.csv\"))\n    while lst:\n        curr = lst.pop()\n        if os.path.exists(dir_path+curr+\"_metadata.csv\") and os.path.exists(dir_path+curr+\"_normalized_expression.csv\"):\n            rtn.append(curr)\n            lst.remove(curr)\n    return rtn\n\n\ndef suffix(word, suf):\n    if word[-len(suf):] == suf:\n        return word[:-len(suf)]\n    return False\n\n\ndef load_data(data_dir):\n    rtn = []\n    datasets = get_valid_datasets(data_dir)\n    print(f\" found {len(datasets)} datasets in {data_dir}\")\n    for name in get_valid_datasets(data_dir):\n        rtn.append(read_dataset(os.path.join(data_dir, name)))\n        print(f\" added {name} dataset\")\n    return rtn\n\n\ndef quick_load(filepath=TIM_DATA_FILEPATH):\n    return anndata.read_h5ad(filepath)\n\n\ndef identify_singleton_labels(label_count_table):\n    # temp = get_label_counts(adata.obs, label_name, domain_name)\n    temp = label_count_table > 0\n    # these are cell types that are only present in one patient\n    cell_type_cancer_prevalence = temp.sum(axis=1)\n    return list(cell_type_cancer_prevalence[cell_type_cancer_prevalence == 1].index)\n\n\ndef remove_prefixes(cell_names):\n    return cell_names.map(lambda x: x[4:])\n\n\ndef filter_cancers(adata, cancers_types_to_remove=[\"L\", \"OV\", \"PACA\", \"MM\", \"LYM\"]):\n    bool_inds = ~adata.obs.cancer.isin(cancers_types_to_remove)\n    print(f'removing {sum(~bool_inds)} cells')\n    return adata[bool_inds, :]\n\n\ndef filter_cell_types(adata, cell_types_to_remove):\n    bool_inds = ~adata.obs.MajorCluster.isin(cell_types_to_remove)\n    print(f'removing
 {sum(~bool_inds)} cells')\n    return adata[bool_inds, :]\n\n\nif __name__ == \"__main__\":\n    adata = quick_load(TIM_DATA_FILEPATH)\n    print(f\" loaded all TIM data into anndata obj named: `adata`\")\n    print(\" Removing prefixes\")\n    adata.obs.MajorCluster = remove_prefixes(adata.obs.MajorCluster)\n\n    adata = filter_cancers(adata)\n    label_counts = get_label_counts(adata.obs, \"MajorCluster\", \"cancer\")\n    cell_types_to_remove = identify_singleton_labels(label_counts)\n    adata = filter_cell_types(adata, cell_types_to_remove)\n\n    sc.pp.normalize_total(adata, 1e5)\n\n    gene_ds = GeneExpressionDataset()\n    batches = adata.obs.patient\n    gene_ds.populate_from_data(X=adata.X,\n                               gene_names=np.array(adata.var.index),\n                               batch_indices=pd.factorize(batches)[0],\n                               remap_attributes=False)\n    gene_ds.subsample_genes(784)\n\n    adata = adata[:, gene_ds.gene_names]\n    # batches are going to be built off of adata.obs.subtype\n    adata = set_adata_train_test_batches(adata,\n                                         test=0,\n                                         train=None,\n                                         domain_name=\"cancer\")\n\n    adata.obs['cell_type'] = adata.obs['MajorCluster'].copy()\n    del adata.obs['MajorCluster']\n\n    adata.obs['domain'] = adata.obs['cancer'].copy()\n\n    # obj = DIVAObject()\n    # obj.args.epochs=10\n    # obj.fit(adata, '210624_test')","repo_name":"bplee/scBatch_project","sub_path":"pan_cancer_TIMs/code/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1684337409","text":"import json\nimport boto3\nimport time\n\ndb = boto3.client('dynamodb')\n\n\ndef lambda_handler(event, context):\n    MailID = event['queryStringParameters']['MailID']\n\n    OTPFromUser = event['queryStringParameters']['OTPFromUser']\n    # OTPFromUser = int(OTPFromUser)\n\n    response = db.query(\n        TableName='OTP',\n        KeyConditionExpression='MailID = :MailID',\n        ExpressionAttributeValues={\n            ':MailID': {'S': MailID}\n        }, ScanIndexForward=False, Limit=1\n    )\n\n    if response['Count'] == 0:\n        return \"No such OTP was generated\"\n    else:\n        latestSortedOTP = response['Items'][0]['otp']['N']\n        ExpiryTime = response['Items'][0]['ExpiryTime']['N']\n        if int(response['Items'][0]['ExpiryTime']['N'])= range_list[0] and value <= range_list[1]:\n            return True\n        else:\n            return False\n    \n    def get_lock_status(self):\n        \"\"\"\n        get control lock status \n        1: remote control is prohibited\n        0: remote control is allowed\n        \"\"\"\n        lock_status = self.redis.get(\"remote_lock\")\n        return lock_status\n    \n    \n    def __set_order_json(self, order_json):\n        \"\"\"\n        set control data in redis\n        \"\"\"\n        order_json = json.dumps(order_json)\n        return self.redis.lpush(\"order_queue\", order_json)\n    \n    def __parse_data_to_order(self, data): \n        \"\"\"\n        parse request json data to redis order\n        \"\"\"\n        redis_json = {\n            \"type\": self.__get_type(data['channel']),\n            \"address\": self.__get_address(data['channel']),\n            \"value\": self.__set_value(data)\n        }\n        return redis_json\n\n    @staticmethod\n    def __set_value(data):\n        \"\"\"\n        value must be an int\n        \"\"\"\n        float_list = [\"TestTemperature\", \"MaintainTemperature\", \"AgeingTemperature\",]\n        \n        if data['channel'] in float_list:\n            value = int(data['value']*10)\n        else:\n            value = int(data['value'])\n        \n        return value\n\n    @staticmethod\n    def __get_type(channel_name):\n        \"\"\"\n        get redis json type name\n        \"\"\"\n\n        holding_registers_names = [\"TestTemperature\", \"MaintainTemperature\", \"AgeingTemperature\",\n                                   \"ExhaustTime\", \"AgeingTime\", \"CoolingTime\", \"TestTime\",]\n\n        if channel_name in 
holding_registers_names:\n return \"holding_registers\"\n else:\n return None\n \n @staticmethod\n def __get_address(channel_name):\n \"\"\"\n get redis json address\n \"\"\"\n address_dicts = {\n \"TestTemperature\": 0x11a4,\n \"MaintainTemperature\": 0x11a5,\n \"AgeingTemperature\": 0x11a6,\n \"ExhaustTime\": 0x119a,\n \"AgeingTime\": 0x119c,\n \"CoolingTime\": 0x119e,\n \"TestTime\": 0x11a0,\n } \n\n return address_dicts.get(channel_name, None)\n \n\n \n\n ","repo_name":"Mabo-IoT/fuxuan_control_server","sub_path":"fuxuan/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37108167312","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 9 10:22:35 2019\n\n@author: Mayank Jain\n\"\"\"\n\n\"\"\"\nCode Challenge\n Name: \n weeks\n Filename: \n weeks.py\n Problem Statement:\n Write a program that adds missing days to existing tuple of days\n Input: \n ('Monday', 'Wednesday', 'Thursday', 'Saturday')\n Output:\n ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')\n\"\"\"\ntuple1 = ('Monday', 'Wednesday', 'Thursday', 'Saturday')\ntuple2 = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')\n\nlist1 = list(tuple1)\nlist2 = list(tuple2)\n\n\nfor item in list2:\n if item not in list1:\n list1.insert(list2.index(item),item)\n\nprint(list1)\n\ntuple3 = tuple(list1)\nprint(tuple3)\n\n\n","repo_name":"electrovirus8/FSDP2019","sub_path":"Day_03/Day_3_code_challange_solution/weeks.py","file_name":"weeks.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2124292290","text":"#!/usr/bin/python3\n\n# Define a function that checks if a string is a palindrome. 
\n# Example: \"anna\" is a palindrome but \"car\" is not\n\n# To solve this one, we will use the reverse function\n# We import it\nfrom reverse_function import reverse\n\ndef is_palindrome(string):\n    string_reversed = reverse(string)\n    return (string == string_reversed)\n\nif __name__ == '__main__':\n    print(is_palindrome(\"anna\")) # True\n    print(is_palindrome(\"car\")) # False","repo_name":"FernandezEnrique/Exercises-Python","sub_path":"06-Functions/03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31806564742","text":"from django.conf.urls import patterns, include, url\n\nurlpatterns = patterns('songs.views',\n    url(r'^$', 'index'),\n    url(r'^new-artist-form$', 'new_artist_form'),\n    url(r'^new-artist-submit$', 'new_artist_submit'),\n    url(r'^new-song-form$', 'new_song_form'),\n    url(r'^new-song-submit$', 'new_song_submit'),\n    url(r'^song/(?P<song_id>\\d+)/$', 'song_detail'),\n)\n\n","repo_name":"wintergalt/tutorial","sub_path":"songs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"29210128314","text":"\"\"\"Implements SeismicIndex class that allows for iteration over gathers in a survey or a group of surveys\"\"\"\n\nimport os\nimport warnings\nfrom functools import wraps, reduce\nfrom textwrap import indent, dedent\n\nimport numpy as np\nimport pandas as pd\n\nfrom .survey import Survey\nfrom .containers import GatherContainer\nfrom .utils import to_list, maybe_copy\nfrom ..batchflow import DatasetIndex\n\n\nclass IndexPart(GatherContainer):\n    \"\"\"A class that represents a part of `SeismicIndex` which contains trace headers of several surveys being merged\n    together.\"\"\"\n\n    def __init__(self):\n        self._headers = None\n        self._indexer = None\n        self.common_headers = set()\n        self.surveys_dict = {}\n\n    def __getitem__(self, key):\n        \"\"\"Select values of headers by their names.\"\"\"\n        if isinstance(key, tuple):\n            key = [key]\n        return super().__getitem__(key)\n\n    def __setitem__(self, key, value):\n        \"\"\"Set given values to selected headers.\"\"\"\n        if isinstance(key, tuple):\n            key = [key]\n        return super().__setitem__(key, value)\n\n    @property\n    def survey_names(self):\n        \"\"\"list of str: names of surveys in the index part.\"\"\"\n        return sorted(self.surveys_dict.keys())\n\n    @classmethod\n    def from_attributes(cls, headers, surveys_dict, common_headers, copy_headers=False):\n        \"\"\"Create a new index part from its attributes.\"\"\"\n        part = cls()\n        part.headers = headers.copy(copy_headers)\n        part.common_headers = common_headers\n        part.surveys_dict = surveys_dict\n        return part\n\n    @classmethod\n    def from_survey(cls, survey, copy_headers=False):\n        \"\"\"Construct an index part from a single survey.\"\"\"\n        if not isinstance(survey, Survey):\n            raise ValueError(\"survey must be an instance of Survey\")\n\n        headers = survey.headers.copy(copy_headers)\n        common_headers = set(headers.columns)\n        headers.columns = pd.MultiIndex.from_product([[survey.name], headers.columns])\n\n        part = cls()\n        part._headers = headers  # Avoid calling headers setter since the indexer is already calculated\n        part._indexer = survey._indexer  # pylint: disable=protected-access\n        part.common_headers = common_headers\n        part.surveys_dict = {survey.name: survey}\n        return part\n\n    @staticmethod\n    def _filter_equal(headers, header_cols):\n        \"\"\"Keep only those rows of `headers` 
where values of given headers are equal in all surveys.\"\"\"\n if not header_cols:\n return headers\n drop_mask = np.column_stack([np.ptp(headers.loc[:, (slice(None), col)], axis=1).astype(np.bool_)\n for col in header_cols])\n return headers.loc[~np.any(drop_mask, axis=1)]\n\n def merge(self, other, on=None, validate_unique=True, copy_headers=False):\n \"\"\"Create a new `IndexPart` by merging trace headers of `self` and `other` on given common headers.\"\"\"\n self_indexed_by = set(to_list(self.indexed_by))\n other_indexed_by = set(to_list(other.indexed_by))\n if self_indexed_by != other_indexed_by:\n raise ValueError(\"All parts must be indexed by the same headers\")\n if set(self.survey_names) & set(other.survey_names):\n raise ValueError(\"Only surveys with unique names can be merged\")\n\n possibly_common_headers = self.common_headers & other.common_headers\n if on is None:\n on = possibly_common_headers - {\"TRACE_SEQUENCE_FILE\"}\n left_df = self.headers\n right_df = other.headers\n else:\n on = set(to_list(on)) - self_indexed_by\n # Filter both self and other by equal values of on\n left_df = self._filter_equal(self.headers, on - self.common_headers)\n right_df = self._filter_equal(other.headers, on - other.common_headers)\n headers_to_check = possibly_common_headers - on\n\n merge_on = sorted(on)\n left_survey_name = self.survey_names[0]\n right_survey_name = other.survey_names[0]\n left_on = to_list(self.indexed_by) + [(left_survey_name, header) for header in merge_on]\n right_on = to_list(other.indexed_by) + [(right_survey_name, header) for header in merge_on]\n\n validate = \"1:1\" if validate_unique else \"m:m\"\n headers = pd.merge(left_df, right_df, how=\"inner\", left_on=left_on, right_on=right_on, copy=copy_headers,\n sort=False, validate=validate)\n\n # Recalculate common headers in the merged DataFrame\n common_headers = on | {header for header in headers_to_check\n if headers[left_survey_name, header].equals(headers[right_survey_name, header])}\n return self.from_attributes(headers, {**self.surveys_dict, **other.surveys_dict}, common_headers)\n\n def create_subset(self, indices):\n \"\"\"Return a new `IndexPart` based on a subset of its indices given.\"\"\"\n subset_headers = self.get_headers_by_indices(indices)\n return self.from_attributes(subset_headers, self.surveys_dict, self.common_headers)\n\n def copy(self, ignore=None):\n \"\"\"Perform a deepcopy of all part attributes except for `surveys_dict`, `_indexer` and those specified in\n `ignore`, which are kept unchanged.\"\"\"\n ignore = set() if ignore is None else set(to_list(ignore))\n return super().copy(ignore | {\"surveys_dict\"})\n\n @wraps(GatherContainer.reindex)\n def reindex(self, new_index, inplace=False):\n old_index = to_list(self.indexed_by)\n new_index = to_list(new_index)\n new_index_diff = set(new_index) - set(old_index)\n old_index_diff = set(old_index) - set(new_index)\n if new_index_diff - self.common_headers:\n raise ValueError(\"IndexPart can be reindexed only with common headers\")\n\n self = maybe_copy(self, inplace) # pylint: disable=self-cls-assignment\n new_diff_list = list(new_index_diff)\n self.headers[new_diff_list] = self.headers[((self.survey_names[0], new_ix) for new_ix in new_diff_list)]\n super().reindex(new_index, inplace=True)\n\n # Copy old index to each survey\n for sur in self.survey_names:\n for old_ix in old_index_diff:\n self.headers[(sur, old_ix)] = self.headers[(old_ix, \"\")]\n\n # Drop unwanted headers\n cols_to_drop = ([(sur, new_ix) for sur in self.survey_names for 
new_ix in new_index_diff] +\n [(old_ix, \"\") for old_ix in old_index_diff])\n self.headers.drop(columns=cols_to_drop, inplace=True)\n\n self.common_headers = (self.common_headers - new_index_diff) | old_index_diff\n return self\n\n @wraps(GatherContainer.filter)\n def filter(self, cond, cols, axis=None, unpack_args=False, inplace=False, **kwargs):\n cols = to_list(cols)\n survey_names = self.survey_names\n indexed_by = set(to_list(self.indexed_by))\n if (set(cols) - indexed_by) <= self.common_headers:\n # Filter only one survey since all of them share values of `cols` headers\n survey_names = [survey_names[0]]\n\n self = maybe_copy(self, inplace) # pylint: disable=self-cls-assignment\n for sur in survey_names:\n sur_cols = [col if col in indexed_by else (sur, col) for col in cols]\n super().filter(cond, cols=sur_cols, axis=axis, unpack_args=unpack_args, inplace=True, **kwargs)\n return self\n\n @wraps(GatherContainer.apply)\n def apply(self, func, cols, res_cols=None, axis=None, unpack_args=False, inplace=False, **kwargs):\n cols = to_list(cols)\n res_cols = cols if res_cols is None else to_list(res_cols)\n\n survey_names = self.survey_names\n indexed_by = set(to_list(self.indexed_by))\n if (set(cols) - indexed_by) <= self.common_headers:\n # Apply func only to one survey since all of them share values of `cols` headers\n survey_names = [survey_names[0]]\n\n self = maybe_copy(self, inplace) # pylint: disable=self-cls-assignment\n for sur in survey_names:\n sur_cols = [col if col in indexed_by else (sur, col) for col in cols]\n sur_res_cols = [(sur, col) for col in res_cols]\n super().apply(func, cols=sur_cols, res_cols=sur_res_cols, axis=axis, unpack_args=unpack_args, inplace=True,\n **kwargs)\n\n # Duplicate results for all surveys if func was applied only to the first one\n if len(survey_names) == 1:\n for sur in self.survey_names[1:]:\n self[[(sur, col) for col in res_cols]] = self[[(survey_names[0], col) for col in res_cols]]\n self.common_headers |= set(res_cols)\n return self\n\n\ndef delegate_to_parts(*methods):\n \"\"\"Implement given `methods` of `SeismicIndex` by calling the corresponding method of its parts. In addition to all\n the arguments of the method of a part each created method accepts `recursive` flag which defines whether to process\n `train`, `test` and `validation` subsets of the index in the same manner if they exist.\"\"\"\n def decorator(cls):\n for method in methods:\n def method_fn(self, *args, method=method, recursive=True, inplace=False, **kwargs):\n self = maybe_copy(self, inplace) # pylint: disable=self-cls-assignment\n for part in self.parts:\n getattr(part, method)(*args, inplace=True, **kwargs)\n # Explicitly reset iter since index parts were modified\n self.reset(\"iter\")\n\n if recursive:\n for split in self.splits.values():\n getattr(split, method)(*args, recursive=True, inplace=True, **kwargs)\n return self\n setattr(cls, method, method_fn)\n return cls\n return decorator\n\n\n@delegate_to_parts(\"reindex\", \"filter\", \"apply\")\nclass SeismicIndex(DatasetIndex):\n \"\"\"A class that enumerates gathers in a survey or a group of surveys and allows iterating over them.\n\n While `Survey` describes a single SEG-Y file, `SeismicIndex` is primarily used to describe survey concatenation\n (e.g. when several fields are being processed in the same way one after another) or merging (e.g. 
when traces from\n the same field before and after a given processing stage must be matched and compared).\n\n `SeismicIndex` consists of parts - instances of `IndexPart` class stored in `parts` attribute. Parts act as an\n additional SEG-Y file identifier after concatenation since different surveys may have non-unique `indices` making\n it impossible to recover a source survey for a given gather by its index. Each part in turn represents several\n surveys being merged together. It contains the following main attributes:\n - `indices` - unique identifiers of gathers in the part,\n - `headers` - merged trace headers from underlying surveys,\n - `surveys_dict` - a mapping from a survey name to the survey itself to further load traces.\n\n Thus a gather in a `SeismicIndex` is identified by values of its `header_index`, part and survey name. It can be\n obtained by calling :func:`~SeismicIndex.get_gather`. Iteration over gathers in the index is generally performed\n via :func:`~SeismicIndex.next_batch`.\n\n A complete algorithm of index instantiation looks as follows:\n 1. Independently transform each argument to `SeismicIndex`:\n - instance of `SeismicIndex` is kept as is,\n - `Survey` is transformed to a single part. Its `headers` replicate survey `headers` except for a new level\n added to `DataFrame` columns with the name of the survey. This is done to avoid headers collisions during\n subsequent merges.\n In both cases input `headers` can optionally be copied.\n 2. If a single argument was processed on the previous step, an index is already created.\n 3. Otherwise combine parts of created indices depending on the `mode` provided:\n - \"c\" or \"concat\": Parts of the resulting index is simply a concatenation of all input parts with preserved\n order. All parts must contain surveys with same `name`s.\n - \"m\" or \"merge\": Parts with same ordinal numbers are combined together by merging their `headers`. The number\n of parts in all inputs must match and all the underlying surveys must have different `name`s.\n In both cases all parts must be indexed by the same trace headers.\n\n Examples\n --------\n Let's consider 4 surveys describing a single field before and after processing. Note that all of them have the same\n `header_index`:\n >>> s1_before = Survey(path, header_index=index_headers, name=\"before\")\n >>> s2_before = Survey(path, header_index=index_headers, name=\"before\")\n\n >>> s1_after = Survey(path, header_index=index_headers, name=\"after\")\n >>> s2_after = Survey(path, header_index=index_headers, name=\"after\")\n\n An index can be created from a single survey in the following way:\n >>> index = SeismicIndex(s1_before)\n\n If `s1_before` and `s2_before` represent different parts of the same field, they can be concatenated into one index\n to iterate over the whole field and process it at once. Both surveys must have the same `name`:\n >>> index = SeismicIndex(s1_before, s2_before, mode=\"c\")\n\n Gathers before and after given processing stage can be matched using merge operation. Both surveys must have\n different `name`s:\n >>> index = SeismicIndex(s1_before, s1_after, mode=\"m\")\n\n Merge can follow concat and vice versa. 
A more complex case, covering both operations is demonstrated below:\n >>> index_before = SeismicIndex(s1_before, s2_before, mode=\"c\")\n >>> index_after = SeismicIndex(s1_after, s2_after, mode=\"c\")\n >>> index = SeismicIndex(index_before, index_after, mode=\"m\")\n\n Parameters\n ----------\n args : tuple of Survey, IndexPart or SeismicIndex\n A sequence of surveys, indices or parts to construct an index.\n mode : {\"c\", \"concat\", \"m\", \"merge\", None}, optional, defaults to None\n A mode used to combine multiple `args` into a single index. If `None`, only one positional argument can be\n passed.\n copy_headers : bool, optional, defaults to False\n Whether to copy `DataFrame`s of trace headers while constructing index parts.\n kwargs : misc, optional\n Additional keyword arguments to :func:`~SeismicIndex.merge` if the corresponding mode was chosen.\n\n Attributes\n ----------\n parts : tuple of IndexPart\n Parts of the constructed index.\n \"\"\"\n def __init__(self, *args, mode=None, copy_headers=False, **kwargs): # pylint: disable=super-init-not-called\n self.parts = tuple()\n self.train = None\n self.test = None\n self.validation = None\n\n if args:\n index = self.build_index(*args, mode=mode, copy_headers=copy_headers, **kwargs)\n self.__dict__ = index.__dict__\n elif kwargs:\n raise ValueError(\"No kwargs must be passed if an empty index is being created\")\n\n self._iter_params = None\n self.reset(\"iter\")\n\n @property\n def index(self):\n \"\"\"tuple of pd.Index: Unique identifiers of seismic gathers in each part of the index.\"\"\"\n return tuple(part.indices for part in self.parts)\n\n @property\n def n_parts(self):\n \"\"\"int: The number of parts in the index.\"\"\"\n return len(self.parts)\n\n @property\n def n_gathers_by_part(self):\n \"\"\"int: The number of gathers in each part of the index.\"\"\"\n return [part.n_gathers for part in self.parts]\n\n @property\n def n_gathers(self):\n \"\"\"int: The number of gathers in the index.\"\"\"\n return sum(self.n_gathers_by_part)\n\n @property\n def n_traces_by_part(self):\n \"\"\"int: The number of traces in each part of the index.\"\"\"\n return [part.n_traces for part in self.parts]\n\n @property\n def n_traces(self):\n \"\"\"int: The number of traces in the index.\"\"\"\n return sum(self.n_traces_by_part)\n\n @property\n def indexed_by(self):\n \"\"\"str or list of str or None: Names of header indices of each part. `None` for empty index.\"\"\"\n if self.is_empty:\n return None\n return self.parts[0].indexed_by\n\n @property\n def survey_names(self):\n \"\"\"list of str or None: Names of surveys in the index. 
`None` for empty index.\"\"\"\n if self.is_empty:\n return None\n return self.parts[0].survey_names\n\n @property\n def is_empty(self):\n \"\"\"bool: Whether the index is empty.\"\"\"\n return self.n_parts == 0\n\n @property\n def splits(self):\n \"\"\"dict: A mapping from a name of non-empty train/test/validation split to its `SeismicIndex`.\"\"\"\n return {split_name: getattr(self, split_name) for split_name in (\"train\", \"test\", \"validation\")\n if getattr(self, split_name) is not None}\n\n def __len__(self):\n \"\"\"The number of gathers in the index.\"\"\"\n return self.n_gathers\n\n def get_index_info(self, index_path=\"index\", indent_size=0, split_delimiter=\"\"):\n \"\"\"Recursively fetch index description string from the index itself and all the nested subindices.\"\"\"\n if self.is_empty:\n return \"Empty index\"\n\n info_df = pd.DataFrame({\"Gathers\": self.n_gathers_by_part, \"Traces\": self.n_traces_by_part},\n index=pd.RangeIndex(self.n_parts, name=\"Part\"))\n for sur in self.survey_names:\n info_df[f\"Survey {sur}\"] = [os.path.basename(part.surveys_dict[sur].path) for part in self.parts]\n\n msg = f\"\"\"\n {index_path} info:\n\n Indexed by: {\", \".join(to_list(self.indexed_by))}\n Number of gathers: {self.n_gathers}\n Number of traces: {self.n_traces}\n Is split: {self.is_split}\n\n Index parts info:\n \"\"\"\n msg = indent(dedent(msg) + info_df.to_string() + \"\\n\", \" \" * indent_size)\n\n # Recursively fetch info about index splits\n for split_name, split in self.splits.items():\n msg += split_delimiter + \"\\n\" + split.get_index_info(f\"{index_path}.{split_name}\", indent_size+4,\n split_delimiter=split_delimiter)\n return msg\n\n def __str__(self):\n \"\"\"Print index metadata including information about its parts and underlying surveys.\"\"\"\n delimiter_placeholder = \"{delimiter}\"\n msg = self.get_index_info(split_delimiter=delimiter_placeholder)\n for i, part in enumerate(self.parts):\n for sur in part.survey_names:\n msg += delimiter_placeholder + f\"\\n\\nPart {i}, Survey {sur}\\n\\n\" + str(part.surveys_dict[sur]) + \"\\n\"\n delimiter = \"_\" * max(len(line) for line in msg.splitlines())\n return msg.strip().format(delimiter=delimiter)\n\n def info(self):\n \"\"\"Print index metadata including information about its parts and underlying surveys.\"\"\"\n print(self)\n\n #------------------------------------------------------------------------#\n # Index creation methods #\n #------------------------------------------------------------------------#\n\n @classmethod\n def build_index(cls, *args, mode=None, copy_headers=False, **kwargs):\n \"\"\"Build an index from `args` as described in :class:`~SeismicIndex` docs.\"\"\"\n # Create an empty index if no args are given\n if not args:\n return cls(**kwargs)\n\n # Select an appropriate builder by passed mode\n if mode is None and len(args) > 1:\n raise ValueError(\"mode must be specified if multiple positional arguments are given\")\n builders_dict = {\n None: cls.from_index,\n \"m\": cls.merge,\n \"merge\": cls.merge,\n \"c\": cls.concat,\n \"concat\": cls.concat,\n }\n if mode not in builders_dict:\n raise ValueError(f\"Unknown mode {mode}\")\n\n # Convert all args to SeismicIndex and combine them into a single index\n indices = cls._args_to_indices(*args)\n return builders_dict[mode](*indices, copy_headers=copy_headers, **kwargs)\n\n @classmethod\n def _args_to_indices(cls, *args):\n \"\"\"Independently convert each positional argument to a `SeismicIndex`.\"\"\"\n indices = []\n for arg in args:\n if 
isinstance(arg, Survey):\n builder = cls.from_survey\n elif isinstance(arg, IndexPart):\n builder = cls.from_parts\n elif isinstance(arg, SeismicIndex):\n builder = cls.from_index\n else:\n raise ValueError(f\"Unsupported type {type(arg)} to convert to index\")\n indices.append(builder(arg, copy_headers=False))\n return indices\n\n @classmethod\n def from_parts(cls, *parts, copy_headers=False):\n \"\"\"Construct an index from its parts.\n\n Parameters\n ----------\n parts : tuple of IndexPart\n Index parts to convert to an index.\n copy_headers : bool, optional, defaults to False\n Whether to copy `headers` of parts.\n\n Returns\n -------\n index : SeismicIndex\n Constructed index.\n \"\"\"\n if not parts:\n return cls()\n\n if not all(isinstance(part, IndexPart) for part in parts):\n raise ValueError(\"All parts must be instances of IndexPart\")\n\n survey_names = parts[0].survey_names\n if any(survey_names != part.survey_names for part in parts[1:]):\n raise ValueError(\"Only parts with the same survey names can be concatenated into one index\")\n\n indexed_by = parts[0].indexed_by\n if any(indexed_by != part.indexed_by for part in parts[1:]):\n raise ValueError(\"All parts must be indexed by the same columns\")\n\n if copy_headers:\n parts = tuple(part.copy() for part in parts)\n\n index = cls()\n index.parts = parts\n index.reset(\"iter\")\n return index\n\n @classmethod\n def from_survey(cls, survey, copy_headers=False):\n \"\"\"Construct an index from a single survey.\n\n Parameters\n ----------\n survey : Survey\n A survey used to build an index.\n copy_headers : bool, optional, defaults to False\n Whether to copy survey `headers`.\n\n Returns\n -------\n index : SeismicIndex\n Constructed index.\n \"\"\"\n return cls.from_parts(IndexPart.from_survey(survey, copy_headers=copy_headers))\n\n @classmethod\n def from_index(cls, index, copy_headers=False):\n \"\"\"Construct an index from an already created `SeismicIndex`. Leaves it unchanged if `copy_headers` is `False`,\n returns a copy otherwise.\n\n Parameters\n ----------\n index : SeismicIndex\n Input index.\n copy_headers : bool, optional, defaults to False\n Whether to copy the index.\n\n Returns\n -------\n index : SeismicIndex\n Constructed index.\n \"\"\"\n if not isinstance(index, SeismicIndex):\n raise ValueError(\"index must be an instance of SeismicIndex\")\n if copy_headers:\n return index.copy()\n return index\n\n @classmethod\n def concat(cls, *args, copy_headers=False):\n \"\"\"Concatenate `args` into a single index.\n\n Each positional argument must be an instance of `Survey`, `IndexPart` or `SeismicIndex`. All of them must be\n indexed by the same headers. Underlying surveys of different arguments must have same `name`s.\n\n Notes\n -----\n A detailed description of index concatenation can be found in :class:`~SeismicIndex` docs.\n\n Parameters\n ----------\n args : tuple of Survey, IndexPart or SeismicIndex\n Inputs to be concatenated.\n copy_headers : bool, optional, defaults to False\n Whether to copy `headers` of `args`.\n\n Returns\n -------\n index : SeismicIndex\n Concatenated index.\n \"\"\"\n indices = cls._args_to_indices(*args)\n parts = sum([ix.parts for ix in indices], tuple())\n return cls.from_parts(*parts, copy_headers=copy_headers)\n\n @classmethod\n def merge(cls, *args, on=None, validate_unique=True, copy_headers=False):\n \"\"\"Merge `args` into a single index.\n\n Each positional argument must be an instance of `Survey`, `IndexPart` or `SeismicIndex`. 
All of them must be\n indexed by the same headers. Underlying surveys of different arguments must have different `name`s.\n\n Notes\n -----\n A detailed description of index merging can be found in :class:`~SeismicIndex` docs.\n\n Parameters\n ----------\n args : tuple of Survey, IndexPart or SeismicIndex\n Inputs to be merged.\n on : str or list of str, optional\n Headers to be used as join keys. If not given, all common headers are used except for `TRACE_SEQUENCE_FILE`\n unless it is used to index `args`.\n validate_unique : bool, optional, defaults to True\n Check if merge keys are unique in all input `args`.\n copy_headers : bool, optional, defaults to False\n Whether to copy `headers` of `args`.\n\n Returns\n -------\n index : SeismicIndex\n Merged index.\n \"\"\"\n indices = cls._args_to_indices(*args)\n if len({ix.n_parts for ix in indices}) != 1:\n raise ValueError(\"All indices being merged must have the same number of parts\")\n indices_parts = [ix.parts for ix in indices]\n merged_parts = [reduce(lambda x, y: x.merge(y, on, validate_unique, copy_headers), parts)\n for parts in zip(*indices_parts)]\n\n # Warn if the whole index or some of its parts are empty\n empty_parts = [i for i, part in enumerate(merged_parts) if not part]\n if len(empty_parts) == len(merged_parts):\n warnings.warn(\"Empty index after merge\", RuntimeWarning)\n elif empty_parts:\n warnings.warn(f\"Empty parts {empty_parts} after merge\", RuntimeWarning)\n\n return cls.from_parts(*merged_parts, copy_headers=False)\n\n #------------------------------------------------------------------------#\n # DatasetIndex interface implementation #\n #------------------------------------------------------------------------#\n\n def index_by_pos(self, pos):\n \"\"\"Return gather index and part by its position in the index.\n\n Parameters\n ----------\n pos : int\n Ordinal number of the gather in the index.\n\n Returns\n -------\n index : int or tuple\n Gather index.\n part : int\n Index part to get the gather from.\n \"\"\"\n part_pos_borders = np.cumsum([0] + self.n_gathers_by_part)\n part = np.searchsorted(part_pos_borders[1:], pos, side=\"right\")\n return self.indices[part][pos - part_pos_borders[part]], part\n\n def subset_by_pos(self, pos):\n \"\"\"Return a subset of gather indices by their positions in the index.\n\n Parameters\n ----------\n pos : int or array-like of int\n Ordinal numbers of gathers in the index.\n\n Returns\n -------\n indices : list of pd.Index\n Gather indices of the subset by each index part.\n \"\"\"\n pos = np.sort(np.atleast_1d(pos))\n part_pos_borders = np.cumsum([0] + self.n_gathers_by_part)\n pos_by_part = np.split(pos, np.searchsorted(pos, part_pos_borders[1:]))\n part_indices = [part_pos - part_start for part_pos, part_start in zip(pos_by_part, part_pos_borders[:-1])]\n return tuple(index[subset] for index, subset in zip(self.index, part_indices))\n\n def create_subset(self, index):\n \"\"\"Return a new index object based on a subset of its indices given.\n\n Parameters\n ----------\n index : SeismicIndex or tuple of pd.Index\n Gather indices of the subset to create a new `SeismicIndex` object for. 
If `tuple` of `pd.Index`, each item\n defines gather indices of the corresponding part in `self`.\n\n Returns\n -------\n subset : SeismicIndex\n A subset of the index.\n \"\"\"\n if isinstance(index, SeismicIndex):\n index = index.index\n if len(index) != self.n_parts:\n raise ValueError(\"Index length must match the number of parts\")\n return self.from_parts(*[part.create_subset(ix) for part, ix in zip(self.parts, index)], copy_headers=False)\n\n #------------------------------------------------------------------------#\n # Statistics computation methods #\n #------------------------------------------------------------------------#\n\n def collect_stats(self, n_quantile_traces=100000, quantile_precision=2, limits=None, bar=True):\n \"\"\"Collect the following trace data statistics for each survey in the index or a dataset:\n 1. Min and max amplitude,\n 2. Mean amplitude and trace standard deviation,\n 3. Approximation of trace data quantiles with given precision.\n\n Since fair quantile calculation requires simultaneous loading of all traces from the file we avoid such memory\n overhead by calculating approximate quantiles for a small subset of `n_quantile_traces` traces selected\n randomly. Only a set of quantiles defined by `quantile_precision` is calculated, the rest of them are linearly\n interpolated by the collected ones.\n\n After the method is executed all calculated values can be obtained via corresponding attributes of the surveys\n in the index and their `has_stats` flag is set to `True`.\n\n Examples\n --------\n Statistics calculation for the whole index can be done as follows:\n >>> survey = Survey(path, header_index=\"FieldRecord\", header_cols=[\"TraceNumber\", \"offset\"], name=\"survey\")\n >>> index = SeismicIndex(survey).collect_stats()\n\n Statistics can be calculated for a dataset as well:\n >>> dataset = SeismicDataset(index).collect_stats()\n\n After a train-test split is performed, `train` and `test` refer to the very same `Survey` instances. This\n allows for `collect_stats` to be used to calculate statistics for the training set and then use them to\n normalize gathers from the testing set to avoid data leakage during machine learning model training:\n >>> dataset.split()\n >>> dataset.train.collect_stats()\n >>> dataset.test.next_batch(1).load(src=\"survey\").scale_standard(src=\"survey\", use_global=True)\n\n Note that if no gathers from a particular survey were included in the training set its stats won't be\n collected!\n\n Parameters\n ----------\n n_quantile_traces : positive int, optional, defaults to 100000\n The number of traces to use for quantiles estimation.\n quantile_precision : positive int, optional, defaults to 2\n Calculate an approximate quantile for each q with `quantile_precision` decimal places. All other quantiles\n will be linearly interpolated on request.\n limits : int or tuple or slice, optional\n Time limits to be used for statistics calculation. `int` or `tuple` are used as arguments to init a `slice`\n object. If not given, `limits` passed to `Survey.__init__` are used. Measured in samples.\n bar : bool, optional, defaults to True\n Whether to show a progress bar.\n\n Returns\n -------\n self : same type as self\n An index or a dataset with collected stats. 
Sets `has_stats` flag to `True` and updates statistics\n attributes inplace for each of the underlying surveys.\n \"\"\"\n for part in self.parts:\n for sur in part.surveys_dict.values():\n sur.collect_stats(indices=part.indices, n_quantile_traces=n_quantile_traces,\n quantile_precision=quantile_precision, limits=limits, bar=bar)\n return self\n\n #------------------------------------------------------------------------#\n # Loading methods #\n #------------------------------------------------------------------------#\n\n def get_gather(self, index, part=None, survey_name=None, limits=None, copy_headers=False):\n \"\"\"Load a gather with given `index`.\n\n Parameters\n ----------\n index : int or 1d array-like\n An index of the gather to load. Must be one of `self.indices`.\n part : int\n Index part to get the gather from. May be omitted if index concatenation was not performed.\n survey_name : str or list of str\n Survey name to get the gather from. If several names are given, a list of gathers from corresponding\n surveys is returned. May be omitted if index merging was not performed.\n limits : int or tuple or slice or None, optional\n Time range for trace loading. `int` or `tuple` are used as arguments to init a `slice` object. If not\n given, `limits` passed to the corresponding `Survey.__init__` are used. Measured in samples.\n copy_headers : bool, optional, defaults to False\n Whether to copy the subset of index `headers` describing the gather.\n\n Returns\n -------\n gather : Gather or list of Gather\n Loaded gather instance. List of gathers is returned if several survey names was passed.\n \"\"\"\n if part is None and self.n_parts > 1:\n raise ValueError(\"part must be specified if the index is constructed by concatenation\")\n if part is None:\n part = 0\n index_part = self.parts[part]\n\n if survey_name is None and len(self.survey_names) > 1:\n raise ValueError(\"survey_name must be specified if the index is constructed by merging\")\n if survey_name is None:\n survey_name = self.survey_names[0]\n\n is_single_survey = isinstance(survey_name, str)\n survey_names = to_list(survey_name)\n surveys = [index_part.surveys_dict[name] for name in survey_names]\n\n index_headers = index_part.get_headers_by_indices((index,))\n empty_headers = index_headers[[]] # Handle the case when no headers were loaded for a survey\n gather_headers = [index_headers.get(name, empty_headers) for name in survey_names]\n\n gathers = [survey.load_gather(headers=headers, limits=limits, copy_headers=copy_headers)\n for survey, headers in zip(surveys, gather_headers)]\n if is_single_survey:\n return gathers[0]\n return gathers\n\n def sample_gather(self, part=None, survey_name=None, limits=None, copy_headers=False):\n \"\"\"Load a random gather from the index.\n\n Parameters\n ----------\n part : int\n Index part to sample the gather from. Chosen randomly if not given.\n survey_name : str\n Survey name to sample the gather from. If several names are given, a list of gathers from corresponding\n surveys is returned. Chosen randomly if not given.\n limits : int or tuple or slice or None, optional\n Time range for trace loading. `int` or `tuple` are used as arguments to init a `slice` object. If not\n given, `limits` passed to the corresponding `Survey.__init__` are used. Measured in samples.\n copy_headers : bool, optional, defaults to False\n Whether to copy the subset of index `headers` describing the gather.\n\n Returns\n -------\n gather : Gather or list of Gather\n Loaded gather instance. 
List of gathers is returned if several survey names were passed.\"\"\"\n        if part is None:\n            part_weights = np.array(self.n_gathers_by_part) / self.n_gathers\n            part = np.random.choice(self.n_parts, p=part_weights)\n        if survey_name is None:\n            survey_name = np.random.choice(self.survey_names)\n        index = np.random.choice(self.parts[part].indices)\n        return self.get_gather(index, part, survey_name, limits=limits, copy_headers=copy_headers)\n\n    #------------------------------------------------------------------------#\n    #                       Index manipulation methods                        #\n    #------------------------------------------------------------------------#\n\n    def copy(self, ignore=None):\n        \"\"\"Perform a deepcopy of the index by copying its parts. All attributes of each part are deepcopied except for\n        indexer, underlying surveys and those specified in `ignore`, which are kept unchanged.\n\n        Parameters\n        ----------\n        ignore : str or array of str, defaults to None\n            Part attributes that won't be copied.\n\n        Returns\n        -------\n        copy : SeismicIndex\n            Copy of the index.\n        \"\"\"\n        parts_copy = [part.copy(ignore=ignore) for part in self.parts]\n        self_copy = self.from_parts(*parts_copy, copy_headers=False)\n        for split_name, split in self.splits.items():\n            setattr(self_copy, split_name, split.copy())\n        return self_copy\n","repo_name":"71unxv/SeismicPro","sub_path":"seismicpro/src/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":37568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"32162217707","text":"from random import choice\nfrom random import sample\n\nimport emoji\nfrom faker import Faker\nfrom flask import Flask\nfrom flask import request\nfrom flask import render_template\nfrom flask import jsonify\n\napp = Flask(__name__)\n\nclass RandomNumber:\n    @property\n    def value(self):\n        return choice(range(0, 100000))\n\nclass RandomText:\n    def __init__(self):\n        self.faker = Faker()\n\n    @property\n    def value(self):\n        return choice(\n            [\n                self.faker.catch_phrase(),\n                \"\".join(self.faker.paragraphs()),\n            ]\n        )\n\nclass RandomPhoto:\n    @property\n    def value(self):\n        pixels = [100, 150, 200, 250, 300, 350, 400]\n        pixelw = choice(pixels)\n        pixell = choice(pixels)\n        # Fixed: the trailing comma made this property return a 1-tuple instead of a string\n        return f\"http://lorempixel.com/{pixelw}/{pixell}/animals/\"\n\nclass RandomEmoji:\n    @property\n    def value(self):\n        emojis = list(emoji.EMOJI_ALIAS_UNICODE_ENGLISH.values())\n        number = choice([1,3,5,7,51])\n        return \"\".join(sample(emojis, number))\n\nclass IPMsg:\n    def __init__(self, ip_address):\n        self.ip_address = ip_address\n\n    @property\n    def value(self):\n        return f\"Hello person! 
Your ip address is {self.ip_address}.\"\n\nclass RandomResponse:\n def __init__(self):\n self.number = RandomNumber()\n self.text = RandomText()\n self.photo = RandomPhoto()\n self.emoji = RandomEmoji()\n self.randoms = [self.number, self.text, self.photo, self.emoji]\n \n def get_random_thing(self):\n return choice(self.randoms).value \n \nresponse_generator = RandomResponse()\n\n@app.route(\"/api/random\", methods=[\"POST\"])\ndef get_random_thing():\n response_generator.randoms.append(IPMsg(request.remote_addr))\n random_value = response_generator.get_random_thing()\n response_generator.randoms.pop(len(response_generator.randoms) - 1)\n if \"lorempixel\" in str(random_value):\n return jsonify({\"randomphoto\": random_value})\n return jsonify({\"random\": random_value})\n\n@app.route(\"/\")\ndef homepage():\n return render_template(\"homepage.html\")\n\n\nif __name__ == '__main__':\n # Threaded option to enable multiple instances for multiple user access support\n app.run(threaded=True, port=5000)\n","repo_name":"ecedmondson/randomizer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24811795466","text":"class KalmanFilterXYZ:\n def __init__(self,Q=3.0, R=10.0):\n self.kx=KalmanFilter(Q, R)\n self.ky=KalmanFilter(Q, R)\n self.kz=KalmanFilter(Q, R)\n def set_x(self,x):\n return self.kx.set_val(x)\n def set_y(self, y):\n return self.ky.set_val(y)\n def set_z(self, z):\n return self.kz.set_val(z)\nclass KalmanFilter:\n def __init__(self, Q=3.0, R=10.0):\n self.p_last = 0\n self.ProcessNiose_Q = Q\n self.MeasureNoise_R=R\n self.val_last = None\n def set_val(self, val):\n if self.val_last is None:\n self.val_last=val\n # 卡尔曼滤波 基于下一时刻位置不变模型\n p_mid = self.p_last + self.ProcessNiose_Q\n # 预测本次误差\n kg = p_mid / (p_mid + self.MeasureNoise_R)\n # 更新本次卡尔曼增益\n data_now = self.val_last + kg * (val - self.val_last)\n # 根据本次观测值预测本次输出\n p_now = (1 - kg) * p_mid\n # 更新误差\n self.p_last = p_now\n self.val_last = data_now\n return data_now\n\n\ndef Filter_Kalman(data_now, data_last, p_last, ProcessNiose_Q, MeasureNoise_R):\n \"\"\"\n\n :param data_now: 数值 x/y/z\n :param data_last: 上一次的数值\n :param p_last:\n :param ProcessNiose_Q: 3\n :param MeasureNoise_R: 10\n :return:\n \"\"\"\n # 卡尔曼滤波 基于下一时刻位置不变模型\n result = [[], []]\n p_mid = p_last + ProcessNiose_Q\n # 预测本次误差\n kg = p_mid / (p_mid + MeasureNoise_R)\n # 更新本次卡尔曼增益\n data_now = data_last + kg * (data_now - data_last)\n # 根据本次观测值预测本次输出\n p_now = (1 - kg) * p_mid\n # 更新误差\n p_last = p_now\n result[0] = data_now\n result[1] = p_last\n return result\n\ndef test_kf1():\n x=[1,2,1,2,1]\n p_last = 0\n x_dn=[]\n last_val = 1.0\n for i in range(len(x)):\n ret= Filter_Kalman(x[i],last_val, p_last, 3, 10)\n p_last = ret[1]\n print('ret:',i, ret[0])\n last_val = ret[0]\n x_dn.append(ret[0])\n\n assert x_dn[0]==1.0\n assert x_dn[1]==1.3467336683417086\n assert x_dn[2]==1.2105584375953615\n assert x_dn[3]==1.5336301851417857\n assert x_dn[4]==1.3122030348062983\n\n\ndef test_kf2():\n x=[1,2,1,2,1]\n kf2 = KalmanFilter()\n x_dn=[]\n for i in range(len(x)):\n curr_val = kf2.set_val(x[i])\n print('ret:',curr_val)\n x_dn.append(curr_val)\n\n assert x_dn[0]==1.0\n assert x_dn[1]==1.3467336683417086\n assert x_dn[2]==1.2105584375953615\n assert x_dn[3]==1.5336301851417857\n assert x_dn[4]==1.3122030348062983\n\nif __name__=='__main__':\n test_kf1()\n 
test_kf2()\n\n\n","repo_name":"Jiangshan00001/pyuwb","sub_path":"pyuwb/KalmanFilter.py","file_name":"KalmanFilter.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"20356953581","text":"import sys\nsys.path.append('../') \n\nfrom modules.utils import count_occurrences\n\nclass well_formed_mx:\n def id_inputs_in_nodes(self):\n '''\n output : boolean\n returns true if the inputs list is in the keys list of the nodes \n '''\n return all(item in self.get_node_ids() for item in self.inputs)\n\n def id_outputs_in_nodes(self):\n '''\n output : boolean\n returns true if the outputs list is in the keys list of the nodes \n '''\n return all(item in self.get_node_ids() for item in self.outputs)\n\n def id_nodes_in_keys(self):\n '''\n return True if the nodes ids are in the keys of the dictionnary\n '''\n for i in range(len(self.nodes)):\n if self.get_nodes()[i].get_id() != self.get_node_ids()[i]:\n return False\n return True \n\n def valide_edges_graph(self):\n '''\n output : boolean\n returns true if j appears n times in the children of index node i \n then i must appear n times in the parents of index node j \n '''\n dict_node = self.get_id_node_map()\n '''\n scrolls through the list of node keys\n watches for each node the first value of his children, then the second\n one to j \n '''\n for i in range(len(self.get_node_ids())):\n for j in range(len(self.nodes[i].get_children_ids())):\n\n '''\n gets the list of children of node i\n '''\n list_children_node_i = dict_node[i].get_children_ids()\n '''\n gets the j-th value from the list of children of node i \n '''\n occurence_j = list_children_node_i[j]\n '''\n gets the list of parents of node j\n '''\n list_parents_node_j = dict_node[occurence_j].get_parent_ids()\n '''\n gets the if of node i\n '''\n occurence_id_node_i = dict_node[i].get_id()\n\n '''\n return false if the number of occurrences of j in the list of children\n of node i is not equal to the number of occurrences of i in the list of\n parents of node j\n '''\n if not(count_occurrences(list_children_node_i,occurence_j) == \n count_occurrences(list_parents_node_j,occurence_id_node_i)):\n return False;\n\n '''\n same thing but with the parents\n '''\n for i in range(len(self.get_node_ids())):\n for j in range(len(self.nodes[i].get_parent_ids())):\n\n list_parents_node_i = dict_node[i].get_parent_ids()\n occurence_j_bis = list_parents_node_i[j]\n list_children_node_j = dict_node[occurence_j_bis].get_children_ids()\n occurence_id_node_i_bis = dict_node[i].get_id()\n\n '''\n returns false if the number of occurrences of j in the parents list of \n node i is not equal to the number of occurrences of i in the children \n list of the node j\n '''\n if not(count_occurrences(list_parents_node_i,occurence_j_bis) ==\n count_occurrences(list_children_node_j,occurence_id_node_i_bis)):\n return False;\n return True;\n\n def is_well_formed(self):\n '''\n output : boolean\n verifies that a graph is always well formed meaning that the functions\n id_inputs_in_nodes(), id_outputs_in_nodes() and valide_edges_graph()\n returns true\n '''\n return (self.id_inputs_in_nodes() and \n self.id_outputs_in_nodes() and \n self.id_nodes_in_keys() and\n 
self.valide_edges_graph())","repo_name":"alicepetiot/LDD2-Projet-Graphes","sub_path":"modules/open_digraph_mx/well_formed_mx.py","file_name":"well_formed_mx.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1721187788","text":"import json\nimport logging\nimport requests\nimport argparse\nimport external\nimport messaging\nfrom app import get_app\nfrom pathlib import Path\nfrom datanode.bootstrap import bootstrap\n\n\nlogging.basicConfig(\n filename=\"std.log\",\n format=\"%(name)s - %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\n\n\ndef register_to_metadata_server(url, host, port, logger):\n logger.info(f\"Registrating datanode {host}:{port} with metadata server at {url}.\")\n try:\n res = requests.post(f\"{url}/datanodes\", data={\"host\": host, \"port\": port})\n except requests.exceptions.ConnectionError:\n exit(f\"Could not establish connection with metadata server at {url}\")\n\n msg = f\"Registration with metadata server at {url} successful.\"\n if res.status_code == 400:\n msg = \"Datanode already registered.\"\n logger.info(msg)\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"Process some integers.\")\n\n parser.add_argument(\"--host\", default=\"127.0.0.1\")\n parser.add_argument(\"port\", type=int)\n parser.add_argument(\"dir\")\n\n return parser\n\n\ndef get_config():\n with open(\"conf.json\", \"r\") as f:\n config = json.load(f)\n parser = get_parser()\n args = parser.parse_args()\n config[\"host\"] = args.host\n config[\"port\"] = args.port\n config[\"basedir\"] = str(Path(__file__).resolve().parent)\n config[\"blocks_save_location\"] = args.dir\n return config\n\n\ndef start_consumers(bus):\n for exchange, handlers in external.HANDLERS.items():\n for hndlr in handlers:\n callback = messaging.consumer_factory(hndlr, bus)\n messaging.register(exchange, callback)\n\n\ndef start_webapp(bus, config, host, port, logger):\n meta_host = config[\"meta\"][\"host\"]\n meta_port = config[\"meta\"][\"port\"]\n\n address = f\"{meta_host}:{meta_port}\"\n meta_url = f\"http://{address}/dfs\"\n\n register_to_metadata_server(meta_url, host, port, logger)\n\n app = get_app(bus)\n app.run(host=host, port=port, server=\"paste\", debug=False)\n\n\nif __name__ == \"__main__\":\n config = get_config()\n bus = bootstrap(config)\n\n host = config.get(\"host\")\n port = config.get(\"port\")\n\n logger = logging.getLogger(__name__)\n\n start_consumers(bus)\n start_webapp(bus, config, host, port, logger)\n\n logger.info(f\"SHUTTING DOWN DATANODE SERVER {host}:{port}\")\n","repo_name":"enrique-rodriguez/dfs-datanode","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15582948842","text":"import logging\n\nimport click\n\nimport google_oauth2\nimport google_photos\n\n\n@click.group()\n@click.option(\n '--client',\n type=str,\n default='./_test/client_id.json',\n help='Input path for client id json.'\n)\n@click.option(\n '--credential',\n type=str,\n default='./_test/credentials.json',\n help='Input output path for credential json.'\n)\n@click.pass_context\ndef cli(ctx, client: str, credential: str):\n \"\"\" Google Photos Libraryを利用するコマンド\n \"\"\"\n ctx.obj['client'] = client\n ctx.obj['credential'] = credential\n\n\ndef create_logger() -> logging.Logger:\n \"\"\" ログ出力のための設定\n \"\"\"\n handler = logging.StreamHandler()\n 
handler.setLevel(logging.DEBUG)\n    handler.setFormatter(logging.Formatter(\n        \"%(asctime)s %(levelname)s %(name)s: %(message)s\"))\n\n    logger = logging.getLogger(__name__)\n    logger.setLevel(logging.DEBUG)\n    logger.addHandler(handler)\n    logger.propagate = False\n\n    return logger\n\n\n@click.command()\n@click.argument('path')\n@click.option(\n    '--album',\n    type=str,\n    help=''\n)\n@click.option(\n    '--id',\n    is_flag=True,\n    help='set album using id.',\n)\n@click.pass_context\ndef upload(ctx, path: str, album: str, id: bool) -> None:\n    \"\"\" Upload an image.\n    \"\"\"\n    logger = create_logger()\n\n    service = google_oauth2.get_authorized_service(\n        ctx.obj['client'],\n        ctx.obj['credential'],\n        logger)\n    client = google_photos.GooglePhots(service)\n\n    # When adding to an album, look up the album ID first.\n    # Only one target album is allowed, so treat it as an error if none or several are found.\n    album_id = ''\n    if album is not None:\n        target = 'id' if id else 'title'\n        album_list = client.get_album_list()\n        albums = [item for item in album_list['albums']\n                  if item[target] == album]\n        if len(albums) == 0:\n            logger.error(f\"cannot find album: {album}, id flag = {id}\")\n            return\n        if len(albums) > 1:\n            logger.error(\n                f\"find multiple albums, please identify only one album. album = {album}, id flag = {id}\")\n            map(lambda x: logger.error(f\"album: {x}\"), albums)\n            return\n        album_id = albums[0]['id']\n\n    # Upload the image\n    response = client.upload_image(path, album_id)\n    logger.info(response)\n    logger.info(response['newMediaItemResults'][0]['status'])\n\n\nif __name__ == \"__main__\":\n    cli.add_command(upload)\n    cli(obj={})\n","repo_name":"iimuz/til","sub_path":"python/upload_to_google_photos/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"ja","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"2536935669","text":"# encoding:gbk\n\nimport pandas as pd\nimport numpy as np\nimport json\nimport queue\nimport pymongo\nimport time\nimport datetime\nfrom datetime import datetime as dt\nimport threading\nfrom aare.noqa import *\nfrom pathlib import Path\nimport gc\nimport copy\nfrom QAPUBSUB.consumer import subscriber, subscriber_topic, subscriber_routing\nfrom QAPUBSUB.producer import publisher, publisher_topic\n\n\nclass SUB:\n    pass\n\n\nparam = SUB()\nparam.tick_sub_list = []\n\nlog_info = SUB()\nlog_info.ticks_count = 0\nlog_info.ticks_loop_count = 0\nlog_info.ticks_last_time = 0\nlog_info.ticks_last_count = 0\n\n\nmongo_ip = '127.0.0.1'\neventmq_ip = '127.0.0.1'\naccount_cookie = '230041917'\n\n\ntime_now = dt.now()\ntoday_str = datetime.datetime.now().strftime(\"%Y%m%d\")\ntrade_day = datetime.datetime.now().date().strftime('%Y%m%d')\ntrade_start = dt.strptime(time_now.strftime(\"%Y-%m-%d 09:16:00\"), \"%Y-%m-%d %H:%M:%S\")\ntrade_open_stop = dt.strptime(time_now.strftime(\"%Y-%m-%d 09:25:25\"), \"%Y-%m-%d %H:%M:%S\")\ntrade_mid_stop = dt.strptime(time_now.strftime(\"%Y-%m-%d 11:30:05\"), \"%Y-%m-%d %H:%M:%S\")\ntrade_mid_start = dt.strptime(time_now.strftime(\"%Y-%m-%d 13:00:00\"), \"%Y-%m-%d %H:%M:%S\")\ntrade_end = dt.strptime(time_now.strftime(\"%Y-%m-%d 15:01:00\"), \"%Y-%m-%d %H:%M:%S\")\n\nsnapshot_pub = publisher_topic(exchange='qmt_stock_snapshot', routing_key='', host=eventmq_ip)\n# snapshot_open_pub = publisher_topic(exchange='qmt_stock_snapshot_open', routing_key='', host=eventmq_ip)\n# amx_tick_pub = publisher_topic(exchange='qmt_stock_amx_tick', routing_key='', host=eventmq_ip)\n\n\ndef stock_trading_time():\n    now = datetime.datetime.now()\n    # Fixed: the second comparison was inverted (now < trade_end), which made this\n    # function return False for the whole trading day\n    if now < trade_start or now > 
trade_end:\n return False\n\n if now > trade_mid_stop and now < trade_mid_start:\n return False\n return True\n\n\ndef stock_open_time():\n now = datetime.datetime.now()\n if now > trade_start and now < trade_open_stop:\n return True\n else:\n return False\n\n\ndef init(ct):\n ct.set_account(account_cookie)\n sub = subscriber_routing(exchange='control_to_qmt', routing_key='', host=eventmq_ip)\n sub.callback = control_qmt_cb\n threading.Thread(target=sub.start, daemon=True).start()\n\n param.code_nost = code_list_to_qmt(jl_read('code_nost'))\n param.code_amx = code_list_to_qmt(jl_read('code_amx'))\n\n ct.subscribe_whole_quote(param.code_nost, full_quote_cb)\n param.ct = ct\n\n\ndef full_quote_cb(data):\n\n snapshot_pub.pub(json.dumps({'topic': 'quote', 'data': data}, cls=Py36JsonEncoder), routing_key='full')\n\n log_info.ticks_count += len(data)\n log_info.ticks_loop_count += 1\n if log_info.ticks_loop_count % 500 == 0:\n this_count = log_info.ticks_count - log_info.ticks_last_count\n tnow = time.time()\n tsec = tnow - log_info.ticks_last_time\n speed = int(this_count / tsec)\n print(\n f'Ticks loop {log_info.ticks_loop_count}, ticks : {log_info.ticks_count }, times : {tsec:.2f} speed : {speed}'\n )\n log_info.ticks_last_time = tnow\n log_info.ticks_last_count = log_info.ticks_count\n\n\ndef handlebar(ct):\n\n return\n\n\ndef control_qmt_cb(ct, a, b, data):\n try:\n\n r = json.loads(data)\n print(r)\n\n except:\n pass\n","repo_name":"aare1997/xc","sub_path":"qmt_snap.py","file_name":"qmt_snap.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42320857383","text":"import math\n\na, b = [float(s) for s in input().split(' ')]\n\nn = 6\n\np = a / (a+b)\n\nprop = sum(math.comb(n,i)*p**i*(1-p)**(n-i) for i in range(3,n+1))\n\nprint('%.3f' % prop)\n","repo_name":"andiwand/HackerRankSolutions","sub_path":"10 Days of Statistics/Day 4: Binomial Distribution I/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12850045964","text":"\"\"\"\nFunctions for identifying which platform a machine is\n\"\"\"\nimport contextlib\nimport multiprocessing\nimport os\nimport platform\nimport subprocess\nimport sys\n\nimport distro\n\nfrom salt.utils.decorators import memoize as real_memoize\n\n\ndef linux_distribution(full_distribution_name=True):\n \"\"\"\n Simple function to return information about the OS distribution (id_name, version, codename).\n \"\"\"\n if full_distribution_name:\n distro_name = distro.name()\n else:\n distro_name = distro.id()\n # Empty string fallbacks\n distro_version = distro_codename = \"\"\n with contextlib.suppress(subprocess.CalledProcessError):\n distro_version = distro.version(best=True)\n with contextlib.suppress(subprocess.CalledProcessError):\n distro_codename = distro.codename()\n return distro_name, distro_version, distro_codename\n\n\n@real_memoize\ndef is_windows():\n \"\"\"\n Simple function to return if a host is Windows or not\n \"\"\"\n return sys.platform.startswith(\"win\")\n\n\n@real_memoize\ndef is_proxy():\n \"\"\"\n Return True if this minion is a proxy minion.\n Leverages the fact that is_linux() and is_windows\n both return False for proxies.\n TODO: Need to extend this for proxies that might run on\n other Unices\n \"\"\"\n import __main__ as main\n\n # This is a hack. If a proxy minion is started by other\n # means, e.g. 
a custom script that creates the minion objects\n # then this will fail.\n ret = False\n try:\n # Changed this from 'salt-proxy in main...' to 'proxy in main...'\n # to support the testsuite's temp script that is called 'cli_salt_proxy'\n #\n # Add '--proxyid' or '--proxyid=...' in sys.argv so that salt-call\n # is seen as a proxy minion\n if \"proxy\" in main.__file__ or any(\n arg for arg in sys.argv if arg.startswith(\"--proxyid\")\n ):\n ret = True\n except AttributeError:\n pass\n return ret\n\n\n@real_memoize\ndef is_linux():\n \"\"\"\n Simple function to return if a host is Linux or not.\n Note for a proxy minion, we need to return something else\n \"\"\"\n return sys.platform.startswith(\"linux\")\n\n\n@real_memoize\ndef is_darwin():\n \"\"\"\n Simple function to return if a host is Darwin (macOS) or not\n \"\"\"\n return sys.platform.startswith(\"darwin\")\n\n\n@real_memoize\ndef is_sunos():\n \"\"\"\n Simple function to return if host is SunOS or not\n \"\"\"\n return sys.platform.startswith(\"sunos\")\n\n\n@real_memoize\ndef is_smartos():\n \"\"\"\n Simple function to return if host is SmartOS (Illumos) or not\n \"\"\"\n if not is_sunos():\n return False\n else:\n return os.uname()[3].startswith(\"joyent_\")\n\n\n@real_memoize\ndef is_smartos_globalzone():\n \"\"\"\n Function to return if host is SmartOS (Illumos) global zone or not\n \"\"\"\n if not is_smartos():\n return False\n else:\n try:\n zonename_proc = subprocess.Popen(\n [\"zonename\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n zonename_output = (\n zonename_proc.communicate()[0].strip().decode(__salt_system_encoding__)\n )\n zonename_retcode = zonename_proc.poll()\n except OSError:\n return False\n if zonename_retcode:\n return False\n if zonename_output == \"global\":\n return True\n\n return False\n\n\n@real_memoize\ndef is_smartos_zone():\n \"\"\"\n Function to return if host is SmartOS (Illumos) and not the gz\n \"\"\"\n if not is_smartos():\n return False\n else:\n try:\n zonename_proc = subprocess.Popen(\n [\"zonename\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n zonename_output = (\n zonename_proc.communicate()[0].strip().decode(__salt_system_encoding__)\n )\n zonename_retcode = zonename_proc.poll()\n except OSError:\n return False\n if zonename_retcode:\n return False\n if zonename_output == \"global\":\n return False\n\n return True\n\n\n@real_memoize\ndef is_junos():\n \"\"\"\n Simple function to return if host is Junos or not\n \"\"\"\n return sys.platform.startswith(\"freebsd\") and os.uname().release.startswith(\"JNPR\")\n\n\n@real_memoize\ndef is_freebsd():\n \"\"\"\n Simple function to return if host is FreeBSD or not\n \"\"\"\n return sys.platform.startswith(\"freebsd\")\n\n\n@real_memoize\ndef is_netbsd():\n \"\"\"\n Simple function to return if host is NetBSD or not\n \"\"\"\n return sys.platform.startswith(\"netbsd\")\n\n\n@real_memoize\ndef is_openbsd():\n \"\"\"\n Simple function to return if host is OpenBSD or not\n \"\"\"\n return sys.platform.startswith(\"openbsd\")\n\n\n@real_memoize\ndef is_aix():\n \"\"\"\n Simple function to return if host is AIX or not\n \"\"\"\n return sys.platform.startswith(\"aix\")\n\n\n@real_memoize\ndef is_fedora():\n \"\"\"\n Simple function to return if host is Fedora or not\n \"\"\"\n (osname, osrelease, oscodename) = (\n x.strip('\"').strip(\"'\") for x in linux_distribution()\n )\n return osname == \"Fedora\"\n\n\n@real_memoize\ndef is_photonos():\n \"\"\"\n Simple function to return if host is Photon OS or not\n \"\"\"\n 
(osname, osrelease, oscodename) = (\n x.strip('\"').strip(\"'\") for x in linux_distribution()\n )\n return osname == \"VMware Photon OS\"\n\n\n@real_memoize\ndef is_aarch64():\n \"\"\"\n Simple function to return if host is AArch64 or not\n \"\"\"\n return platform.machine().startswith(\"aarch64\")\n\n\ndef spawning_platform():\n \"\"\"\n Returns True if multiprocessing.get_start_method(allow_none=False) returns \"spawn\"\n\n This is the default for Windows Python >= 3.4 and macOS on Python >= 3.8.\n Salt, however, will force macOS to spawning by default on all python versions\n \"\"\"\n return multiprocessing.get_start_method(allow_none=False) == \"spawn\"\n","repo_name":"saltstack/salt","sub_path":"salt/utils/platform.py","file_name":"platform.py","file_ext":"py","file_size_in_byte":5981,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"12829480921","text":"import argparse\nfrom datamodule import NSDDatamodule\nfrom plmodels import PlVEModel\nfrom read_utils import (\n read_config,\n read_short_config,\n read_score_df,\n list_runs_from_exp_names,\n find_runs_from_exp_dir,\n read_test_voxel_score,\n)\n\nimport os\nimport re\nimport torch\nimport numpy as np\nimport pandas as pd\nimport pytorch_lightning as pl\nimport cortex\n\nfrom PIL import Image\n\nimport copy\n\nimport cortex\nfrom matplotlib import pyplot as plt\n\nplt.style.use(\"dark_background\")\nfrom config import AutoConfig\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"Ray Tune\")\n parser.add_argument(\n \"--exp_dir\", type=str, default=\"/nfscc/alg23/xdcac/b3\", help=\"exp dir\"\n )\n parser.add_argument(\n \"--beta\", type=str, default=\"b3\", help=\"beta\"\n )\n parser.add_argument(\n \"--save_dir\", type=str, default=\"/nfscc/fig/alg23/xdcad/\", help=\"save dir\"\n )\n parser.add_argument(\"--overwrite\", action=\"store_true\", help=\"overwrite\")\n return parser\n\n\nargs = get_parser().parse_args()\n\nROIS = (\n []\n + [\"Primary_Visual\", \"Visual\", \"Posterior\", \"Somatomotor\", \"Auditory\", \"Anterior\"]\n + [\"all\"]\n)\n\n# BIG_ROIS = [\"all\", \"Visual\", \"Somatomotor\", \"Auditory\", \"Posterior\", \"Anterior\"]\n\n# VISUAL_ROIS = [\"Primary_Visual\", \"Visual\", \"Posterior\", \"Somatomotor\", \"Auditory\", \"Anterior\"]\n\n\ndef job(run):\n cfg: AutoConfig = read_config(run)\n tune_dict = read_short_config(run)\n subject = cfg.DATASET.SUBJECT_LIST[0]\n # t = cfg.EXPERIMENTAL.T_IMAGE\n # all_t = cfg.EXPERIMENTAL.USE_PREV_FRAME\n # rand = cfg.EXPERIMENTAL.SHUFFLE_IMAGES\n row = tune_dict[\"row\"]\n vs = read_test_voxel_score(run)\n vs = vs[subject][f\"TEST/PearsonCorrCoef/{subject}/all\"]\n dm = NSDDatamodule(cfg)\n dm.setup()\n ds = dm.dss[0][subject]\n roi_dict = ds.roi_dict\n v_list = []\n for roi in ROIS:\n v = vs[roi_dict[roi]].mean()\n v_list.append(v)\n data = (subject, row, run, *v_list)\n return data\n # datas.append(data)\n\nbeta = args.beta\ndf_path = f'/tmp/xdcad_{beta}.pkl'\n# if os.path.exists(df_path):\n# df = torch.load(df_path)\n# else:\nexp_dir = args.exp_dir.replace('b3', beta)\nruns = find_runs_from_exp_dir(exp_dir)\nprint(len(runs))\n\nimport multiprocessing as mp\n\nwith mp.Pool(16) as pool:\n datas = pool.map(job, runs)\n\ndf = pd.DataFrame(\n datas, columns=[\"subject\", \"row\", \"run\", *ROIS]\n).sort_values([\"subject\", \"row\"])\n\ntorch.save(df, df_path)\n \n \ndef print_csv(df):\n print(df.to_csv(index=False, float_format=\"%.3f\"))\n\nhide_col = ['subject', 'run']\ndf = 
df.drop(columns=hide_col)\n\n# mean over same row\nmean_df = df.groupby(['row']).mean().reset_index()\nstd_df = df.groupby(['row']).std().reset_index()\n\nprint_csv(mean_df)\nprint_csv(std_df)\n\n","repo_name":"huzeyann/MemoryEncodingModel","sub_path":"mem/scripts_paper/xdcad_ablation_table.py","file_name":"xdcad_ablation_table.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"78"} +{"seq_id":"3787524800","text":"class Node():\n    def __init__(self,data=None,next=None,previous=None) -> None:\n        self.next=next\n        self.data=data\n        self.previous=previous\n\n\nclass LinkList():\n    # Fixed: the class inherited from the undefined name `self`; it now starts\n    # with an empty list, matching the None check in NewAdd below\n    def __init__(self) -> None:\n        self.head=None\n\n    def NewAdd(self,data):\n        # Insert a new node at the head of the doubly linked list\n        node=Node(data)\n        if self.head==None:\n            self.head=node\n        else:\n            self.head.previous=node\n            node.next=self.head\n            self.head=node\n\n    def Del(self,data):\n        # Find the first node holding `data`, then unlink it from both neighbours\n        temp=self.head\n        while temp!=None and temp.data!=data:\n            temp=temp.next\n        if temp==None:\n            return\n        if temp.previous:\n            temp.previous.next=temp.next\n        else:\n            self.head=temp.next\n        if temp.next:\n            temp.next.previous=temp.previous\n        temp.next=None\n        temp.previous=None\n        return\n","repo_name":"MrTypeError/DSA","sub_path":"Link Lists/DoubliLinkList.py","file_name":"DoubliLinkList.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"6936083656","text":"def mu_online(dungeon_rooms):\n    health = 100\n    bitcoins = 0\n    room_counter = 0\n    for room in dungeon_rooms:\n        command, number = room.split()\n        number = int(number)\n        room_counter += 1\n        if health <= 0:\n            break\n        if command == 'potion':\n            health += number\n\n            if health > 100:\n                health_heal = 100 - (health - number)\n                health = 100\n                print(f\"You healed for {health_heal} hp.\")\n                print(f\"Current health: {health} hp.\")\n            elif health <= 100:\n                print(f\"You healed for {number} hp.\")\n                print(f\"Current health: {health} hp.\")\n            else:\n                continue\n\n        elif command == 'chest':\n            bitcoins += number\n            print(f\"You found {number} bitcoins.\")\n\n        else:\n            health -= number\n            if health > 0:\n                print(f\"You slayed {command}.\")\n            else:\n                print(f\"You died! 
Killed by {command}.\")\n print(f\"Best room: {room_counter}\")\n\n if health > 0:\n print(\"You've made it!\")\n print(f\"Bitcoins: {bitcoins}\")\n print(f\"Health: {health}\")\n return dungeon_rooms\n\n\nrooms = input().split('|')\nmu_online(rooms)","repo_name":"IvayloStefanovMitev/Programing-Fundamentals-Pytho-Sept-2022","sub_path":"preparation_for_mid_exam/mu_online.py","file_name":"mu_online.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9435386801","text":"import tkinter as tk\n\n# region 计算器 重写版\nwindow = tk.Tk()\nwindow.title('计算器')\nwindow.geometry('195x182')\n\n\ndef caculate():\n if v.get() == '请输入' or v.get() == '':\n v.set('0')\n try:\n v.set(eval(v.get()))\n except:\n v.set('输入错误')\n\n\ndef show(p):\n if v.get() == '请输入':\n v.set('')\n tmp = v.get()\n v.set(tmp + p)\n\n\nv = tk.StringVar()\nv.set('请输入')\nl = tk.Label(window, bg='white', textvariable=v, height=1, width=27, justify='left').grid(row=0, column=0, columnspan=4)\nc = tk.Button(window, text='C', width=4, height=1, bg='white', command=lambda: v.set('')).grid(row=1, column=0, ipadx=6, pady=1)\nd = tk.Button(window, text='D', width=4, height=1, bg='white', command=lambda: v.set('')).grid(row=1, column=1, ipadx=6, pady=1)\nbk = tk.Button(window, text='←', width=4, height=1, bg='white', command=lambda: v.set(v.get()[:-1])).grid(row=1, column=2, ipadx=6, pady=1)\neq = tk.Button(window, text='=', width=4, height=1, bg='white', command=caculate).grid(row=1, column=3, ipadx=6, pady=1)\nb7 = tk.Button(window, text='7', width=4, height=1, bg='white', command=lambda: show('7')).grid(row=2, column=0, ipadx=6, pady=1)\nb8 = tk.Button(window, text='8', width=4, height=1, bg='white', command=lambda: show('8')).grid(row=2, column=1, ipadx=6, pady=1)\nb9 = tk.Button(window, text='9', width=4, height=1, bg='white', command=lambda: show('9')).grid(row=2, column=2, ipadx=6, pady=1)\njia = tk.Button(window, text='+', width=4, height=1, bg='white', command=lambda: show('+')).grid(row=2, column=3, ipadx=6, pady=1)\nb4 = tk.Button(window, text='4', width=4, height=1, bg='white', command=lambda: show('4')).grid(row=3, column=0, ipadx=6, pady=1)\nb5 = tk.Button(window, text='5', width=4, height=1, bg='white', command=lambda: show('5')).grid(row=3, column=1, ipadx=6, pady=1)\nb6 = tk.Button(window, text='6', width=4, height=1, bg='white', command=lambda: show('6')).grid(row=3, column=2, ipadx=6, pady=1)\njian = tk.Button(window, text='-', width=4, height=1, bg='white', command=lambda: show('-')).grid(row=3, column=3, ipadx=6, pady=1)\nb1 = tk.Button(window, text='1', width=4, height=1, bg='white', command=lambda: show('1')).grid(row=4, column=0, ipadx=6, pady=1)\nb2 = tk.Button(window, text='2', width=4, height=1, bg='white', command=lambda: show('2')).grid(row=4, column=1, ipadx=6, pady=1)\nb3 = tk.Button(window, text='3', width=4, height=1, bg='white', command=lambda: show('3')).grid(row=4, column=2, ipadx=6, pady=1)\nchen = tk.Button(window, text='*', width=4, height=1, bg='white', command=lambda: show('*')).grid(row=4, column=3, ipadx=6, pady=1)\nb0 = tk.Button(window, text='0', width=4, height=1, bg='white', command=lambda: show('0')).grid(row=5, column=0, ipadx=30, pady=1, columnspan=2)\ndian = tk.Button(window, text='.', width=4, height=1, bg='white', command=lambda: show('.')).grid(row=5, column=2, ipadx=6, pady=1)\nchu = tk.Button(window, text='/', width=4, height=1, bg='white', command=lambda: show('/')).grid(row=5, column=3, 
ipadx=6, pady=1)\n\nwindow.mainloop()\n# endregion\n\n\n'''\n# region 窗口(计算器)\nwindow2 = tk.Tk()\nwindow2.title('计算器')\nwindow2.geometry('500x800')\nx1 = 0\nx2 = 0\nsymbol = ''\nval = tk.StringVar()\nl = tk.Label(window2, bg='white', textvariable=val, font=('Arial', 12), width=20, height=1)\ne = tk.Entry(window2, width=20, show=None)\n\n\n# t = tk.Text(window2, width=10, height=1)\n\n\ndef clean():\n e.delete(0, 'end')\n\n\ndef add_point():\n val = e.get()\n if '.' not in val:\n e.insert('end', '.')\n\n\ndef add_0():\n val = e.get()\n if val != '':\n e.insert('end', '0')\n\n\ndef add_1():\n e.insert('end', '1')\n\n\ndef add_2():\n e.insert('end', '2')\n\n\ndef add_3():\n e.insert('end', '3')\n\n\ndef add_4():\n e.insert('end', '4')\n\n\ndef add_5():\n e.insert('end', '5')\n\n\ndef add_6():\n e.insert('end', '6')\n\n\ndef add_7():\n e.insert('end', '7')\n\n\ndef add_8():\n e.insert('end', '8')\n\n\ndef add_9():\n e.insert('end', '9')\n\n\ndef suan(n1, n2, sym):\n global x1, x2\n if sym == '':\n pass\n elif symbol == '+':\n n1 = n1 + n2\n n2 = 0\n x1 = n1\n x2 = n2\n\n\ndef jia():\n global x1, x2, symbol\n v = e.get()\n if v == '':\n v = '0'\n \n symbol = '+'\n suan(x1, x2, symbol)\n e.delete(0, 'end')\n\n\n# def jian():\n# global result\n# v = e.get()\n# if v != '':\n# result -= float(v)\n# e.delete(0, 'end')\n#\n#\n# def chen():\n# global result\n# v = e.get()\n# if v != '':\n# result *= float(v)\n# e.delete(0, 'end')\n#\n#\n# def chu():\n# global val\n# global result\n# v = e.get()\n# if v != '':\n# result /= float(v)\n# e.delete(0, 'end')\n# else:\n# val.set('被除数不能为0')\n\ndef dengyu():\n global x1, x2, symbol, val\n v = e.get()\n if v == '':\n v = '0'\n x2 = float(v)\n suan(x1, x2, symbol)\n symbol = ''\n e.delete(0, 'end')\n val.set(str(x1))\n\n\nq = tk.Button(window2, text='退出', width=15, height=1, command=window2.quit)\nb_point = tk.Button(window2, text='.', width=15, height=1, command=add_point)\nb0 = tk.Button(window2, text='0', width=15, height=1, command=add_0)\nb1 = tk.Button(window2, text='1', width=15, height=1, command=add_1)\nb2 = tk.Button(window2, text='2', width=15, height=1, command=add_2)\nb3 = tk.Button(window2, text='3', width=15, height=1, command=add_3)\nb4 = tk.Button(window2, text='4', width=15, height=1, command=add_4)\nb5 = tk.Button(window2, text='5', width=15, height=1, command=add_5)\nb6 = tk.Button(window2, text='6', width=15, height=1, command=add_6)\nb7 = tk.Button(window2, text='7', width=15, height=1, command=add_7)\nb8 = tk.Button(window2, text='8', width=15, height=1, command=add_8)\nb9 = tk.Button(window2, text='9', width=15, height=1, command=add_9)\njia = tk.Button(window2, text='+', width=15, height=1, command=jia)\n# jian = tk.Button(window2, text='-', width=15, height=1, command=jian)\n# chen = tk.Button(window2, text='*', width=15, height=1, command=chen)\n# chu = tk.Button(window2, text='/', width=15, height=1, command=chu)\nc = tk.Button(window2, text='C', width=15, height=1, command=clean)\ndengyu = tk.Button(window2, text='=', width=15, height=1, command=dengyu)\n\nl.pack()\ne.pack()\nb_point.pack()\nb0.pack()\nb1.pack()\nb2.pack()\nb3.pack()\nb4.pack()\nb5.pack()\nb6.pack()\nb7.pack()\nb8.pack()\nb9.pack()\nq.pack()\nc.pack()\njia.pack()\n# jian.pack()\n# chen.pack()\n# chu.pack()\ndengyu.pack()\n\nwindow2.mainloop()\n# endregion\n'''\n","repo_name":"glacierck/myProject","sub_path":"学习库/Tkinter可视化操作模块/exercise (计算器).py","file_name":"exercise 
(计算器).py","file_ext":"py","file_size_in_byte":6488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29238782202","text":"import time\r\nimport sys\r\nimport cv2\r\nimport numpy as np\r\nimport glob\r\n\r\nfrom ultralytics import YOLO\r\n\r\n# numpy 전체 배열\r\nnp.set_printoptions(threshold=sys.maxsize)\r\n\r\nmodel = YOLO(r'C:\\Users\\sh\\Desktop\\sunghyun\\handalab\\yolo_test\\runs\\segment\\train2\\weights\\best.pt')\r\nresults = model.predict(source=r\"C:\\Users\\sh\\Desktop\\sunghyun\\handalab\\yolo_test\\ALC_TEST_2-2\\test\\images\")\r\npath = glob.glob('./ALC_TEST_2-2/test/images/*.jpg')\r\n\r\nfont = cv2.FONT_HERSHEY_PLAIN\r\n\r\nfor i in range(len(results)):\r\n start = time.time()\r\n masks = results[i].masks\r\n boxes = results[i].boxes\r\n\r\n img = cv2.imread(path[i])\r\n poly = np.zeros((640, 640, 3), dtype=np.uint8)\r\n for j in range(len(masks)):\r\n x, y, w, h = boxes.xywh[j][:4] # box with xywh format, (N, 4)\r\n p1, p2 = (int(boxes.xyxy[j][0]), int(boxes.xyxy[j][1])), (int(boxes.xyxy[j][2]), int(boxes.xyxy[j][3]))\r\n cls = boxes.cls[j] # cls, (N, 1)\r\n\r\n if int(cls) == 0:\r\n clss = 'ALC'\r\n\r\n seg = (masks.segments[j] * 640)\r\n seg = seg.astype(np.int32)\r\n\r\n x_dis = (np.max(seg.T[0]) - np.min(seg.T[0]))\r\n y_dis = (np.max(seg.T[1]) - np.min(seg.T[1]))\r\n poly = cv2.fillPoly(poly, [seg], (0, 0, 255))\r\n\r\n img = cv2.rectangle(img,\r\n (int(x - w / 2),\r\n int(y - h / 2)),\r\n (int(x + w / 2 + (w % 2)),\r\n int(y + h / 2 + (h % 2))),\r\n (0, 0, 222), 2)\r\n\r\n img = cv2.putText(img, \"{}\".format(clss), (p1[0], p1[1] - 2), font, 1.5, (0, 0, 255), 0)\r\n img = cv2.putText(img, f\"X : {x_dis} Y : {y_dis}\", (p1[0] + 50, p1[1] - 2), font, 1.5, (0, 255, 0), 0)\r\n\r\n end = time.time()\r\n print(f\"{end - start:.5f} sec\")\r\n\r\n img = cv2.addWeighted(img, 0.7, poly, 0.3, 12)\r\n cv2.imshow(\"img\", img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n\r\n","repo_name":"Chosunghyun9806/Corp.handalab","sub_path":"yolo/seg_test2.py","file_name":"seg_test2.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19109928831","text":"# prep_write_var1d_val2d_v1.py\r\n# Purpose:\r\n# matches var 1d-list to val 2d-list\r\n# prints var and val[index]\r\n# puts a delay for each index iteration \r\n# prep - write - break - read \r\n# import file/class to create csv file with the result\r\n\r\n# packages \r\nimport time\r\n\r\n# ============================================================\r\n# import files/classes\r\nfrom list_from_filetxt import ListFromText\r\nfrom multiple_filestxt_into2d_matrix import TwoDArrayMAker\r\nfrom write_file_csv import CSVWriter\r\n# ============================================================\r\n\r\nclass AssingVar1DtoVal2D:\r\n\r\n def __init__ (self, onlineapp, input_txt_inputs, input_txt_outputs, input_list, output_csv ):\r\n\r\n self.sleep_t05 = 1 #s\r\n self.sleep_t1 = 2 #s\r\n self.onlineapp = onlineapp\r\n self.input_txt_inputs = input_txt_inputs\r\n self.input_txt_outputs = input_txt_outputs\r\n self.input_list = input_list\r\n self.output_csv = output_csv \r\n\r\n def input_val_output_csv_maker (self):\r\n\r\n # open csv file - write headers\r\n obj_CSVWriter = CSVWriter(\"\", \"\", self.output_csv)\r\n obj_CSVWriter.write_csv()\r\n\r\n # ------------------ list of inputs and outputs --------------\r\n # call function:\r\n # get list of inputs\r\n obj_input_list = 
ListFromText(self.input_txt_inputs)\r\n        list_of_in_var = obj_input_list.list_maker_from_txt()\r\n        # get list of outputs\r\n        obj_output_list = ListFromText(self.input_txt_outputs)\r\n        list_of_out_var = obj_output_list.list_maker_from_txt()\r\n        # -------------------------------------------------------------\r\n\r\n        # ------------------ list of inputs values -------------------\r\n        # call function:\r\n        obj_TwoDArrayMAker = TwoDArrayMAker(self.input_list)\r\n        input_values_dict = obj_TwoDArrayMAker.list_files_txt_to_dict_maker()\r\n        list_of_in_val = obj_TwoDArrayMAker.dict_to_2d_matrix()\r\n        # -------------------------------------------------------------\r\n        for i in range(len(list_of_in_val)):\r\n\r\n            for j in range(len(list_of_in_var)):\r\n                \r\n                # prep & write value\r\n                self.onlineapp.set_prepared_value(list_of_in_var[j],list_of_in_val[i][j])\r\n                self.onlineapp.write_prepared_values()\r\n\r\n                time.sleep(self.sleep_t05)\r\n                \r\n                # read value of iVar1\r\n                input_val = self.onlineapp.read_value(list_of_in_var[j])\r\n                print(list_of_in_var[j])\r\n                print(input_val)\r\n\r\n                # writes input var/val in csv\r\n                obj_CSVWriter = CSVWriter(list_of_in_var[j], list_of_in_val[i][j], self.output_csv)\r\n                obj_CSVWriter.append_csv()\r\n\r\n                time.sleep(self.sleep_t1)\r\n\r\n            for k in range(len(list_of_out_var)):\r\n                \r\n                # read value of iVar1\r\n                output_val = self.onlineapp.read_value(list_of_out_var[k])\r\n                print(list_of_out_var[k])\r\n                print(output_val)\r\n\r\n                # writes output var/val in csv\r\n                obj_CSVWriter = CSVWriter(list_of_out_var[k], output_val, self.output_csv)\r\n                obj_CSVWriter.append_csv()\r\n\r\n\r\n","repo_name":"kolo0225/display_codesys_auto_pytest","sub_path":"prep_write_var1d_val2d_v1.py","file_name":"prep_write_var1d_val2d_v1.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32515678390","text":"def task_1():\n    with open(\"resources/inputFiles/input3.txt\") as f:\n        lines = f.readlines()\n    prio = 0\n\n    for line in lines:\n        line = line.replace(\"\\n\", \"\")\n\n        comp1 = sorted(line[:int(len(line)/2)])\n        comp2 = sorted(line[int(len(line)/2):])\n        \n        for char in comp1:\n            if comp2.__contains__(char):\n                prio += getCharValue(char)\n                break\n            \n    return prio\n\n\ndef task_2():\n    with open(\"resources/inputFiles/input3.txt\") as f:\n        lines = f.readlines()\n    prio = 0\n    \n    for i, _ in enumerate(lines):\n        if i % 3 != 0: continue\n        \n        for char in lines[i]:\n            if lines[i+1].__contains__(char) and lines[i+2].__contains__(char):\n                prio += getCharValue(char)\n                break\n            \n    return prio\n    \n    \ndef getCharValue(char:str):\n    offset = 38 if char.isupper() else 96\n    return ord(char) - offset\n    \n    \nprint(task_1())\nprint(task_2())","repo_name":"derfium/advent-of-code-2022","sub_path":"src/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42054377973","text":"\"\"\"\r\n    Enema module (core): Text / Strings processing\r\n    Copyright (C) 2011 Valeriy Bogachuk\r\n    \r\n    This program is free software: you can redistribute it and/or modify\r\n    it under the terms of the GNU General Public License as published by\r\n    the Free Software Foundation, either version 3 of the License, or\r\n    (at your option) any later version.\r\n\r\n    This program is distributed in the hope that it will be useful,\r\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n    MERCHANTABILITY or FITNESS FOR A 
PARTICULAR PURPOSE.  See the\r\n    GNU General Public License for more details.\r\n\"\"\"\r\n\r\nimport random\r\nimport base64\r\nimport string as strings\r\n\r\n#Convert string to base64:\r\ndef base64proc(string, mode, encoding):\r\n    if mode == \"enc\":\r\n        readyStr = base64.b64encode(bytes(string, encoding))\r\n    else:\r\n        try:\r\n            readyStr = base64.b64decode(bytes(string, encoding))\r\n        except Exception:\r\n            return \" - invalid string - \"\r\n    return str(readyStr, encoding)\r\n    \r\n#Convert string to HEX:\r\ndef strToHex(string, isCmdHex):\r\n    hexStr = ''.join((hex(ord(symbol)) for symbol in string))\r\n    if isCmdHex:\r\n        cmdhex = \"0x\" + hexStr.replace(\"0x\", \"\")\r\n        return cmdhex\r\n    return hexStr\r\n    \r\n#Convert string to SQL char:\r\ndef strToSqlChar(string, dbtype):\r\n    if dbtype == \"MySQL\":\r\n        encoded = ','.join((str(ord(symbol)) for symbol in string))\r\n    else:\r\n        encoded = ')+char('.join((hex(ord(symbol)) for symbol in string))\r\n    return 'char(' + encoded + \")\"\r\n\r\n#Random changing uppercase\r\ndef rndUpCase(string):\r\n    string = ''.join(random.choice([s.upper(), s]) for s in string)\r\n    return string\r\n\r\n#Special keywords handler.\r\ndef extractString(string, specKw):\r\n    keyword = \"[\" + specKw + \"^\"\r\n    fromStr = string.find(keyword)\r\n    fromStr += len(keyword)\r\n    toStr = string.find(\"^]\", fromStr, len(string))\r\n    substring = string[fromStr:toStr]\r\n    string = ''.join((string[:fromStr - len(keyword)] + \"ERASEDSUBSTRING\" + string[toStr + 2:]))\r\n    return {'str' : string, 'substr' : substring, 'kword' : specKw}\r\n\r\ndef rndString(length):\r\n\talpha_numeric_str = ''.join([random.choice(strings.ascii_letters + strings.digits) for s in range(length)])\r\n\treturn alpha_numeric_str\r\n\r\n#Symbols recovery to readable format\r\ndef recoverSymbols(cmdResult):\r\n    symbols = {\r\n    '&lt;' : '<',\r\n    '&gt;' : '>',\r\n    '&quot;' : '\"', \r\n    '&nbsp;' : chr(160), \r\n    '&#160;' : chr(160)}\r\n    for key in symbols:\r\n        cmdResult = cmdResult.replace(key, symbols[key])\r\n    return cmdResult\r\n\r\n#Rounding time\r\ndef roundTime(tm):\r\n    seconds = str(tm).split(\".\")\r\n    try:\r\n        seconds = int(seconds[0])\r\n    except:\r\n        return 0\r\n    return seconds\r\n    \r\n","repo_name":"kaeso/enema","sub_path":"core/txtproc.py","file_name":"txtproc.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"14612394091","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nfrom scipy.ndimage import shift\n\nsns.set(style='whitegrid', color_codes=True)\n\ndef plot_digit(data):\n    image = data.reshape(28, 28)\n    plt.imshow(image, cmap=matplotlib.cm.binary, interpolation=\"nearest\")\n    plt.axis(\"off\")\n    plt.show()\n\ndef plot_digits(instances, images_per_row=10, **options):\n    size = 28\n    images_per_row = min(len(instances), images_per_row)\n    images = [instance.reshape(size, size) for instance in instances]\n    n_rows = (len(instances) - 1) // images_per_row + 1\n    row_images = []\n    n_empty = n_rows * images_per_row - len(instances)\n    images.append(np.zeros((size, size * n_empty)))\n    for row in range(n_rows):\n        rimages = images[row * images_per_row : (row+1) * images_per_row]\n        row_images.append(np.concatenate(rimages, axis=1))\n    image = np.concatenate(row_images, axis=0)\n    plt.imshow(image, cmap=matplotlib.cm.binary, **options)\n    plt.axis(\"off\")\n    plt.show()\n\n\ndef plot_precision_recall_vs_threshold(precisions, recalls, thresholds):\n    plt.plot(thresholds, precisions[:-1], \"b--\", 
label=\"Precision\", linewidth=2)\n plt.plot(thresholds, recalls[:-1], \"g-\", label=\"Recall\", linewidth=2)\n plt.xlabel(\"Threshold\", fontsize=16)\n plt.legend(loc=\"center left\", fontsize=16)\n plt.xlim([-700000, 700000])\n plt.show()\n\n\ndef plot_precision_vs_recall(precisions, recalls):\n plt.plot(recalls, precisions, \"b-\", linewidth=2)\n plt.xlabel(\"Recall\", fontsize=16)\n plt.ylabel(\"Precision\", fontsize=16)\n plt.axis([0, 1, 0, 1])\n plt.show()\n\n\ndef plot_roc_curve(fpr, tpr, label=None):\n plt.plot(fpr, tpr, linewidth=2, label=label)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlabel('1-TNR', fontsize=16)\n plt.ylabel('TPR', fontsize=16)\n plt.legend(loc=\"lower right\", fontsize=16)\n plt.show()\n\n\ndef plot_confusion_matrix(matrix):\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(111)\n cax = ax.matshow(matrix)\n fig.colorbar(cax)\n plt.show()\n\n\ndef shift_image(image, dx, dy):\n image = image.reshape((28, 28))\n shifted_image = shift(image, [dy, dx], cval=0, mode='constant')\n return shifted_image.reshape([-1])","repo_name":"Gil-jung/DSBookStudy","sub_path":"HandsOnML/functions/plot_mnist.py","file_name":"plot_mnist.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"56812223","text":"import unittest\n\nfrom . import pivot_index\n\n\nclass PivotIndexTestCases(unittest.TestCase):\n def test_one(self):\n \"\"\"should return 3 from nums=[1,7,3,6,5,6]\"\"\"\n nums = [1, 7, 3, 6, 5, 6]\n expected = 3\n actual = pivot_index(nums)\n self.assertEqual(expected, actual)\n\n def test_two(self):\n \"\"\"should return -1 from nums=[1,2,3]\"\"\"\n nums = [1, 2, 3]\n expected = -1\n actual = pivot_index(nums)\n self.assertEqual(expected, actual)\n\n def test_three(self):\n \"\"\"should return 0 from nums=[2,1,-1]\"\"\"\n nums = [2, 1, -1]\n expected = 0\n actual = pivot_index(nums)\n self.assertEqual(expected, actual)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"BrianLusina/PythonSnips","sub_path":"puzzles/prefix_sum/find_pivot_index/test_find_pivot_index.py","file_name":"test_find_pivot_index.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"28251361143","text":"from functools import reduce\n\ndata = open(\"input.txt\").read()\ngroups = data.split(\"\\n\\n\")\n\np1_groups = [g.replace(\"\\n\", \"\") for g in groups]\nchars = [set(g) for g in p1_groups]\nprint(\"Part 1: \", sum(len(cc) for cc in chars))\n\np2_groups = [[set(l) for l in g.split(\"\\n\")] for g in groups]\nints = [reduce(lambda s1,s2: s1.intersection(s2), g) for g in p2_groups]\nprint(\"Part 2: \", sum(len(i) for i in ints))\n\n\n\n","repo_name":"cvermilion/adventofcode","sub_path":"2020/06/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37725414842","text":"import numpy as np\nimport nest\nfrom ...standardmodels import electrodes, build_translations, StandardCurrentSource\nfrom ...common import Population, PopulationView, Assembly\nfrom ...parameters import ParameterSpace, Sequence\nfrom ..simulator import state\nfrom ..electrodes import NestCurrentSource\n\n\nclass NestStandardCurrentSource(NestCurrentSource, StandardCurrentSource):\n \"\"\"Base class for a nest source of current to be injected into a neuron.\"\"\"\n\n def __init__(self, 
**parameters):\n NestCurrentSource.__init__(self, **parameters)\n self.phase_given = 0.0 # required for PR #502\n native_parameters = self.translate(self.parameter_space)\n self.set_native_parameters(native_parameters)\n\n def inject_into(self, cells):\n for id in cells:\n if id.local and not id.celltype.injectable:\n raise TypeError(\"Can't inject current into a spike source.\")\n if isinstance(cells, (Population, PopulationView, Assembly)):\n self.cell_list = cells.node_collection\n else:\n self.cell_list = nest.NodeCollection(sorted(cells))\n nest.Connect(self._device, self.cell_list, syn_spec={\"delay\": state.min_delay})\n\n def _delay_correction(self, value):\n \"\"\"\n A change in a device requires a min_delay to take effect at the target\n \"\"\"\n corrected = value - self.min_delay\n # set negative times to zero\n if isinstance(value, np.ndarray):\n corrected = np.where(corrected > 0, corrected, 0.0)\n else:\n corrected = max(corrected, 0.0)\n return corrected\n\n def _phase_correction(self, start, freq, phase):\n \"\"\"\n Fixes #497 (PR #502)\n Tweaks the value of phase supplied to NEST ACSource\n so as to remain consistent with other simulators\n \"\"\"\n phase_fix = ((phase*np.pi/180) - (2*np.pi*freq*start/1000)) * 180/np.pi\n phase_fix.shape = (1)\n phase_fix = phase_fix.evaluate()[0]\n nest.SetStatus(self._device, {'phase': phase_fix})\n\n def _check_step_times(self, times, amplitudes, resolution):\n # ensure that all time stamps are non-negative\n if np.min(times) < 0:\n raise ValueError(\"Step current cannot accept negative timestamps.\")\n # ensure that times provided are of strictly increasing magnitudes\n if len(times) > 1 and np.min(np.diff(times)) <= 0:\n raise ValueError(\"Step current timestamps should be monotonically increasing.\")\n # NEST specific: subtract min_delay from times (set to 0.0, if result is negative)\n times = self._delay_correction(times)\n # find the last element <= dt (we find >dt and then go one element back)\n # this corresponds to the first timestamp that can be used by NEST for current injection\n ctr = np.searchsorted(times, resolution, side=\"right\") - 1\n if ctr >= 0:\n times[ctr] = resolution\n times = times[ctr:]\n amplitudes = amplitudes[ctr:]\n # map timestamps to actual simulation time instants based on specified dt\n # for ind in range(len(times)):\n # times[ind] = self._round_timestamp(times[ind], resolution)\n times = self._round_timestamp(times, resolution)\n # remove duplicate timestamps, and corresponding amplitudes, after mapping\n step_times, step_indices = np.unique(times[::-1], return_index=True)\n step_times = step_times.tolist()\n step_indices = len(times)-step_indices-1\n step_amplitudes = amplitudes[step_indices] # [amplitudes[i] for i in step_indices]\n return step_times, step_amplitudes\n\n def set_native_parameters(self, parameters):\n parameters.evaluate(simplify=True)\n for key, value in parameters.items():\n if key == \"amplitude_values\":\n assert isinstance(value, Sequence)\n step_times = parameters[\"amplitude_times\"].value\n step_amplitudes = parameters[\"amplitude_values\"].value\n\n step_times, step_amplitudes = self._check_step_times(\n step_times, step_amplitudes, self.timestep)\n parameters[\"amplitude_times\"].value = step_times\n parameters[\"amplitude_values\"].value = step_amplitudes\n nest.SetStatus(self._device, {key: step_amplitudes,\n 'amplitude_times': step_times})\n elif key in (\"start\", \"stop\"):\n nest.SetStatus(self._device, {key: self._delay_correction(value)})\n if key == \"start\" and 
type(self).__name__ == \"ACSource\":\n self._phase_correction(self.start, self.frequency, self.phase_given)\n elif key == \"frequency\":\n nest.SetStatus(self._device, {key: value})\n self._phase_correction(self.start, self.frequency, self.phase_given)\n elif key == \"phase\":\n self.phase_given = value\n self._phase_correction(self.start, self.frequency, self.phase_given)\n elif not key == \"amplitude_times\":\n nest.SetStatus(self._device, {key: value})\n\n def get_native_parameters(self):\n all_params = nest.GetStatus(self._device)[0]\n return ParameterSpace(dict((k, v) for k, v in all_params.items()\n if k in self.get_native_names()))\n\n\nclass DCSource(NestStandardCurrentSource, electrodes.DCSource):\n __doc__ = electrodes.DCSource.__doc__\n\n translations = build_translations(\n ('amplitude', 'amplitude', 1000.),\n ('start', 'start'),\n ('stop', 'stop')\n )\n nest_name = 'dc_generator'\n\n\nclass ACSource(NestStandardCurrentSource, electrodes.ACSource):\n __doc__ = electrodes.ACSource.__doc__\n\n translations = build_translations(\n ('amplitude', 'amplitude', 1000.),\n ('start', 'start'),\n ('stop', 'stop'),\n ('frequency', 'frequency'),\n ('offset', 'offset', 1000.),\n ('phase', 'phase')\n )\n nest_name = 'ac_generator'\n\n\nclass StepCurrentSource(NestStandardCurrentSource, electrodes.StepCurrentSource):\n __doc__ = electrodes.StepCurrentSource.__doc__\n\n translations = build_translations(\n ('amplitudes', 'amplitude_values', 1000.),\n ('times', 'amplitude_times')\n )\n nest_name = 'step_current_generator'\n\n\nclass NoisyCurrentSource(NestStandardCurrentSource, electrodes.NoisyCurrentSource):\n __doc__ = electrodes.NoisyCurrentSource.__doc__\n\n translations = build_translations(\n ('mean', 'mean', 1000.),\n ('start', 'start'),\n ('stop', 'stop'),\n ('stdev', 'std', 1000.),\n ('dt', 'dt')\n )\n nest_name = 'noise_generator'\n","repo_name":"NeuralEnsemble/PyNN","sub_path":"pyNN/nest/standardmodels/electrodes.py","file_name":"electrodes.py","file_ext":"py","file_size_in_byte":6791,"program_lang":"python","lang":"en","doc_type":"code","stars":256,"dataset":"github-code","pt":"78"} +{"seq_id":"45969339482","text":"class Node():\n def __init__(self,data=None):\n self.data = data\n self.left = None\n self.right = None\n\n\nclass BinarySearchTree():\n def __init__(self):\n self.root = None\n\n def _insert_recursive(self,data,node):\n if data[\"id\"] < node.data[\"id\"] :\n if node.left == None:\n node.left = Node(data[\"id\"])\n else :\n self._insert_recursive(data[\"id\"],node.left)\n\n elif data[\"id\"] > node.data :\n if node.right == None:\n node.right = Node(data[\"id\"])\n else :\n self._insert_recursive(data[\"id\"],node.right)\n\n else:\n return\n\n def insert(self,data):\n if self.root ==None:\n self.root = Node(data)\n else:\n self._insert_recursive(data, self.root)\n\n\n\n def _search_recursive(self,blogpost_id,node):\n\n if node.left == None and node.right == None:\n return False\n \n if blogpost_id == node.data[\"id\"]:\n return node.data\n\n if blogpost_id < node.data[\"id\"]:\n if blogpost_id == node.left.data[\"id\"]:\n return node.left.data\n else:\n return self._search_recursive(blogpost_id,node.left)\n\n if blogpost_id > node.data[\"id\"]:\n if blogpost_id == node.right.data[\"id\"]:\n return node.right.data\n else:\n return self._search_recursive(blogpost_id,node.right)\n\n\n def search(self,blogpost_id):\n blogpost_id = int(blogpost_id)\n\n if self.root == None:\n return False\n\n return self._search_recursive(blogpost_id,self.root)\n\n 
","repo_name":"ayushi6560/FlaskDS","sub_path":"binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"40092929507","text":"import importlib\n\ntry:\n importlib.reload(parsers)\nexcept:\n import parsers\n\ntry:\n importlib.reload(streams2)\nexcept:\n import streams2\n\ntry:\n importlib.reload(pipeCommands)\nexcept:\n import pipeCommands\n\n\nclass Packet:\n \"\"\"Holds the data record and other info passed from stage to stage\"\"\"\n\n eof = False\n record = None\n\n def __init__(self, record=None):\n self.record = record\n\n def __repr__(self):\n if self.eof:\n return \"At EOF\"\n else:\n return self.record\n\n\nclass Streams(list):\n \"\"\"base class for InStreams and OutStreams\"\"\"\n\n count = 0\n\n def add(self, stream):\n self.append(stream)\n self.count += 1\n return self.count # streamNo\n\n\nclass InStreams(Streams):\n \"\"\"Each stage has an instance for all its inStreams.\"\"\"\n\n\nclass OutStreams(Streams):\n \"\"\"Each stage has an instance for all its outStreams.\"\"\"\n\n\nclass InvertibleStage:\n \"\"\"Mixin for stages that can be preceded by N e.g. locate nlocate.\n Either use this or invertible property.\"\"\"\n\n\nclass Stage(parsers.SpecificationParser):\n \"\"\"Base class for all stages.\"\"\"\n\n parsers = None\n streamSpecs = \"!!\" # default primary only, in & out required\n streamCount = 0\n maxStreams = 99 ## FO replace with parsing of streamSpecs\n trace = 0 # can be set from global / local options\n status = \"active\"\n rc = 0\n invertible = False\n first = True\n currentOut = None\n currentIn = None\n BNF = None # some stages override this\n modifier = False\n streamsByID = {} # relate streamID to its stream\n\n def __init__(self, name, pipeLineSet, simplePipeLine, cls, stageSpec):\n if Stage.first:\n # add an instance of each pipeline command to the Stage class\n ## FO explore initially setting these as stubs that replace themselves\n ## with the real thing at first call. Real thing is a method that invokes\n ## a method in the command class\n Stage.first = False\n for func in pipeCommands.functions:\n setattr(Stage, func[0], func[1])\n self.name = name\n self.inStreams = InStreams()\n self.outStreams = OutStreams()\n self.packet = Packet() # for source stages\n self.pipeLineSet = pipeLineSet\n self.simplePipeLine = simplePipeLine\n self.parseSpec(stageSpec, self)\n\n def finalizeMaster(self):\n \"\"\"Called by scanner after pipe assembled to check streams vs streamSpec\n and do any other stage-specific finalization.\"\"\"\n self.finalize()\n\n def finalize(self):\n \"\"\"Called by scanner after all stages created.\n Some stages override for specific tasks.\"\"\"\n\n def log(self, msg):\n print(\"%s %s\" % (self.id, msg))\n\n def initialize(self):\n \"\"\"Called each time pipe is run.\n Some stages override e.g. 
file i/o to open; count to reset totals.\"\"\"\n\n # def setupMaster(self, *args):\n # self.setup(*args)\n def setup(self, *args):\n \"\"\"Called by scanner after stage is instantiated so stage can process\n its args.\"\"\"\n\n def eof(self):\n \"\"\"Default action when an instream receives an eof packet.\"\"\"\n self.sendeof1() # pass eof to primary output\n self.exit() # terminate stage\n\n def output(self, record, streamNo=None):\n \"\"\"output to specified or selected stream\"\"\"\n if stream == None:\n streamNo = self.currentOut\n getattr(self, \"output%s\" % streamNo)(record)\n\n # following needed for stages that could be sinks (e.g. Console1)\n def output1(self, *arg):\n pass\n\n def sendeof1(self):\n pass\n\n # or have optional secondary outstream (e.g. locate)\n def output2(self, *arg):\n pass\n\n def sendeof2(self):\n pass\n\n def exit(self, RC=0):\n self.status = \"terminated\"\n self.rc = RC\n\n def short():\n self.inStreams[0].run = self.outStreams[0].send\n\n # pipeline commands\n\n def addStream(self, side=\"both\", streamID=None):\n \"\"\"Implements the pipeline command:\n +-BOTH---+\n >>--ADDSTREAm--+--------+-+-----------+-><\n +-INput--+ +-streamID-+\n +-OUTput-+\n Used also by stageFactory when creating a stage\n and when handling a label reference.\"\"\"\n ## FO connect pipeline commands to BNF parser?\n self.streamCount += 1\n side = side.lower()\n specOK = False\n if side in (\"in\", \"both\"):\n self.addOneStream(self.inStreams, streams2.InStream, streamID)\n specOK = True\n if side in (\"out\", \"both\"):\n self.addOneStream(self.outStreams, streams2.OutStream, streamID)\n specOK = True\n if not specOK:\n self.pipeLineSet.addError(\"Invalid side %s.\" % (side,))\n\n def addOneStream(self, container, cls, streamID):\n if len(container) == self.maxStreams:\n self.pipeLineSet.addError(\"Too many streams.\")\n stream = cls(container, self, streamID)\n container.append(stream)\n\n def select(self, streamSpec):\n \"\"\"Select a stream, e.g. 2\n aelf.output = self.output2\n Record currently selected stream.\n Affects callpipe connectors.\n streamSpec is ordinal or streamId.\"\"\"\n\n ## non-trivial callpipe example\n \"\"\"/* Convert userids to names */\n signal on novalue\n signal on error\n do forever\n 'readto in'\n parse var in userid . +8\n 'callpipe',\n ' cms namefind :userid' userid ':name ( file cottage',\n '|append literal ???',\n '|take 1',\n '|*:'\n end\n error: exit RC*(RC!=12)\"\"\"\n\n def callpipe(self, spec): # addpipe also\n \"\"\"Create a subroutine pipeline using the spec.\n Save the parent stage in and out stream connections.\n Attach the subroutine pipeline as needed to the in and out streams of the parent stage.\n Note that the subroutine pipeline does not have a dispatcher stage. 
It runs as part\n of the current pipeline.\n Connector syntax:\n |--*-+----------------------------------+--:(-1)---|\n +---+--------+-+-------------------+\n +-INput--+ +---+--------+---+\n +-OUTput-+ +-*------+\n +-stream-+\"\"\"\n subPipe = SimplePipeLine(self.pipeLineSet, spec, sub=True)\n # depending on connectors and currently selected stream\n inx = out = 0\n ## modify our inStream and the pipelet's instream\n self.inStreams.connect(inx, subPipe.initialStage.run1)\n\n ## modify our outStream and the pipelet's outStream\n self.outStreams.connect(out, subPipe.stages[-1].output1)\n\n\nclass ModifierStage(Stage):\n \"\"\"CASEI NOT ZONE\"\"\"\n\n modifier = True\n\n\nclass NullStage(Stage):\n \"\"\"stage factory returns instance when error in spec.\"\"\"\n\n def __init__(self):\n pass\n\n\nif __name__ == \"__main__\":\n # test importing & calling pipe command functions\n x = Stage(\"Stage x\", 0, 0)\n assert hasattr(x, \"addStream\")\n x.newaddStream(21) # first call should save things in pipeCommands.syntaxDict\n x.newaddStream(31) # subsequent call retrieves them\n","repo_name":"RossPatterson/python-pipelines","sub_path":"stage2.py","file_name":"stage2.py","file_ext":"py","file_size_in_byte":7256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14617435150","text":"from turtle import *\n\ncolors = ['red', 'blue', 'brown', 'yellow', 'grey']\n\nn=3\n\nfor i in range(len(colors)):\n\n for j in range(n):\n\n pencolor(colors[n-3])\n forward(100)\n left(180-((n-2)*180)/n)\n\n n+=1\n\ndone()","repo_name":"99banroimattroi/trinhdinhhung-fundamentals-c4e26","sub_path":"Session 3/Homework_S3_TrinhDinhHung_C4E26/Turtle_exercises_1.py","file_name":"Turtle_exercises_1.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23861985920","text":"\"\"\"Helper functions for the main Solver Flow\"\"\"\n\nimport pandas as pd\n\nfrom typing import Dict, List, Tuple, Union\nfrom mppsteel.config.mypy_config_settings import MYPY_SCENARIO_TYPE\n\nfrom mppsteel.plant_classes.plant_investment_cycle_class import PlantInvestmentCycle\nfrom mppsteel.plant_classes.capacity_constraint_class import PlantCapacityConstraint\nfrom mppsteel.utility.dataframe_utility import return_furnace_group\nfrom mppsteel.config.model_config import (\n MODEL_YEAR_START,\n INVESTMENT_OFFCYCLE_BUFFER_TOP,\n INVESTMENT_OFFCYCLE_BUFFER_TAIL,\n TECH_MORATORIUM_DATE,\n)\nfrom mppsteel.config.model_scenarios import TECH_SWITCH_SCENARIOS, SOLVER_LOGICS\nfrom mppsteel.config.reference_lists import (\n SWITCH_DICT,\n TECH_REFERENCE_LIST,\n TECHNOLOGY_PHASES,\n FURNACE_GROUP_DICT,\n)\nfrom mppsteel.data_preprocessing.tco_calculation_functions import (\n calculate_green_premium,\n)\nfrom mppsteel.model_solver.tco_and_abatement_optimizer import get_best_choice\nfrom mppsteel.plant_classes.plant_choices_class import PlantChoices\nfrom mppsteel.model_solver.material_usage_class import (\n MaterialUsage,\n create_material_usage_dict,\n)\nfrom mppsteel.utility.log_utility import get_logger\n\n\nlogger = get_logger(__name__)\n\n\ndef return_best_tech(\n tco_reference_data: pd.DataFrame,\n abatement_reference_data: pd.DataFrame,\n business_case_ref: dict,\n variable_costs_df: pd.DataFrame,\n green_premium_timeseries: pd.DataFrame,\n tech_availability: pd.DataFrame,\n tech_avail_from_dict: dict,\n plant_capacities: dict,\n scenario_dict: MYPY_SCENARIO_TYPE,\n investment_container: 
PlantInvestmentCycle,\n plant_choice_container: PlantChoices,\n capacity_constraint_container: PlantCapacityConstraint,\n material_usage_dict_container: MaterialUsage,\n year: int,\n plant_name: str,\n region: str,\n country_code: str,\n base_tech: str = None,\n transitional_switch_mode: bool = False,\n) -> str:\n \"\"\"Function generates the best technology choice from a number of key data and scenario inputs.\n\n Args:\n tco_reference_data (pd.DataFrame): DataFrame containing all TCO components by plant, technology and year.\n abatement_reference_data (pd.DataFrame): DataFrame containing all Emissions Abatement components by plant, technology and year.\n business_case_ref (dict): Standardised Business Cases.\n variable_costs_df (pd.DataFrame): Variable Costs DataFrame.\n green_premium_timeseries (pd.DataFrame): The timeseries containing the green premium values.\n tech_availability (pd.DataFrame): Technology Availability DataFrame\n tech_avail_from_dict (dict): A condensed version of the technology availability DataFrame as a dictionary of technology as key, availability year as value.\n plant_capacities (dict): A dictionary containing plant: capacity/inital tech key:value pairs.\n scenario_dict (dict): Scenario dictionary containing the model run's scenario settings.\n investment_container (PlantInvestmentCycle): The PlantInvestmentCycle Instance containing each plant's investment cycle.\n plant_choice_container (PlantChoices): The PlantChoices Instance containing each plant's choices.\n material_usage_dict_container (MaterialUsage): Container class object that is used to track the material usage within the application.\n year (int): The current model year to get the best technology for.\n plant_name (str): The plant name.\n region (str): The plant's region.\n country_code (str): The country code related to the plant.\n base_tech (str, optional): The current base technology. Defaults to None.\n transitional_switch_mode (bool, optional): Boolean flag that determines if transitional switch logic is active. 
Defaults to False.\n\n    Raises:\n        ValueError: If there is no base technology selected, a ValueError is raised because this provides the foundation for choosing a switch technology.\n\n    Returns:\n        str: Returns the best technology as a string.\n    \"\"\"\n    proportions_dict = TECH_SWITCH_SCENARIOS[str(scenario_dict[\"tech_switch_scenario\"])]\n    solver_logic = SOLVER_LOGICS[str(scenario_dict[\"solver_logic\"])]\n    tech_moratorium = bool(scenario_dict[\"tech_moratorium\"])\n    enforce_constraints = bool(scenario_dict[\"enforce_constraints\"])\n    green_premium_scenario = str(scenario_dict[\"green_premium_scenario\"])\n    scenario_name = str(scenario_dict[\"scenario_name\"])\n    regional_scrap = bool(scenario_dict[\"regional_scrap_constraint\"])\n\n    tco_ref_data = tco_reference_data.copy()\n\n    ## ## RECOMMENDED TO RUN MODEL WITH green_premium_scenario SWITCHED OFF AS THIS FEATURE IS NOT FULLY TESTED.\n    if green_premium_scenario != \"off\":\n        logger.info(\"Running the model with green_premium_scenario switched on\")\n        usd_to_eur_rate = float(scenario_dict[\"usd_to_eur\"])\n        discounted_green_premium_values = calculate_green_premium(\n            variable_costs_df,\n            plant_capacities,\n            green_premium_timeseries,\n            country_code,\n            plant_name,\n            year,\n            usd_to_eur_rate,\n        )\n        for technology in TECH_REFERENCE_LIST:\n            for tco_col in [\"tco_regular_capex\", \"tco_gf_capex\"]:\n                current_tco_value = tco_ref_data.loc[\n                    (year, country_code, technology), tco_col\n                ]\n                tco_ref_data.loc[(year, country_code, technology), tco_col] = (\n                    current_tco_value - discounted_green_premium_values[technology]\n                )\n\n    if not base_tech:\n        raise ValueError(\n            f\"Issue with base_tech not existing: {plant_name} | {year} | {base_tech}\"\n        )\n\n    if not isinstance(base_tech, str):\n        raise ValueError(\n            f\"Issue with base_tech not being a string: {plant_name} | {year} | {base_tech}\"\n        )\n\n    # Valid Switches\n    combined_available_list: List[str] = [\n        tech for tech in SWITCH_DICT if tech in SWITCH_DICT[base_tech]\n    ]\n\n    # Transitional switches\n    if transitional_switch_mode and (base_tech not in TECHNOLOGY_PHASES[\"end_state\"]):\n        # Cannot downgrade tech\n        # Must be current or transitional tech\n        # Must be within the furnace group\n        combined_available_list = list(\n            set(combined_available_list).intersection(\n                set(return_furnace_group(FURNACE_GROUP_DICT, base_tech))\n            )\n        )\n\n    # Availability checks\n    combined_available_list = [\n        tech\n        for tech in combined_available_list\n        if tech_availability_check(\n            tech_availability, tech, year, tech_moratorium=tech_moratorium\n        )\n    ]\n\n    # Add base tech if the technology is technically unavailable but is already in use\n    if (base_tech not in combined_available_list) & (\n        year < tech_avail_from_dict[base_tech]\n    ):\n        combined_available_list.append(base_tech)\n\n    if transitional_switch_mode:\n        cycle_length = investment_container.return_cycle_lengths(plant_name)\n        # Adjust tco values based on transitional switch years\n        tco_ref_data[\"tco_gf_capex\"] = (\n            tco_ref_data[\"tco_gf_capex\"]\n            * cycle_length\n            / (\n                cycle_length\n                - (INVESTMENT_OFFCYCLE_BUFFER_TOP + INVESTMENT_OFFCYCLE_BUFFER_TAIL)\n            )\n        )\n\n    best_choice = get_best_choice(\n        tco_ref_data,\n        abatement_reference_data,\n        country_code,\n        year,\n        base_tech,\n        solver_logic,\n        scenario_name,\n        proportions_dict,\n        combined_available_list,\n        transitional_switch_mode,\n        regional_scrap,\n        plant_choice_container,\n        enforce_constraints,\n        business_case_ref,\n        plant_capacities,\n        material_usage_dict_container,\n        plant_name,\n        region,\n    )\n\n    if not 
isinstance(best_choice, str):\n        raise ValueError(\n            f\"Issue with get_best_choice function returning a nan: {plant_name} | {year} | {base_tech} | {combined_available_list}\"\n        )\n\n    switch_type = \"Trans Switch\" if transitional_switch_mode else \"Main Switch\"\n\n    capacity_constraint_container.update_potential_plant_switcher(\n        year, plant_name, plant_capacities[plant_name], switch_type\n    )\n\n    if best_choice != base_tech:\n        capacity_transaction_result = (\n            capacity_constraint_container.subtract_capacity_from_balance(\n                year, plant_name\n            )\n        )\n        if not capacity_transaction_result:\n            best_choice = base_tech\n\n    else:\n        capacity_constraint_container.remove_plant_from_waiting_list(year, plant_name)\n\n    if enforce_constraints:\n        create_material_usage_dict(\n            material_usage_dict_container,\n            plant_capacities,\n            business_case_ref,\n            plant_name,\n            region,\n            year,\n            best_choice,\n            regional_scrap=regional_scrap,\n            override_constraint=True,\n            apply_transaction=True,\n        )\n\n    return best_choice\n\n\ndef active_check_results(\n    steel_plant_df: pd.DataFrame, year_range: range, inverse: bool = False\n) -> dict:\n    \"\"\"Checks whether each plant in `steel_plant_df` is active for each year in `year_range`.\n\n    Args:\n        steel_plant_df (pd.DataFrame): The Steel Plant DataFrame.\n        year_range (range): The year range used to run each plant check for.\n        inverse (bool, optional): Boolean that determines whether to reverse the order of the dictionary. Defaults to False.\n\n    Returns:\n        dict: A dictionary with the plant names as keys and the boolean active check values as values. Or inversed if `inverse` is set to True.\n    \"\"\"\n\n    def final_active_checker(row: pd.Series, year: int) -> bool:\n        if year < row.start_of_operation:\n            return False\n        if row.end_of_operation and year >= row.end_of_operation:\n            return False\n        return True\n\n    active_check: Dict[Union[int, str], Dict[Union[int, str], bool]] = {}\n    if inverse:\n        for year in year_range:\n            active_check[year] = {}\n            for row in steel_plant_df.itertuples():\n                active_check[year][row.plant_name] = final_active_checker(row, year)\n        return active_check\n    else:\n        for row in steel_plant_df.itertuples():\n            active_check[row.plant_name] = {}\n            for year in year_range:\n                active_check[row.plant_name][year] = final_active_checker(row, year)\n        return active_check\n\n\ndef resort_primary_switchers(\n    primary_switchers_df: pd.DataFrame, waiting_list_dict: dict\n) -> pd.DataFrame:\n    waiting_list_plants = waiting_list_dict.keys()\n    just_waiting_list_plant_df = primary_switchers_df[\n        primary_switchers_df[\"plant_name\"].isin(waiting_list_plants)\n    ]\n    not_waiting_list_plant_df = primary_switchers_df[\n        ~primary_switchers_df[\"plant_name\"].isin(waiting_list_plants)\n    ]\n    return pd.concat(\n        [just_waiting_list_plant_df, not_waiting_list_plant_df]\n    ).reset_index(drop=True)\n\n\ndef get_current_technology(\n    PlantChoiceContainer: PlantChoices,\n    year: int,\n    plant_name: str,\n    year_founded: int,\n    initial_technology: str,\n) -> str:\n    current_tech = \"\"\n    if (year == MODEL_YEAR_START) or (year == year_founded):\n        current_tech = initial_technology\n    else:\n        current_tech = PlantChoiceContainer.get_choice(year - 1, plant_name)\n    return current_tech\n\n\ndef create_solver_entry_dict(\n    PlantChoiceContainer: PlantChoices,\n    year: int,\n    plant_name: str,\n    current_tech: str,\n    switch_tech: str,\n    switch_type: str,\n    update_record: bool = True,\n    update_choice: bool = True,\n) -> dict:\n    entry = {\n        \"year\": year,\n        \"plant_name\": plant_name,\n        \"current_tech\": current_tech,\n        
\"switch_tech\": switch_tech,\n \"switch_type\": switch_type,\n }\n if update_record:\n PlantChoiceContainer.update_records(\"choice\", entry)\n if update_choice:\n PlantChoiceContainer.update_choice(year, plant_name, switch_tech)\n return entry\n\n\ndef return_initial_tech(initial_tech_ref: dict, plant_name: str) -> str:\n return initial_tech_ref[plant_name]\n\n\ndef split_primary_plant_switchers(\n primary_switchers_df: pd.DataFrame,\n PlantInvestmentCycleContainer: PlantInvestmentCycle,\n PlantChoiceContainer: PlantChoices,\n year: int,\n) -> Tuple[dict, dict, dict, dict]:\n closed_plants_current_techs = {}\n new_open_plants = {}\n main_cycle_plants = {}\n trans_switch_plants = {}\n for row in primary_switchers_df.itertuples():\n year_founded = PlantInvestmentCycleContainer.plant_start_years[row.plant_name]\n switch_type = PlantInvestmentCycleContainer.return_plant_switch_type(\n row.plant_name, year\n )\n current_tech = get_current_technology(\n PlantChoiceContainer,\n year,\n row.plant_name,\n year_founded,\n row.initial_technology,\n )\n if current_tech == \"Close plant\":\n closed_plants_current_techs[row.plant_name] = current_tech\n elif (year == year_founded) and (row.status == \"new model plant\"):\n new_open_plants[row.plant_name] = current_tech\n elif switch_type == \"main cycle\":\n main_cycle_plants[row.plant_name] = {\n \"current_tech\": current_tech,\n \"country_code\": row.country_code,\n \"region\": row.rmi_region,\n }\n elif switch_type == \"trans switch\":\n trans_switch_plants[row.plant_name] = {\n \"current_tech\": current_tech,\n \"country_code\": row.country_code,\n \"region\": row.rmi_region,\n }\n return (\n closed_plants_current_techs,\n new_open_plants,\n main_cycle_plants,\n trans_switch_plants,\n )\n\n\ndef map_technology_state(tech: str) -> str:\n \"\"\"Returns the technology phase according to a technology phases dictionary.\n\n Args:\n tech (str): The technology you want to return the technology phase for.\n\n Returns:\n str: The technology phase of `tech`.\n \"\"\"\n for tech_state in TECHNOLOGY_PHASES.keys():\n if tech in TECHNOLOGY_PHASES[tech_state]:\n return tech_state\n return \"\"\n\n\ndef read_and_format_tech_availability(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Formats the technology availability DataFrame.\n\n Args:\n df (pd.DataFrame): A Technology availability DataFrame.\n\n Returns:\n pd.DataFrame: A formatted technology availability DataFrame.\n \"\"\"\n df_c = df.copy()\n df_c.columns = [col.lower().replace(\" \", \"_\") for col in df_c.columns]\n df_c = df_c[\n ~df_c[\"technology\"].isin(\n [\"Close plant\", \"Charcoal mini furnace\", \"New capacity\"]\n )\n ]\n df_c[\"technology_phase\"] = df_c[\"technology\"].apply(\n lambda x: map_technology_state(x)\n )\n col_order = [\n \"technology\",\n \"main_technology_type\",\n \"technology_phase\",\n \"year_available_from\",\n \"year_available_until\",\n ]\n return df_c[col_order].set_index(\"technology\")\n\n\ndef tech_availability_check(\n tech_df: pd.DataFrame,\n technology: str,\n year: int,\n tech_moratorium: bool = False,\n default_year_unavailable: int = 2200,\n) -> bool:\n \"\"\"Checks whether a technology is available in a given year.\n\n Args:\n tech_df (pd.DataFrame): The technology availability DataFrame.\n technology (str): The technology to check availability for.\n year (int): The year to check whether a specified `technology` is available or not.\n tech_moratorium (bool, optional): Boolean flag that determines whether a specified technology is available or not. 
Defaults to False.\n        default_year_unavailable (int): Determines the default year a given technology will not be available from - will be altered according to function logic. Defaults to 2200.\n\n    Returns:\n        bool: A boolean that determines whether a specified `technology` is available in the specified `year`.\n    \"\"\"\n    row = tech_df.loc[technology]\n    year_available_from = row.loc[\"year_available_from\"]\n    technology_phase = row.loc[\"technology_phase\"]\n    year_available_until = default_year_unavailable\n\n    if tech_moratorium and (technology_phase in [\"initial\", \"transitional\"]):\n        year_available_until = TECH_MORATORIUM_DATE\n    if int(year_available_from) <= int(year) < int(year_available_until):\n        # Will be available\n        return True\n    if int(year) <= int(year_available_from):\n        # Will not be ready yet\n        return False\n    if int(year) > int(year_available_until):\n        # Will become unavailable\n        return False\n    return False\n","repo_name":"missionpossiblepartnership/mpp-steel-model","sub_path":"mppsteel/model_solver/solver_flow_helpers.py","file_name":"solver_flow_helpers.py","file_ext":"py","file_size_in_byte":16852,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"21671258122","text":"N = int(input())\nM = int(input())\ntxt = input()\nI = N + 1\nO = N\nTotal = I + O\ncompare = ''\nfor i in range(Total):\n    if i % 2 == 0:\n        compare += 'I'\n    else:\n\n        compare += 'O'\ncnt=0\nfor i in range(len(txt)):\n    if txt[i:i + Total] ==compare:\n        cnt+=1\nprint(cnt)","repo_name":"djs02027/python_algorithm-study","sub_path":"solve.ac/class 3/5525_IOIOI/5525_IOIOI(50점 부분 성공).py","file_name":"5525_IOIOI(50점 부분 성공).py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36365791866","text":"import numpy as np\nfrom function import as_array\nimport weakref\nfrom config import Config\nfrom parent import print\n\n# Improving variable usability\n\nclass Variable():\n    def __init__(self, data, name=None):\n        if data is not None:\n            if not isinstance(data, np.ndarray):\n                raise TypeError('{}은(는) 지원하지 않습니다.'.format(type(data)))\n\n        self.data = data\n        self.name = name\n        self.grad = None\n        self.creator = None\n        self.generation = 0\n\n    def set_creator(self, func):\n        self.creator = func\n        self.generation = func.generation + 1\n\n    def backward(self, retain_grad=False):\n        if self.grad is None:\n            self.grad = np.ones_like(self.data)\n\n        funcs = []\n        seen_set = set()\n\n        def add_func(f):\n            if f not in seen_set:\n                funcs.append(f)\n                seen_set.add(f)\n                funcs.sort(key=lambda x: x.generation)\n\n        add_func(self.creator)\n        \n        while funcs:\n            f = funcs.pop()\n            gys = [output().grad for output in f.outputs]\n            gxs = f.backward(*gys)\n            if not isinstance(gxs, tuple):\n                gxs = (gxs,)\n\n            for x, gx in zip(f.inputs, gxs):\n                if x.grad is None:\n                    x.grad = gx\n                else:\n                    x.grad = x.grad + gx  \n\n                if x.creator is not None:\n                    add_func(x.creator)\n            \n            if not retain_grad:\n                for y in f.outputs:\n                    y().grad = None\n\n    def cleargrad(self):\n        self.grad = None\n\n    @property\n    def shape(self):\n        return self.data.shape\n\n    @property\n    def ndim(self):\n        return self.data.ndim\n\n    @property\n    def size(self):\n        return self.data.size\n\n    @property\n    def dtype(self):\n        return self.data.dtype\n\n    @property\n    def shape(self):\n        return self.data.shape\n\n    def __len__(self):\n        return len(self.data)\n\n    def __repr__(self):\n        if self.data is None:\n            return 'Variable(None)'\n        else:\n            p = str(self.data).replace('\\n', '\\n'+ ' '*9)\n            return 
f'Variable({p})'\n\n def __mul__(self, other):\n return mul(self, other)\n\n\nclass Function():\n def __call__(self, *inputs):\n xs = [x.data for x in inputs]\n ys = self.forward(*xs)\n if not isinstance(ys, tuple):\n ys = (ys,)\n outputs = [Variable(as_array(y)) for y in ys]\n \n if Config.enable_backprop: # 역전파 활성 모드\n self.generation = max([x.generation for x in inputs])\n for output in outputs:\n output.set_creator(self)\n \n self.inputs = inputs\n self.outputs = [weakref.ref(output) for output in outputs]\n \n return outputs if len(outputs) > 1 else outputs[0]\n \n def forward(self, x):\n raise NotImplementedError\n\n def backward(self, gy):\n raise NotImplementedError\n\n\nif __name__ == '__main__':\n x = Variable(np.array([[1, 2, 3], [4, 5, 6]]), name='똥')\n \n print('x.name', x.name)\n print('x.shape', x.shape)\n print('x.ndim', x.ndim)\n print('x.size', x.size)\n print('x.dtype', x.dtype, '\\n')\n print('len(x)', len(x))\n print(x, '\\n')\n","repo_name":"star14ms/Deep_Learning_3","sub_path":"big_step2/step19.py","file_name":"step19.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25636256189","text":"from django.conf.urls import url\nfrom cric.views import *\nfrom cric.utils import match_schedule_generator\n\nurlpatterns = [\n # ---- urls for cric app ----#\n url(r'^rest-api/team-list/$', CricketTeamList.as_view(), name='rest-team-lists'),\n url(r'^rest-api/points-table/$', PointTableList.as_view(), name='rest-points-lists'),\n url(r'^rest-api/team-player-list/(?P[0-9a-f-]+)/$', TeamPlayerList.as_view(), name='rest-team-player-list'),\n url(r'^rest-api/match-list/$', MatchScheduleList.as_view(), name='rest-match-list'),\n url(r'^team-list/$', TeamListingView.as_view(), name='team-list'),\n url(r'^team-details/(?P[0-9a-f-]+)/$', TeamDetailsView.as_view(), name='team-details'),\n url(r'^points-table/$', PointsTableListingView.as_view(), name='points-table'),\n url(r'^match-schedule-generator/$', Matchschedule.as_view(), name='match-schedule-generator'),\n]","repo_name":"AbhiRawat95/CricketTeamAssignment","sub_path":"cric/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23297640189","text":"import os\nimport re\n\nfrom model.article import Article\nfrom read_file.word_util import ChineseToDate\n\n\n# 把所有文章读取分配到模型中\ndef read_file(file_dir_path):\n # 读取全部文章\n articles = []\n files = os.listdir(file_dir_path)\n write_time = None\n background = \"\"\n content = \"\"\n annotation = \"\"\n for file in files:\n print(file.title())\n if file.title().__contains__(\"Store\"):\n continue\n title = get_article_title(file.title())\n back_line = 0\n anno_line = 0\n lines = open(file_dir_path + \"/\" + file, \"r\", encoding=\"utf-8\").readlines()\n for i in range(lines.__len__()):\n lines[i] = lines[i].strip()\n line = lines[i]\n if line.isspace():\n continue\n if match_chinese_data(line):\n write_time = ChineseToDate(line.strip().replace(\" \",\"\"))\n continue\n # 背景\n if line.startswith(\">\"):\n background = line.replace(\">\", \"\").strip()\n back_line = i\n continue\n # 正文之后是注释, 跳过分割线\n if match_division(line):\n anno_line = i\n continue\n if anno_line == 0:\n anno_line = len(lines)\n content = \"\".join(lines[back_line+1: anno_line])\n annotation = \"\".join(lines[anno_line+1: len(lines)])\n\n article = Article(title, write_time, background, content, 
annotation)\n title = \"\"\n write_time = None\n background = \"\"\n content = \"\"\n annotation = \"\"\n articles.append(article)\n\n return articles\n\n\n# 从md标题获取文章标题\ndef get_article_title(file_title):\n pattern = \"[0-9]{3}-(.*)\\.[mM]d\"\n return re.findall(pattern, file_title)[0]\n\n\n# 正则判断是否是分割线\ndef match_division(line):\n pattern = \"\\-{1,50}\"\n result = re.match(pattern, line.strip())\n return result is not None\n\n\n# 判断是否是注释开始\ndef match_annotation(line):\n pattern = \"注  释\"\n return line.strip() == pattern\n\n\n# 正则判断是否是中文的时间\ndef match_chinese_data(line):\n pattern1 = \"[((]一九[一二三四五六七八九○]{2}年[一二三四五六七八九十]{1,2}月([一二三四五六七八九十]{1,3}日)?[))]\"\n #pattern2 = \"(一九[一二三四五六七八九○]{2}年[一二三四五六七八九十]{1,2}月)\"\n return re.match(pattern1, line.strip().replace(\" \", \"\")) is not None\n #or re.match(pattern2, line.strip().replace(\" \", \"\"))\n\n\nif __name__ == \"__main__\":\n print(match_chinese_data(\"(一九五二年一月一日)\"))\n # print(match_chinese_data(\"(一九四五年八月十一日)\"))\n arts = read_file(os.path.abspath(\"..\") + \"/test\")\n for ar in arts:\n print(ar.title)\n print(ar.write_time)\n print(ar.background)\n print(ar.content)\n print(ar.annotation)\n print(\"--------------------\")\n # print(arts.__len__())\n # print(ChineseToDate(\"一九四六年十月三日\"))\n # print(ChineseToDate(\"一九四六年十一月\"))\n\n","repo_name":"steveliu13/maoxuan_analyse","sub_path":"read_file/file_parser.py","file_name":"file_parser.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14363574535","text":"import asyncio\nimport signal\nimport sys\nimport uuid\nfrom datetime import datetime\n\nimport tornado.gen\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.log\nimport tornado.options\nimport tornado.web\n\n\ntornado.options.define(\"port\", default=8888, type=int)\n\nGRACEFUL_SHUTDOWN_CAP_TIMEOUT = 20 # seconds\nGRACEFUL_SHUTDOWN_STEP_TIMEOUT = 5 # seconds\n\nLOG = tornado.log.gen_log\n\n\nasync def heavy_task():\n res = datetime.utcnow()\n await asyncio.sleep(13)\n return str(res)\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n\n def initialize(self):\n self.request_id = uuid.uuid4() # noqa\n LOG.info(\"Request %s started\", self.request_id)\n\n async def prepare(self):\n if self.application.is_shutdown_in_progress():\n LOG.warning(\"Request %s aborted\", self.request_id)\n raise tornado.web.HTTPError(503)\n\n self.application.add_request(self.request_id)\n\n def on_finish(self):\n self.application.remove_request(self.request_id)\n LOG.info(\"Request %s finished\", self.request_id)\n\n\nclass MainHandler(BaseHandler):\n\n async def get(self):\n res = await heavy_task()\n self.write({\"res\": res})\n\n\nclass MyApplication(tornado.web.Application):\n\n active_requests = set()\n sigterm_dt = None\n\n def is_shutdown_in_progress(self):\n return bool(self.sigterm_dt)\n\n def add_request(self, request_id):\n self.active_requests.add(request_id)\n\n def remove_request(self, request_id):\n self.active_requests.discard(request_id)\n\n async def graceful_shutdown(self):\n self.sigterm_dt = datetime.utcnow()\n\n while True:\n if not self.active_requests:\n LOG.info(\"Graceful shutdown: exiting, no active requests.\")\n self.shutdown(exit_code=0)\n\n delta_sec = (datetime.utcnow() - self.sigterm_dt).total_seconds()\n if delta_sec > GRACEFUL_SHUTDOWN_CAP_TIMEOUT:\n LOG.warning(\"Graceful shutdown: timeout exceeded, exiting. 
\"\n \"%d active request(s)\", len(self.active_requests))\n self.shutdown(exit_code=1)\n\n LOG.info(\"Graceful shutdown: waiting, %d active request(s)\",\n len(self.active_requests))\n\n await asyncio.sleep(GRACEFUL_SHUTDOWN_STEP_TIMEOUT)\n\n @staticmethod\n def shutdown(exit_code=0):\n tornado.ioloop.IOLoop.instance().stop()\n sys.exit(exit_code)\n\n\napplication = MyApplication([\n (r\"/\", MainHandler),\n])\n\n\ndef shutdown_handler(signum, frame):\n LOG.warning(\"SIGTERM signal received\")\n tornado.ioloop.IOLoop.instance().add_callback_from_signal(\n application.graceful_shutdown)\n\n\ndef main():\n tornado.options.parse_command_line()\n signal.signal(signal.SIGTERM, shutdown_handler)\n http_server = tornado.httpserver.HTTPServer(application)\n http_server.listen(tornado.options.options.port)\n tornado.ioloop.IOLoop.current().start()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Etoneja/tensor_school","sub_path":"11/backend/src/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13747365344","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QVBoxLayout, QRadioButton, QComboBox, QSlider, \\\n QPushButton\nfrom PyQt5.QtCore import Qt\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom scipy import optimize\n\n\nclass Polynomial:\n\n def __init__(self, *coefficients):\n \"\"\" input: coefficients are in the form a_n, ...a_1, a_0\n \"\"\"\n self.coefficients = list(coefficients) # tuple is turned into a list\n\n def __repr__(self):\n \"\"\"\n method to return the canonical string representation\n of a polynomial.\n \"\"\"\n return \"Polynomial\" + str(tuple(self.coefficients))\n\n def __str__(self):\n\n def x_expr(degree):\n if degree == 0:\n res = \"\"\n elif degree == 1:\n res = \"x\"\n else:\n res = \"x^\" + str(degree)\n return res\n\n degree = len(self.coefficients) - 1\n res = \"\"\n\n for i in range(0, degree + 1):\n coeff = self.coefficients[i]\n # nothing has to be done if coeff is 0:\n if abs(coeff) == 1 and i < degree:\n # 1 in front of x shouldn't occur, e.g. x instead of 1x\n # but we need the plus or minus sign:\n res += f\"{'+' if coeff > 0 else '-'}{x_expr(degree - i)}\"\n elif coeff != 0:\n res += f\"{coeff:+g}{x_expr(degree - i)}\"\n\n return res.lstrip('+') # removing leading '+'\n\n def __call__(self, x):\n res = 0\n for coeff in self.coefficients:\n res = res * x + coeff\n return res\n\n\ndef main():\n app = QApplication(sys.argv)\n ex = Main()\n ex.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MichalSzczekocki/SIOO_lab2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"72634701052","text":"import cv2\nimport mediapipe as mp\nimport time # to check the frame rate\n\ncap = cv2.VideoCapture(0)\n\nmpHands = mp.solutions.hands\nhands = mpHands.Hands()\nmpDraw = mp.solutions.drawing_utils\n\npTime = 0\ncTime = 0\n\nwhile True:\n success, img = cap.read()\n imgRGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n results = hands.process(imgRGB)\n #print(results.multi_hand_landmarks) # to Check whether it detects a hand\n\n #Extracting each hand\n if results.multi_hand_landmarks:\n for handLms in results.multi_hand_landmarks:\n #Get the information of hands. 
Landmarks information (x & y coordinates) and ID number\n for id,lm in enumerate(handLms.landmark):\n #print(id,'\\n',lm)\n h,w,c = img.shape\n cx,cy = int(lm.x*w),int(lm.y*h)\n print(id,cx,cy) #Get the pixel location\n # if id==0:\n # cv2.circle(img,(cx,cy),10,(255,0,0),cv2.FILLED) #Drawing a Circle on the ID 0 location\n mpDraw.draw_landmarks(img,handLms,mpHands.HAND_CONNECTIONS)\n\n cTime = time.time()\n fps = 1/(cTime - pTime)\n pTime = cTime\n\n cv2.putText(img,str(int(fps)),(10,70),cv2.FONT_HERSHEY_PLAIN,3,(255,0,255),3)\n\n\n cv2.imshow(\"image\",img)\n cv2.waitKey(1)\n\n","repo_name":"ThejakaSEP/HandTracking","sub_path":"HandTrackingMin.py","file_name":"HandTrackingMin.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4892296978","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\nProject Name: Pandas\nFile Name: 07时间日期.py\nAuthor: lsy\nCreate Date: 2021-11-23\n-------------------------------------------------\n\"\"\"\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# 0. Read the data\nfpath = \"res/beijing_tianqi_2018.csv\"\ndf = pd.read_csv(fpath)\n# Strip the ℃ suffix from the temperature columns\ndf.loc[:, \"bWendu\"] = df[\"bWendu\"].str.replace(\"℃\", \"\").astype('int32')\ndf.loc[:, \"yWendu\"] = df[\"yWendu\"].str.replace(\"℃\", \"\").astype('int32')\ndf.head()\n\n# 1. Convert the date column to datetime\npd.to_datetime(df[\"ymd\"])\ndf.set_index(pd.to_datetime(df[\"ymd\"]), inplace=True)\ndf.head()\ndf.index[0]\n\n# 2. Queries\n# Select one specific day\ndf.loc['2018-01-05']\n# Select a date range\ndf.loc['2018-01-05':'2018-01-10']\n# Filter by month prefix\ndf.loc['2018-03']\n# Filter by a month range\ndf.loc[\"2018-07\":\"2018-09\"]\n# Filter by year prefix\ndf.loc[\"2018\"]\n\n# 3. Get week, month, and quarter\n# Week numbers\ndf.index.week\n# Month numbers\ndf.index.month\n# Quarter numbers\ndf.index.quarter\n\n# 4. Aggregate by week, month, and quarter\n# Weekly statistics\ndf.groupby(df.index.week)[\"bWendu\"].max()\n# Monthly statistics\ndf.groupby(df.index.month)[\"bWendu\"].max()\n# Quarterly statistics\ndf.groupby(df.index.quarter)[\"bWendu\"].max()\n\n# II. Missing data\ndf = pd.DataFrame({\n \"pdate\": [\"2019-12-01\", \"2019-12-02\", \"2019-12-04\", \"2019-12-05\"],\n \"pv\": [100, 200, 400, 500],\n \"uv\": [10, 20, 40, 50],\n})\n\n\ndf # the 2019-12-03 row is missing\ndf.set_index(\"pdate\").plot()\nplt.show()\n\n# Method 1: reindex\ndf_date = df.set_index(\"pdate\")\ndf_date\ndf_date.index\n# 1. Set the df index to a datetime index\ndf_date = df_date.set_index(pd.to_datetime(df_date.index))\ndf_date\ndf_date.index\n# 2. Use pandas reindex to fill in the missing index\n# Generate the complete date sequence\npdates = pd.date_range(start=\"2019-12-01\", end=\"2019-12-05\")\npdates\n# Fill with 0\ndf_date_new = df_date.reindex(pdates, fill_value=0)\ndf_date_new\ndf_date_new.plot() # plot\nplt.show()\n\n# Method 2: resample\n# 1. First convert the index to a datetime index\ndf_new2 = df.set_index(pd.to_datetime(df[\"pdate\"])).drop(\"pdate\", axis=1)\ndf_new2\ndf_new2.index\n# 2. Resample by day using resample\n# Resampling collapses each interval to a single value, so an aggregation such as mean must be specified\ndf_new2 = df_new2.resample(\"D\").mean().fillna(0)\ndf_new2\n# How to use resample\ndf_new2.resample(\"2D\").mean()","repo_name":"liusiyuan1111/Data_Analytics","sub_path":"Pandas/07时间日期.py","file_name":"07时间日期.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33630478955","text":"import streamlit as st\nfrom statsmodels.stats.proportion import proportion_confint\nst.title('Business Ads Costing Example DSI')\nst.write('Use this app to figure out how good is the spam takedown machine on FB')\n\nnum_spam_takedowns = st.number_input('Number of spam takedown samples', 
value=500)\nnum_successes = st.number_input('Est number of successful takedown samples', value=300)\nsample_cost = st.number_input('Cost of 1 Sample in $', value=1)\ntime_window = st.selectbox('How often do you want this metric?', ['Weekly','Biweekly','Monthly'])\n#inputs\n#confidence interval\n#get the estimated precision of the group\n# proportion_confint returns (ci_low, ci_upp)\nlower_val, upper_val = proportion_confint(count=num_successes, nobs=num_spam_takedowns)\nconfint_width = abs(upper_val - lower_val)\ncost = num_spam_takedowns * sample_cost\n# annualize the cost for the chosen sampling cadence\nif time_window == 'Weekly':\n cost = cost*52\nelif time_window == 'Biweekly':\n cost = cost*26\nelif time_window == 'Monthly':\n cost = cost*12\nst.write(lower_val)\nst.write(upper_val)\nst.write(confint_width)\nst.write(cost)","repo_name":"tylerjrichards/DSI_demo","sub_path":"live_coding_file.py","file_name":"live_coding_file.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31752595199","text":"from fractions import Fraction as frac\r\n\r\ndef mixed_fraction(s):\r\n #for negative fractions\r\n if '-' in s:\r\n if s.count('-') == 1:\r\n s = s.replace('-','')\r\n\r\n f = frac(s)\r\n n = f.numerator\r\n d = f.denominator\r\n int_ = n//d\r\n frac_n = n%d\r\n frac_d = d\r\n return f'-{int_} {frac_n}/{frac_d}'\r\n f = frac(s)\r\n n = f.numerator\r\n d = f.denominator\r\n int_ = n//d\r\n frac_n = n%d\r\n frac_d = d\r\n if n == 0:\r\n return '0'\r\n # for positive fractions\r\n if n > 0 and d > 0:\r\n if frac_n == 0:\r\n return f'{int_}'\r\n return f'{int_} {frac_n}/{frac_d}'\r\n","repo_name":"2022practice/codewars","sub_path":"5kyu/simple_fraction_to_mixed_number_converter/simple_fraction_to_mixed_number_converter_00.py","file_name":"simple_fraction_to_mixed_number_converter_00.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43918319390","text":"# Listing 1-13. 
Example code for identity operators\nfrom typing import List\n\nvar1: int = 5\nvar1 = 5\n\nvar2: str = 'Hello'\nvar2 = 'Hello'\n\nvar3: List[int] = [1, 2, 3]\nvar3 = [1, 2, 3]\n\nprint(var1 is not var1)\nprint(var2 is var2)\nprint(var3 is var3)\n","repo_name":"narayansiddharth/Python","sub_path":"MasteringMachineLearningWithPythonInSixSteps/Chapter1/1-13-IdentityOperator.py","file_name":"1-13-IdentityOperator.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33894731102","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC # Create the dataframes needed for this task\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import col, explode, split, when\n\n# COMMAND ----------\n\ndf_products = spark.createDataFrame([\n (1, \"Cat\"),\n (2, \"Dog\"),\n (3, \"Pen\"),\n (4, \"Paper\"),\n (5, \"Rock\"),\n (6, \"Scissor\"),\n (7, \"Shrek\"),\n (8, \"Ball\"),\n (9, \"PS4\"),\n (10, \"Coffee\")\n], [\"product_id\", \"product_name\"])\n\ndf_categories = spark.createDataFrame([\n (1, \"Animals\"),\n (2, \"Toys\"),\n (3, \"Misc\"),\n (4, \"Music\"),\n (5, \"Fun\"),\n (6, \"Films\"),\n (7, \"Games\"),\n (8, \"Food\"),\n (9, \"Tech\"),\n (10, \"Drinks\")\n], [\"category_id\", \"category_name\"])\n\ndf_relationships = spark.createDataFrame([\n (\"1\", \"1\"),\n (\"2\", \"2\"),\n (\"3\", \"3\"),\n (\"4\", None),\n (\"5\", \"4 5\"),\n (\"6\", \"6 7\"),\n (\"7\", \"7 8\"),\n (\"8\", \"8 9\"),\n (\"9\", \"9 10\"),\n (\"10\", \"10 1 2\")\n], [\"product_id\", \"category_ids\"])\n\ndf_products.show()\ndf_categories.show()\ndf_relationships.show()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Reshape df_relationships, splitting each row that holds a set of category_ids (one-to-many) so that we get product_id - category_id pairs (one-to-one)\n\n# COMMAND ----------\n\ndf_relationships = df_relationships.withColumn(\"category_id\", explode(split(col(\"category_ids\"), \"\\\\s+\"))).drop(\"category_ids\")\n\ndf_relationships.show()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Join all the tables into one by id and select the columns we need\n\n# COMMAND ----------\n\ndf_product_category = df_products.join(df_relationships, on=\"product_id\", how=\"left\") \\\n .join(df_categories, on=\"category_id\", how=\"left\") \\\n .select(col(\"product_name\"), col(\"category_name\"))\n\ndf_product_category.show()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Handle null values in category_name\n\n# COMMAND ----------\n\ndf_product_category = df_product_category.withColumn(\"category_name\", when(col(\"category_name\").isNull(), \"No Category\").otherwise(col(\"category_name\")))\ndisplay(df_product_category)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # The table is ready\n","repo_name":"Nikovr/Product-Category","sub_path":"Product - Category.py","file_name":"Product - Category.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5688211555","text":"class Solution:\n def ladderLength(self, start: str, end: str, dictionary: List[str]) -> int:\n dict = set(dictionary)\n visited = {start}\n queue = collections.deque([start])\n steps = 0\n while queue:\n steps += 1\n for _ in range(len(queue)):\n word = queue.popleft()\n if word == end:\n return steps\n for next_word in self.next_words(word):\n if next_word in dict and next_word not in visited:\n queue.append(next_word)\n 
visited.add(next_word)\n\n return 0\n\n def next_words(self, word):\n words = []\n for i in range(len(word)):\n left, right = word[:i], word[i + 1:]\n for char in \"abcdefghijklmnopqrstuvwxyz\":\n if word[i] == char:\n continue\n words.append(left + char + right)\n return words","repo_name":"raymond212/leetcode-solutions","sub_path":"python/0127_Word_Ladder.py","file_name":"0127_Word_Ladder.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"32016258311","text":"from hello import db\n\n\n\"\"\"\nIf the database already exists,\nexisting tables are not recreated or updated,\nso drop the database and create it again.\n\"\"\"\ndb.drop_all()\ndb.create_all() # create the database\n\n\n\"\"\"Add rows\"\"\"\nfrom hello import Role, User\nadmin_role = Role(name='Admin')\nmod_role = Role(name='Moderator')\nuser_role = Role(name='User')\nuser_john = User(username='john', role=admin_role)\nuser_susan = User(username='susan', role=user_role)\nuser_david = User(username='david', role=user_role)\n\n\n\"\"\"\nThe objects exist only in Python so far; nothing has been written to the database yet,\nwhich is why their id values are not set.\n\"\"\"\nprint(admin_role.id)\nprint(mod_role.id)\nprint(user_role.id)\n\n\n\"\"\" Changes to the database are managed through a database session. To stage\nobjects for writing, they must be added to the session.\"\"\"\ndb.session.add(admin_role)\ndb.session.add(mod_role)\ndb.session.add(user_role)\ndb.session.add(user_john)\ndb.session.add(user_susan)\ndb.session.add(user_david)\n\n\"\"\"They can also be saved all at once like this, so caching them and sending them together might even improve performance.\"\"\"\n\"\"\"\ndb.session.add_all([admin_role, mod_role, user_role, \n user_john, user_susan, user_david])\n\"\"\"\n\n\"\"\" To write the objects to the database, the session must be committed by calling its commit() method.\"\"\"\ndb.session.commit()\n\nprint(admin_role.id)\nprint(mod_role.id)\nprint(user_role.id)\n\n\n\"\"\"db.session's add() is also used to update models\"\"\"\n\nprint(\"Before modifying: %r\" % (admin_role.name))\nadmin_role.name = 'Administrator'\ndb.session.add(admin_role)\ndb.session.commit()\n\nprint(\"After modifying: %r\" % (admin_role.name))\n\n\n\"\"\"Delete a role\"\"\"\nprint(\"Before deleting: %r\" % (mod_role.id))\ndb.session.delete(mod_role)\ndb.session.commit()\n\nprint(\"After deleting: %r\" % (mod_role.id))\n\n\n\n\n\n\n","repo_name":"EminentStar/flask-web-oreily","sub_path":"before_large_application_architecture/db_practice.py","file_name":"db_practice.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15491047616","text":"import itertools\r\n\r\nnumbers = input().split(', ')\r\nn = len(numbers)\r\n\r\npermutations = set(itertools.permutations(['-'] * n + ['+'] * n, n))\r\n\r\nfor perm in permutations:\r\n expr = ''.join(itertools.chain(*zip(perm, numbers)))\r\n print(f\"{expr}={eval(expr)}\")\r\n","repo_name":"ayk-dev/python-advanced","sub_path":"05-functions/expressions.py","file_name":"expressions.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1116548414","text":"from openerp.osv import osv, fields, orm\nfrom openerp.tools.translate import _\nimport openerp.addons.decimal_precision as dp\nimport psycopg2\nfrom openerp import netsvc\n\n\nclass translation_price(osv.osv):\n _name=\"translation.price\"\n _description = \"Prices forming\"\n \n def digitron(self, cr, uid, ids, origin, parent, discount, price, context=None):\n if not parent : return True \n if context is None : context={}\n 
base=self.pool.get('translation.price').browse(cr, uid, parent).price\n res={}\n \n if origin =='price':\n if round((base*(100-discount)/100),2) == price:\n return True\n else :\n res['discount'] = 100-(price*100/base)\n elif origin == 'discount':\n if price == 0: price = base\n if round(100-(price*100/base),2) == discount:\n return True\n else : \n res['price'] = base*(100-discount)/100\n return {'value':res}\n \n def get_default_price(self, cr, uid, context=None):\n return self.pool.get('translation.price').search(cr, uid, [])[0]\n \n _columns = {\n 'name':fields.char('Price name', size=64),\n 'parent_id':fields.many2one('translation.price','Price base', 'Base price used for calculating actual price'),\n 'child_ids':fields.one2many('translation.price','parent_id', string=\"Child product price\"),\n 'price':fields.float('Price', digits_compute=dp.get_precision('Product Price'), help=\"Price for translation\"),\n 'discount_name':fields.char('Discount description', size=128),\n 'discount':fields.float('Discount %' ,digits_compute=dp.get_precision('Product Price'), help=\"Percentage as number (20)% (not decimal 0,2)!\"),\n }\n \n \n def name_get(self, cr, uid, ids, context=None):\n res=[]\n for r in self.read(cr, uid, ids,['name','price']):\n res.append((r['id'],'%s - %s' %(r['price'],r['name'])))\n return res\n \nclass translation_evidention(osv.Model):\n _inherit = 'translation.evidention'\n \n _get_product_type = [(1,'Task=Product (translate and lectoring)'),\n (2,'(Task=Product Translate, Product Lectoring)'),\n (3,'Document=Product (aaa trans to bbb,ccc) (T+L)'),\n (4,'(Document=Product Translate, Product Lector)'),\n (5,'(Single product, all tasks in description)')]\n \n def onchange_partner_id(self, cr, uid, ids, partner, context=None):\n if not partner:\n return {'value': {'payment_term': False, 'fiscal_position': False}}\n if ids and ids[0]:\n self.write(cr, uid, ids[0],{})\n part = self.pool.get('res.partner').browse(cr, uid, partner, context=context)\n payment_term = part.property_payment_term and part.property_payment_term.id or False\n fiscal_position = part.property_account_position and part.property_account_position.id or False\n return {'value': {'payment_term': payment_term, 'fiscal_position': fiscal_position}}\n \n _columns = {\n 'marketing_id':fields.many2one('translation.marketing','Marketing'),\n 'price_id':fields.many2one('translation.price','Price template'),\n 'product_id':fields.one2many('translation.product','evidention_id','Translations'),\n 'product_type':fields.selection(_get_product_type, 'Product type', help=\"Rules for generating and invoicing translation\", required=1),\n 'avans':fields.float('Advance amount',digits_compute=dp.get_precision('Product Price')),\n 'so_ids':fields.many2many('sale.order','translation_evidention_so_rel','translation_evidention_id','sale_order_id','Sale orders'),\n 'company_id':fields.many2one('res.company','Company'),\n 'fiscal_position':fields.many2one('account.fiscal.position', 'Fiscal position'),\n 'payment_term':fields.many2one('account.payment.term', 'Payment term')\n #'orders':fields.one2many('sale.order', 'id', '' )\n }\n \n _defaults = {\n 'price_id': 1,\n 'product_type' :1,\n 'company_id': 1 #TODO MULTICOMPANY...\n }\n \n def evidention_invoice_generate(self, cr, uid, ids, context=None):\n if context==None : \n context={}\n invoice_obj = self.pool.get('account.invoice')\n \n \n \n for evidention in self.browse(cr, uid, ids):\n inv_vals = self._prepare_invoice_vals(cr, uid, evidention)\n inv = invoice_obj.create(cr, 
uid, inv_vals)\n \n for prod in evidention.product_id:\n line_vals=self._prepare_invoice_line(cr, uid, evidention, prod, inv)\n return True\n \n def _prepare_invoice_line(self, cr, uid, evidention, prod, inv, context=None):\n uos = self.pool.get('product.uom').search(cr, uid, [('name', '=', 'card')] )[0]\n values={\n 'account_id':prod.product_id.property_account_income.id,\n 'name':prod.description, #description\n 'invoice_id':inv,\n 'price_unit':prod.price,\n 'company_id':1, #TODO MULTI COMPANY!\n 'discount':prod.discount,\n 'quantity':prod.units,\n 'uos_id': uos,\n 'invoice_line_tax_id': [(6, 0, [x.id for x in prod.tax_ids])],\n 'partner_id':evidention.partner_id.id,\n 'product_id':prod.product_id.id,\n }\n return values\n \n def _prepare_invoice_vals(self, cr, uid, evidention, context=None):\n user_obj=self.pool.get ('res.users').browse(cr, uid, uid)\n invoice_vals = {\n 'name': '',\n 'origin': evidention.name,\n 'type': 'out_invoice',\n 'reference': evidention.name,\n 'account_id': evidention.partner_id.property_account_receivable.id,\n 'partner_id': evidention.partner_id.id,\n # taken from the user's defaults\n 'journal_id': user_obj.journal_id,\n 'uredjaj_id':user_obj.uredjaj_id,\n #'nac_plac':(),\n #'invoice_line': [(6, 0, lines)],\n 'currency_id': 1, #order.pricelist_id.currency_id.id,\n 'comment': evidention.note or '',\n 'payment_term': evidention.payment_term and evidention.payment_term.id or False,\n 'fiscal_position': evidention.partner_id.property_account_position.id,\n #'date_invoice': context.get('date_invoice', False),\n 'company_id': 1, # TODO MULTI! evidention.company_id.id,\n 'user_id': uid}\n return invoice_vals\n \n \n \n def check_translated_cards(self, cr, uid, ids, context=None):\n for evidention in self.browse(cr, uid, ids):\n for prod in evidention.product_id:\n if prod.units != prod.task_id.trans_cards:\n prod.write({'units':prod.task_id.trans_cards})\n \n return True\n \n def evidention_quotation_generate(self, cr, uid, ids, context=None):\n for evidention in self.browse(cr, uid, ids):\n so = self.create_sale_order(cr, uid, evidention)\n for product in evidention.product_id:\n self.create_sale_order_line(cr, uid, so, product, evidention.fiscal_position.id)\n view_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form')\n view_id = view_ref and view_ref[1] or False,\n return {\n 'type': 'ir.actions.act_window',\n 'name': _('Sales Order'),\n 'res_model': 'sale.order',\n 'res_id': so,\n 'view_type': 'form',\n 'view_mode': 'form',\n 'view_id': view_id,\n 'target': 'current',\n 'nodestroy': True,\n }\n \n def create_sale_order_line(self, cr, uid, so, product, fiscal_position=False, context=None ):\n uom_id = self.pool.get('product.uom').search(cr, uid, [('name', '=', 'card')] )[0]\n taxes=[]\n for tax in product.tax_ids:\n taxes.append(tax.id) \n values = {'name':product.description,\n 'order_id':so,\n 'product_id':product.product_id.id,\n 'price_unit':product.price, \n 'product_uom':uom_id,\n 'product_uos':uom_id,\n 'tax_id':taxes and [(6,0,taxes)] or False,\n 'product_uom_qty':product.units,\n 'product_uos_qty':product.units,\n 'discount':product.discount,\n }\n return self.pool.get('sale.order.line').create(cr, uid, values)\n \n def create_sale_order(self, cr, uid, evidention, fis_position=None, payment=None, context=None):\n values = {\n 'partner_id':evidention.partner_id.id,\n 'partner_invoice_id':evidention.partner_id.id,\n 'partner_shipping_id':evidention.partner_id.id,\n 'fiscal_position':evidention.fiscal_position.id,\n 
'payment_term':evidention.payment_term.id,\n 'pricelist_id': 1, #pricelist, \n 'origin':evidention.ev_sequence,\n 'trans_evid_ids':[(6,0,[evidention.id])]\n }\n return self.pool.get('sale.order').create(cr, uid, values)\n \n def _get_partner_fiscal_position(self, cr, uid, partner, context=None):\n if not partner:\n return {'value': {'partner_invoice_id': False, 'partner_shipping_id': False, 'payment_term': False, 'fiscal_position': False}}\n part = self.pool.get('res.partner').browse(cr, uid, partner, context=context)\n payment_term = part.property_payment_term and part.property_payment_term.id or False\n fiscal_position = part.property_account_position and part.property_account_position.id or False\n fis_pos = part.property_account_position or False\n return fiscal_position, fis_pos, payment_term\n \n def evidention_products_generate(self, cr, uid, ids, context=None):\n for evidention in self.browse(cr, uid, ids):\n if not evidention.product_id:\n new_product_list= self._generate_product_list(cr, uid, ids, evidention)\n self._create_translation_products(cr, uid, new_product_list)\n return True\n \n def _create_translation_products(self, cr, uid, new_product_list, context=None ):\n trans_product = self.pool.get('translation.product')\n for product in new_product_list:\n prod_id = self._create_product_product(cr, uid, product, context=None) # create the real product record\n tprod_vals={\n 'name':product['name'],\n 'description':product['description'],\n 'price_id':product['price_id'],\n 'price':product['price'],\n 'units':product['units'],\n 'tax_ids':[(6,0, product['tax_ids'])],\n 'evidention_id':product['evidention_id'],\n 'document_id':product['document_id'],\n 'task_id':product['task_id'],\n 'product_type':product['product_type'],\n 'product_id':prod_id,\n 'partner_id':product['partner_id'],\n }\n trans_product.create(cr, uid, tprod_vals)\n return True\n \n def _create_product_product(self, cr, uid, product, context=None):\n template= self._create_product_template(cr, uid, product)\n prod_vals = {\n 'product_tmpl_id':template,\n 'name_template':product['name'],\n }#'default_code':product.name,\n return self.pool.get('product.product').create(cr, uid, prod_vals)\n \n def _create_product_template(self, cr, uid, product, context=None):\n uom_id=self.pool.get('product.uom').search(cr, uid, [('name', '=', 'card')] )[0]\n ir_values = self.pool.get('ir.values')\n product['tax_ids'] = ir_values.get_default(cr, uid, 'product.product', 'taxes_id', company_id=1)\n \n assert product['tax_ids'] , 'You need to define default taxes'\n prod_template = {\n 'name':product['name'],\n 'description': product['description'],\n 'uom_id':uom_id,\n 'uos_id':uom_id,\n \n 'uom_po_id':uom_id,\n 'tax_ids':[(6,0, product['tax_ids'])],\n 'list_price':product['price'],\n 'type':'service'\n }#'category_id': kategorija,\n return self.pool.get('product.template').create(cr, uid, prod_template)\n \n def _generate_product_list(self, cr ,uid, ids, evidention, prod_type=None, context=None):\n if prod_type==None:\n prod_type=evidention.product_type\n product_list=[]\n for document in evidention.document_ids: \n \n #description2 = _(\"%s %s from %s to \") %(work, document.name, document.language_id.trans_from)\n desc_languages = \"\"\n for task in document.task_ids:\n desc_languages += ', ' + task.language_id.trans_to\n if prod_type == 1 :\n prod_={}\n prod_['name'] = task.name\n work = task.certified and _('Certified translation of') or _('Translation of')\n prod_['description'] = _('%s %s \nfrom %s to %s') % (work, document.name, 
document.language_id.trans_from, task.language_id.trans_to)\n prod_['units'] = document.cards_estm\n prod_['evidention_id'] = evidention.id\n prod_['document_id'] = document.id\n prod_['task_id']= task.id\n prod_['product_type'] = 1\n prod_['price'] = evidention.price_id.price\n prod_['partner_id']=evidention.partner_id.id\n prod_['price_id']=evidention.price_id.id or 1\n product_list.append(prod_)\n \n if evidention.product_type == 3:\n pass\n return product_list\n#############################################################################################\n##############################################################################################\nclass translation_document(osv.Model):\n _inherit = 'translation.document'\n _columns = {\n 'price_id':fields.many2one('translation.price','Price template'), #deprecated .. TO BE REMOVED \n 'product_id':fields.one2many('translation.product','document_id','Products') #deprecated .. TO BE REMOVED \n }\n \n\nclass translation_document_task(osv.Model):\n _inherit = 'translation.document.task'\n _columns = {\n 'price_id':fields.many2one('translation.price','Price template'), #deprecated .. TO BE REMOVED \n 'product_id':fields.one2many('translation.product','task_id','Products'), \n }\n\nclass sale_order(osv.osv):\n _inherit= 'sale.order'\n _columns = {\n 'trans_evid_ids':fields.many2many('translation.evidention','translation_evidention_so_rel','sale_order_id','translation_evidention_id','Evidentions')\n }\n \nclass account_tax(osv.Model):\n _inherit = \"account.tax\"\n _columns = {\n 't_prod_ids':fields.many2many('translation.product','trans_product_taxes_rel','tax_ids','t_prod_ids','Taxes'),\n }\n \nclass translation_product(osv.Model):\n _name = \"translation.product\"\n _description = \"Translation products - pre sale\"\n \n \n \n def _get_total(self, cr, uid, ids, field_names, field_value, context=None):\n res={}\n tax_obj=self.pool.get('account.tax')\n for prod in self.browse(cr, uid, ids):\n total_untaxed = prod.price*prod.units*(100.00-prod.discount)/100\n total_tax=1\n for tax in prod.tax_ids:\n total_tax = total_tax * tax_obj.browse(cr, uid, tax.id).amount\n # OUCH! 
NOT SO GOOD: this only works for tax on price;\n #if complicated taxes are applied, use with caution\n total_tax = total_tax * total_untaxed\n total = total_untaxed + total_tax\n res[prod.id] = {'total_untaxed':total_untaxed,\n 'total_tax': total_tax,\n 'total': total}\n return res\n\n \n def onchange_product_price(self, cr, uid, ids, price_id, context=None):\n res={}\n new_price = self.pool.get('translation.price').browse(cr, uid, price_id).price\n self.write(cr, uid, ids[0], {'price':new_price})\n res['price'] = new_price\n return {'value':res}\n \n _columns = {\n 'name':fields.char('Name', size=128),\n 'description':fields.text('Description'),\n 'price_id':fields.many2one('translation.price','Price type'),\n 'price':fields.float('Price', digits_compute= dp.get_precision('Product Price'), readonly=True ),# round the float to two decimal places!\n 'units':fields.float('Units'),\n 'discount':fields.float('Discount'),\n 'tax_ids':fields.many2many('account.tax','trans_product_taxes_rel','t_prod_ids','tax_ids','Taxes'),\n 'total_untaxed':fields.function(_get_total, type=\"float\", digits_compute= dp.get_precision('Product Price'), string='Total untaxed', multi='total'),\n 'total_tax':fields.function(_get_total, type=\"float\",digits_compute= dp.get_precision('Account'), string=\"Tax\", multi='total'),\n 'total':fields.function(_get_total, type=\"float\", string=\"Total\", multi='total'),\n 'evidention_id':fields.many2one('translation.evidention','Evidention'),\n 'document_id':fields.many2one('translation.document','Document'),\n 'task_id':fields.many2one('translation.document.task','Task'),\n 'product_type':fields.integer('Product type'),#translation generator type\n 'product_id':fields.many2one('product.product','Product'),\n 'partner_id':fields.many2one('res.partner','Partner'),\n }\n\n\n","repo_name":"badbole/lingua","sub_path":"translations_sale/translations_sale.py","file_name":"translations_sale.py","file_ext":"py","file_size_in_byte":18620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28656531630","text":"import sys\nfrom time import sleep\n\nimport pygame\n\nfrom bullet import Bullet\nfrom alien import Alien\nfrom stars import Star\n\ndef check_keydown_events(event, ai_settings, screen, ship, bullets):\n \"\"\"Respond to keydown events\"\"\"\n if event.key == pygame.K_RIGHT:\n ship.moving_right = True\n elif event.key == pygame.K_LEFT:\n ship.moving_left = True\n elif event.key == pygame.K_SPACE:\n fire_bullet(ai_settings, screen, ship, bullets)\n elif event.key == pygame.K_q:\n sys.exit()\n\ndef check_keyup_events(event, ship):\n \"\"\"Respond to key releases\"\"\"\n if event.key == pygame.K_RIGHT:\n ship.moving_right = False\n elif event.key == pygame.K_LEFT:\n ship.moving_left = False\n\ndef check_play_button(ai_settings, screen, ship, aliens, bullets,\n stats, play_button, mouse_x, mouse_y, sb):\n \"\"\"Start a new game when the player clicks Play\"\"\"\n button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n if button_clicked and not stats.game_active:\n # Reset the game settings\n ai_settings.initialize_dynamic_settings()\n # Hide the mouse cursor\n pygame.mouse.set_visible(False)\n # Reset the game statistics\n stats.reset_stats()\n stats.game_active = True\n # Reset the score and level images\n sb.prep_score()\n sb.prep_high_score()\n sb.prep_level()\n sb.prep_ships()\n # Empty the lists of aliens and bullets\n aliens.empty()\n bullets.empty()\n # Create a new fleet and center the ship\n create_fleet(ai_settings, screen, ship, aliens)\n ship.center_ship()\n\ndef check_events(ai_settings, screen, ship, bullets, aliens, \n stats, play_button, sb):\n \"\"\"Respond to keypresses and mouse events\"\"\"\n for event in pygame.event.get():\n if 
event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, ai_settings, screen, ship, bullets)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, ship)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n check_play_button(ai_settings, screen, ship, aliens, bullets,\n stats, play_button, mouse_x, mouse_y, sb)\n\ndef update_screen(ai_settings, screen, ship, bullets, aliens, stars,\n stats, play_button, sb):\n \"\"\"Update images on the screen and flip to the new screen\"\"\"\n # Redraw the screen on each pass through the loop\n screen.fill(ai_settings.bg_color)\n stars.draw(screen)\n for bullet in bullets.sprites():\n bullet.draw_bullet()\n ship.blitme()\n aliens.draw(screen)\n sb.show_score()\n # Draw the Play button if the game is inactive\n if not stats.game_active:\n play_button.draw_button()\n\n # Make the most recently drawn screen visible\n pygame.display.flip()\n\ndef update_bullets(ai_settings, screen, ship, bullets, aliens, stats, sb):\n \"\"\"Update bullet positions and remove old bullets\"\"\"\n # Update bullet positions\n bullets.update()\n # Remove bullets that have disappeared\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)\n check_bullets_alien_collisions(ai_settings, screen, ship, bullets, aliens, stats, sb)\n \ndef check_bullets_alien_collisions(ai_settings, screen, ship, bullets, aliens, stats, sb):\n # Check for bullets that have hit aliens; remove any alien that was hit\n collisions = pygame.sprite.groupcollide(bullets, aliens, False, True)\n\n if collisions:\n for aliens in collisions.values():\n stats.score += ai_settings.alien_points * len(aliens)\n sb.prep_score()\n check_high_score(stats, sb)\n # If the entire fleet has been destroyed\n if len(aliens) == 0:\n # Increase the level\n stats.level += 1\n sb.prep_level()\n # Destroy existing bullets and create a new fleet\n bullets.empty()\n ai_settings.increase_speed()\n create_fleet(ai_settings, screen, ship, aliens)\n\ndef fire_bullet(ai_settings, screen, ship, bullets):\n \"\"\"Fire a bullet if the limit has not been reached\"\"\"\n # Create a new bullet and add it to the bullets group\n if len(bullets) < ai_settings.bullets_allowed:\n new_bullet = Bullet(ai_settings, screen, ship)\n bullets.add(new_bullet)\n\ndef get_number_aliens_x(ai_settings, alien_width):\n \"\"\"Compute how many aliens fit in a row\"\"\"\n available_space_x = ai_settings.screen_width - 2 * alien_width\n number_aliens_x = int(available_space_x / (2 * alien_width))\n return number_aliens_x\n\ndef get_number_rows(ai_settings, ship_height, alien_height):\n \"\"\"Compute how many rows of aliens fit on the screen\"\"\"\n available_space_y = (ai_settings.screen_height -\n (2 * alien_height) - ship_height)\n number_rows = int(available_space_y / (2 * alien_height))\n return number_rows\n\ndef create_alien(ai_settings, screen, aliens, alien_number, row_number):\n \"\"\"Create an alien and place it in the current row\"\"\"\n alien = Alien(ai_settings, screen)\n alien_width = alien.rect.width\n alien.x = alien_width + 2 * alien_width * alien_number\n alien.rect.x = alien.x\n alien_height = alien.rect.height\n alien.y = alien_height + 2 * alien_height * row_number\n alien.rect.y = alien.y\n aliens.add(alien)\n\ndef create_fleet(ai_settings, screen, ship, aliens):\n \"\"\"Create a full fleet of aliens\"\"\"\n # Create an alien and find how many aliens fit in a row\n alien = Alien(ai_settings, screen)\n number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)\n number_rows = get_number_rows(ai_settings, \n ship.rect.height, alien.rect.height)\n\n # Create the rows of aliens\n for row_number in range(number_rows):\n for alien_number in range(number_aliens_x):\n # Create an alien and add it to the current row\n create_alien(ai_settings, screen, aliens, alien_number, row_number)\n\ndef check_fleet_edges(ai_settings, aliens):\n \"\"\"Respond appropriately when an alien reaches an edge\"\"\"\n for alien in aliens.sprites():\n if alien.check_edges():\n change_fleet_direction(ai_settings, aliens)\n break\n\ndef 
change_fleet_direction(ai_settings, aliens):\n \"\"\"Drop the entire fleet and change its direction\"\"\"\n for alien in aliens.sprites():\n alien.rect.y += ai_settings.fleet_drop_speed\n ai_settings.fleet_direction *= -1\n\ndef update_aliens(ai_settings, stats, screen, ship, aliens, bullets, sb):\n \"\"\"Check whether the fleet is at an edge, then update all alien positions\"\"\"\n check_fleet_edges(ai_settings, aliens)\n aliens.update()\n # Detect collisions between aliens and the ship\n if pygame.sprite.spritecollideany(ship, aliens):\n ship_hit(ai_settings, stats, screen, ship, aliens, bullets, sb)\n\n check_aliens_bottom(ai_settings, stats, screen, ship, aliens, bullets, sb)\n\ndef update_stars(ai_settings, stars):\n stars.update()\n\ndef create_stars(ai_settings, screen, stars):\n for number in range(ai_settings.stars_number):\n star = Star(ai_settings, screen)\n stars.add(star)\n\ndef ship_hit(ai_settings, stats, screen, ship, aliens, bullets, sb):\n \"\"\"Respond to the ship being hit by an alien\"\"\"\n\n if stats.ships_left > 0:\n # Decrement ships_left\n stats.ships_left -= 1\n # Update the ship count display\n sb.prep_ships()\n # Empty the lists of aliens and bullets\n aliens.empty()\n bullets.empty()\n\n # Create a new fleet and center the ship\n create_fleet(ai_settings, screen, ship, aliens)\n ship.center_ship()\n\n # Pause\n sleep(0.5)\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)\n\ndef check_aliens_bottom(ai_settings, stats, screen, ship, aliens, bullets, sb):\n \"\"\"Check whether any aliens have reached the bottom of the screen\"\"\"\n screen_rect = screen.get_rect()\n for alien in aliens.sprites():\n if alien.rect.bottom >= screen_rect.bottom:\n # Treat this the same as the ship being hit\n ship_hit(ai_settings, stats, screen, ship, aliens, bullets, sb)\n break\n\ndef check_high_score(stats, sb):\n \"\"\"Check for a new high score\"\"\"\n if stats.score > stats.high_score:\n stats.high_score = stats.score\n # Update the high-score image\n sb.prep_high_score()\n","repo_name":"Iknowyouwill/alien_invasion","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":7704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17719182618","text":"import socket\nimport time\nfrom debug import debug_print\n\nBUFFER_SIZE = 2048\n\nclass chat_socket:\n\t\n\tdef __init__(self, sock=None, blocking=True, debug=False):\n\t\tif sock is None:\n\t\t\tsock = socket.socket(\n\t\t\t\tsocket.AF_INET, socket.SOCK_STREAM)\n\t\t\n\t\tself.sock = sock\n\t\tself.blocking = blocking\n\t\tself.sock.setblocking(blocking)\n\t\tself.debug = debug\n\t\tself._debug('Created new socket')\n\n\tdef _debug(self, msg, *args):\n\t\tif self.debug:\n\t\t\tdebug_print(msg, args)\n\n\tdef connect(self, *args):\n\t\tself.sock.connect(*args)\n\n\tdef close(self):\n\t\ttry:\n\t\t\tself._debug('Closing socket: {0}', self.sock.getpeername())\n\t\t\tself.sock.shutdown(1)\n\t\texcept:\n\t\t\tself._debug('Sloppy socket close ')\n\n\t\tif self.sock != None:\n\t\t\tself.sock.close()\n\t\t\tself.sock = None\n\n\tdef send(self, msg):\n\t\tself._debug('Sending from socket: {0}', self.sock.getpeername())\n\t\ttotal_sent = 0\n\t\tmsg_length = len(msg)\n\t\tif len(str(len(msg))) > 5:\n\t\t\traise RuntimeError('Message size too long')\n\n\t\tformatted_size = str(len(msg)).zfill(5)\n\t\tmsg = '{0}{1}'.format(formatted_size, msg)\n\n\t\twhile total_sent < msg_length:\n\t\t\tsent = self.sock.send(msg[total_sent:])\n\t\t\tself._debug('{0} sent: {1}', self.sock.getpeername(), sent)\n\n\t\t\tif sent == 0:\n\t\t\t\traise RuntimeError('socket connection broken')\n\n\t\t\ttotal_sent += sent\n\n\tdef read(self):\n\t\tglobal BUFFER_SIZE\n\t\tchunks = []\n\t\tbytes_recd = 0\n\t\tmsg_length = '' # Must be converted from string -> num\n\n\t\ttimout = time.time() + 
60*5\n\n\t\t# Loops through beginning of message to retrieve \n\t\t# message length: example format: '00011hello world'\n\t\twhile True:\n\t\t\tchunk = self.sock.recv(5)\n\t\t\tif not chunk:\n\t\t\t\tself.close()\n\t\t\t\traise RuntimeError('socket connection broken unable to read')\n\t\t\t\treturn\n\n\t\t\tmsg_length = msg_length.join(chunk)\n\n\t\t\tif len(msg_length) == 5:\n\t\t\t\tbreak\n\n\t\t\tif time.time() > timout:\n\t\t\t\tself.close()\n\t\t\t\traise RuntimeError('socket connection broken unable to read')\n\t\t\t\treturn\n\n\n\n\t\ttry:\n\t\t\tmsg_length = int(msg_length)\n\t\texcept:\n\t\t\traise RuntimeError('socket message was not prefaced with message size')\n\n\t\twhile bytes_recd < msg_length:\n\t\t\tchunk = self.sock.recv(min(msg_length - bytes_recd, BUFFER_SIZE))\n\t\t\tif not chunk:\n\t\t\t\traise RuntimeError('socket connection broken')\n\n\t\t\tchunks.append(chunk)\n\t\t\tbytes_recd += len(chunk)\n\n\t\t#return (msg_length, ''.join(chunks))\n\t\treturn ''.join(chunks)\n\n\n\n\n\n","repo_name":"sirkaiserkai/terminal_chat","sub_path":"chat_socket.py","file_name":"chat_socket.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3399685853","text":"import requests\r\nfrom settings import API_KEY, CX\r\nimport click\r\nimport pandas as pd\r\nimport arrow\r\n\r\n\"\"\"\r\nThis quick program is a command line interface to searching google. You'll need to get an API key from Google,\r\nso you can programmatically search.\r\n\r\nExample:\r\n\r\n>>> python google_searcher.py \"John Connelly\"\r\n\r\nThat will search for John Connelly.\r\n\"\"\"\r\n\r\ndef basic_google_search(query, start=1):\r\n\r\n u = r'https://www.googleapis.com/customsearch/v1'\r\n\r\n params = {\r\n 'cx': CX,\r\n 'q': query,\r\n 'key': API_KEY,\r\n 'start': start\r\n }\r\n\r\n r = requests.get(u, params=params)\r\n return r.json()\r\n\r\n\r\ndef google_search(query, pages=5):\r\n results = []\r\n formatted_results = []\r\n urls = []\r\n cnt = 1\r\n dbl = 0\r\n for p in range(pages):\r\n\r\n if p == 0:\r\n p = 1\r\n else:\r\n p *= 10\r\n\r\n google_results = basic_google_search(query, start=p)\r\n items = google_results.get('items')\r\n if items is not None:\r\n for ix, res in enumerate(items):\r\n res['query'] = query\r\n results.append(res)\r\n # print(res.keys())\r\n link = res.get('link')\r\n htmlFormattedUrl = res.get('htmlFormattedUrl')\r\n htmlSnippet = res.get('htmlSnippet')\r\n formattedUrl = res.get('formattedUrl')\r\n htmlTitle = res.get('htmlTitle')\r\n kind = res.get('kind')\r\n displayLink = res.get('displayLink')\r\n title = res.get('title')\r\n cacheId = res.get('cacheId')\r\n snippet = res.get('snippet')\r\n pagemap = res.get('pagemap')\r\n\r\n if link not in urls:\r\n urls.append(link)\r\n formatted_results.append([title, link, snippet, query])\r\n cnt += 1\r\n else:\r\n dbl += 1\r\n\r\n return results, formatted_results\r\n\r\n\r\n@click.command()\r\n@click.option('--search', help='Enter your search term...')\r\n@click.option('--pages', default=2, help=\"The number of pages you'd like to return...\")\r\n@click.option('--url', default=False, help=\"Type True if you want the url instead of the title...\")\r\ndef main(search, pages, url):\r\n results, formatted_results = google_search(search, pages=pages)\r\n click.echo('Searching for {}...'.format(search))\r\n for row in formatted_results:\r\n print_item = row[0]\r\n if url:\r\n print_item = row[1] # url\r\n try:\r\n print('\\t', 
print_item)\r\n        except UnicodeDecodeError:\r\n            pass\r\n\r\n    df = pd.DataFrame(formatted_results, columns=['search_title', 'search_url', 'search_blurb', 'search_term'])\r\n    writer = pd.ExcelWriter('search_results_{}.xlsx'.format(arrow.now().format('MMDDYYYY_hhmmss')))\r\n    df.to_excel(writer)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()","repo_name":"johnconnelly75/apra_2016","sub_path":"google_searcher.py","file_name":"google_searcher.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5026275054","text":"from transformers import BertTokenizer,BertModel\nimport torch\n\nbert_path = '/home/data_ti6_d/lich/pretrain_model/bert-base-uncased'\nbert_tokenizer = BertTokenizer.from_pretrained(bert_path)\nbert_model = BertModel.from_pretrained(bert_path)\n\n# Sample\nmax_len = 0\ntext = ['I want to drink water','the drink is a good choice for me','The sun is so hot']\ntoken_list = []\nfor i in range(len(text)):\n    tokens = bert_tokenizer.tokenize(text[i])\n    if len(tokens) > max_len:\n        max_len = len(tokens)\n    token_list.append(tokens)\n\ninput_ids = []\ninput_masks = []\nfor i in range(len(token_list)):\n    current_str = ['[CLS]'] + token_list[i]\n    current_str = current_str + ['[SEP]']\n    input_id = bert_tokenizer.convert_tokens_to_ids(current_str)\n\n    input_mask = [1] * len(input_id)\n    padding = [0] * (max_len + 2 - len(input_id))\n    input_id += padding\n    input_mask += padding\n    input_ids.append(input_id)\n    input_masks.append(input_mask)\n\nprint('input_ids:',input_ids)\nprint('input_masks:',input_masks)\n\ninput_tensor = torch.tensor(input_ids).to(torch.long)\ninput_masks = torch.tensor(input_masks).to(torch.long)\n\nprint('input_tensor:',input_tensor)\nprint('input_masks:',input_masks)\n\noutput = bert_model(input_ids = input_tensor, attention_mask = input_masks)[0] # last_hidden_state\nprint('output:',output.shape)\n\noutput = bert_model(input_ids = input_tensor, attention_mask = input_masks)[1] # pooler_output\nprint('output:',output.shape)\n\n\n","repo_name":"grandchicken/master_code","sub_path":"bert_sample.py","file_name":"bert_sample.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70083451134","text":"#!/usr/bin/python3\nimport logging\nimport time\nimport subprocess\n\n# set up logging to a file\nlogging.basicConfig(level=logging.DEBUG,\n                    format='%(asctime)s %(levelname)-8s %(message)s', # display time, log level and message format for log file\n                    datefmt='%m-%d %H:%M',\n                    filename='ec2.log',# name of log file\n                    filemode='w')# this ensures the log file is no longer appended to, so the messages from earlier runs are lost.\n\n# define a Handler which writes INFO messages or higher to the sys.stderr\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\n\n# setting a format which is simpler for console use\nformatter = logging.Formatter('%(levelname)-8s: %(message)s')\n\n# telling the handler to use this format\nconsole.setFormatter(formatter)\n\n# adding the handler to the root logger\nlogging.getLogger('').addHandler(console)\n\n\n\ndef wrong():\n    print(\"please try again\")# simple function to reduce repetition. 
Used when a user types the wrong input\n    time.sleep(2)\n    subprocess.run([\"clear\"], shell=True) # bash command to clear the screen.\n\n\n\nclass ec2:\n\n    def __init__(self, msg):\n        print(msg)# the constructor prints the message to the console only; it is not stored in the log file\n\n    def info(self):\n        logging.info(self)# Display to ec2.log file & console.\n\n    def warn(self):\n        logging.warning(self)\n\n    def debug(self):\n        logging.debug(self)# any msg labeled debug won't be displayed on the console but will be recorded in the ec2.log file.\n","repo_name":"jowijoke/ec2-aws-python-script","sub_path":"logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"32401980027","text":"import unittest\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CPSDefault.tests.CPSTestCase import CPSTestCase\n\nfrom AccessControl import Unauthorized\nfrom DateTime.DateTime import DateTime\nfrom Products.CPSUtil.http import set_if_modified_since_header\nfrom Products.CPSDefault.voidresponses import ImsResponseHandler\n\nclass VoidResponsesTestCase(CPSTestCase):\n\n    def afterSetUp(self):\n        \"\"\"Setup a subsection, and last modified dates for it and the portal.\n        \"\"\"\n        sections = self.sections = self.portal.sections\n        self.request = self.app.REQUEST\n        self.response = self.request.RESPONSE\n\n        self.login('manager')\n        wftool = self.wftool = getToolByName(self.portal, 'portal_workflow')\n        wftool.invokeFactoryFor(sections, 'Section', 'subs')\n        subs = self.subs = sections.subs\n\n        enable_ims = ImsResponseHandler.enableIfModifiedSince\n        enable_ims(subs, DateTime('2011/01/01'))\n        enable_ims(self.sections, DateTime('2011/06/01'))\n        self.logout()\n\n    def test_ims_response(self):\n        set_if_modified_since_header(self.request,\n                                     'Tue, 1 Mar 2011, 00:00:00 GMT')\n\n        # enabled, and modified after\n        handler = ImsResponseHandler(self.sections, self.request)\n        self.assertFalse(handler.respond())\n        self.failIfEqual(self.response.getStatus(), 304)\n\n        # not enabled\n        handler = ImsResponseHandler(self.portal, self.request)\n        self.assertFalse(handler.respond())\n        self.failIfEqual(self.response.getStatus(), 304)\n\n        # enabled, and modified before\n        handler = ImsResponseHandler(self.subs, self.request)\n        self.assertTrue(handler.respond())\n        self.assertEquals(self.response.getStatus(), 304)\n\n    def test_ims_response_authenticated(self):\n        # no 304 response for authenticated sessions\n        set_if_modified_since_header(self.request,\n                                     'Tue, 1 Mar 2011, 00:00:00 GMT')\n        self.login('manager')\n        handler = ImsResponseHandler(self.subs, self.request)\n        self.assertFalse(handler.respond())\n        self.failIfEqual(self.response.getStatus(), 304)\n\n    def test_getLastModificationDate(self):\n        get_lmd = ImsResponseHandler.getLastModificationDate\n        self.assertEquals(get_lmd(self.portal), None)\n        self.assertEquals(get_lmd(self.sections), DateTime('2011/06/01'))\n\n    def test_enable_ims(self):\n        enable_ims = ImsResponseHandler.enableIfModifiedSince\n        now = DateTime()\n\n        def check(folder):\n            enable_ims(folder)\n            self.assertTrue(\n                ImsResponseHandler.getLastModificationDate(folder) >= now)\n\n        self.login('manager')\n        check(self.portal.workspaces) # brand new\n        check(self.portal.sections) # already existing\n\n        self.logout()\n        self.assertRaises(Unauthorized, check, self.portal.workspaces)\n\n    def test_invalidations(self):\n        # publish something inside of sections (must invalidate),\n        # but outside of sub-section (mustn't)\n        get_lmd = 
ImsResponseHandler.getLastModificationDate\n        wftool = self.wftool\n        self.login('manager')\n\n        ws = self.portal.workspaces\n        wftool.invokeFactoryFor(ws, 'Document', 'doc')\n        # sanity check : nothing happening in workspaces should change dates\n        self.assertEquals(get_lmd(self.sections), DateTime('2011/06/01'))\n\n        pub_time = DateTime() - 1/86400.0 # tool uses 1 sec safety\n        wftool.doActionFor(ws.doc, 'copy_submit', dest_container='sections',\n                           initial_transition='publish')\n\n        self.assertEquals(get_lmd(self.subs), DateTime('2011/01/01'))\n        self.assertTrue(get_lmd(self.sections) >= pub_time)\n\n\ndef test_suite():\n    return unittest.TestSuite((\n        unittest.makeSuite(VoidResponsesTestCase),\n        ))\n\n","repo_name":"nuxeo-cps/products--CPSDefault","sub_path":"tests/test_voidresponses.py","file_name":"test_voidresponses.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71869299132","text":"import pygame\nfrom gameobject import GameObject\nfrom random import randint, choice\nfrom constants import lanes\n\nclass Bird(GameObject):\n    def __init__(self):\n        super(Bird, self).__init__(0, 0, 'bird40x48.png')\n        self.dx = (randint(0, 200) / 100) + 1\n        self.dy = 0\n        self.reset() # call reset here! \n\n    def move(self):\n        self.x += self.dx\n        self.y += self.dy\n        # Check the x position of the Bird and reset once it leaves the screen\n        if self.x > 500: \n            self.reset()\n    \n    def reset(self):\n        self.x = -64\n        self.y = choice(lanes)","repo_name":"merkoyep/Pygame-Tutorial-main","sub_path":"app/bird.py","file_name":"bird.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27032199339","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\tIan Mallarino\n\tIntro to Image and Video Processing\n\tEmbeds a watermark into an image\n\"\"\"\n\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom PIL import Image,ImageTk\n\nclass Gui(tk.Frame):\n\t\"\"\"Main window for program\"\"\"\n\tdef __init__(self,master=None):\n\t\t\"\"\"Constructor\"\"\"\n\t\tsuper(Gui,self).__init__()\n\t\ttk.Frame.__init__(self,master)\n\t\tmaster.protocol('WM_DELETE_WINDOW',quit)\n\t\tself.pack(fill=\"both\",expand=True)\n\t\tself.master.title(\"Watermark\")\n\t\tself.lImg=Image.new(\"RGB\",(640,480),\"black\")\n\t\tself.rImg=Image.new(\"RGB\",(160,120),\"black\")\n\t\tself.load()\n\t\tself.update()\n\t\t\n\tdef load(self):\n\t\t\"\"\"Loads the contents within the window\"\"\"\n\t\tself.isOpening=False\n\t\t#image preview\n\t\tself.pre=tk.Frame(self)\n\t\tself.pre.pack(side=\"bottom\")\n\t\t#left image\n\t\tself.lPre=tk.Label(self.pre)\n\t\tself.lPre.pack(side=\"left\")\n\t\t#right image\n\t\tself.rPre=tk.Label(self.pre)\n\t\tself.rPre.pack(side=\"right\")\n\t\t#buttons for user\n\t\tself.buttons=tk.Frame(self)\n\t\tself.buttons.pack(side=\"bottom\")\n\t\t#\"open image\" button\n\t\tself.openI=tk.Button(self.buttons,text=\"Open Image\",command=self.openImg)\n\t\tself.openI.pack(side=\"left\")\n\t\t#\"open watermark\" button\n\t\tself.openWM=tk.Button(self.buttons,text=\"Open Watermark\",command=self.openWatermark)\n\t\tself.openWM.pack(side=\"left\")\n\t\t#\"embed\" button\n\t\tself.embed=tk.Button(self.buttons,text=\"Embed\",command=self.embed)\n\t\tself.embed.pack(side=\"left\")\n\t\t#\"extract\" button\n\t\tself.extract=tk.Button(self.buttons,text=\"Extract\",command=self.extract)\n\t\tself.extract.pack(side=\"left\")\n\t\t#\"save\" 
button\n\t\tself.save=tk.Button(self.buttons,text=\"Save\",command=self.saveImg)\n\t\tself.save.pack(side=\"left\")\n\t\t\n\tdef update(self):\n\t\t\"\"\"Recalls itself constantly\"\"\"\n\t\tself.lPreview=ImageTk.PhotoImage(self.lImg.resize((640,480)))\n\t\tself.lPre.configure(image=self.lPreview)\n\t\tself.rPreview=ImageTk.PhotoImage(self.rImg.resize((160,120)))\n\t\tself.rPre.configure(image=self.rPreview)\n\t\tself.after(50,self.update)\n\t\treturn\n\t\t\n\tdef openImg(self):\n\t\t\"\"\"Opens the image to the GUI\"\"\"\n\t\tfilePath=filedialog.askopenfilename(\n\t\t\tparent=self,\n\t\t\tinitialfile=\"out.png\",\n\t\t\tfiletypes=[\n\t\t\t\t(\"Image files\",\"*.*\"),\n\t\t\t\t(\"BMP\",\"*.bmp,*.dib\"),\n\t\t\t\t(\"GIF\",\"*.gif\"),\n\t\t\t\t(\"PNG\",\"*.png\"),\n\t\t\t\t(\"JPEG\",\"*.jpg,*.jpe,*.jpeg\"),\n\t\t\t\t(\"PSD\",\"*.psd\"),\n\t\t\t\t(\"TIFF\",\"*.tif,*.tiff\")\n\t\t\t]\n\t\t)\n\t\ttry:\n\t\t\tself.lImg=Image.open(filePath)\n\t\t\tself.rImg=self.rImg.resize((int(self.lImg.size[0]/4),int(self.lImg.size[1]/4)))\n\t\texcept:\n\t\t\tprint(\"Couldn't open file.\")\n\t\treturn\n\t\t\n\tdef openWatermark(self):\n\t\t\"\"\"Open a new watermark image\"\"\"\n\t\tfilePath=filedialog.askopenfilename(\n\t\t\tparent=self,\n\t\t\tinitialfile=\"out.png\",\n\t\t\tfiletypes=[\n\t\t\t\t(\"Image files\",\"*.*\"),\n\t\t\t\t(\"BMP\",\"*.bmp,*.dib\"),\n\t\t\t\t(\"GIF\",\"*.gif\"),\n\t\t\t\t(\"PNG\",\"*.png\"),\n\t\t\t\t(\"JPEG\",\"*.jpg,*.jpe,*.jpeg\"),\n\t\t\t\t(\"PSD\",\"*.psd\"),\n\t\t\t\t(\"TIFF\",\"*.tif,*.tiff\")\n\t\t\t]\n\t\t)\n\t\ttry:\n\t\t\tself.rImg=Image.open(filePath)\n\t\t\tself.rImg=self.rImg.resize((int(self.lImg.size[0]/4),int(self.lImg.size[1]/4)))\n\t\texcept:\n\t\t\tprint(\"Couldn't open file.\")\n\t\treturn\n\t\t\n\tdef embed(self):\n\t\t\"\"\"Embed a watermark into an image\"\"\"\n\t\tprint(\"Embedding...\")\n\t\tpixel=self.lImg.load()\n\t\tval=0\n\t\tbit=0\n\t\trCol=0\n\t\trX=0\n\t\trY=0\n\t\tfor x in range(0,int(self.lImg.size[0]/4)*4,2):\n\t\t\tfor y in range(0,int(self.lImg.size[1]/4)*4):\n\t\t\t\tfor lCol in range(0,3):\n\t\t\t\t\t#collect the color\n\t\t\t\t\tval=(self.rImg.getpixel((rX,rY))[rCol]>>bit)&1\n\t\t\t\t\tpixel[x,y]=(\n\t\t\t\t\t\tself.lImg.getpixel((x,y))[0]&(~1)|val if lCol==0 else self.lImg.getpixel((x,y))[0],\n\t\t\t\t\t\tself.lImg.getpixel((x,y))[1]&(~1)|val if lCol==1 else self.lImg.getpixel((x,y))[1],\n\t\t\t\t\t\tself.lImg.getpixel((x,y))[2]&(~1)|val if lCol==2 else self.lImg.getpixel((x,y))[2],\n\t\t\t\t\t)\n\t\t\t\t\tbit+=1\n\t\t\t\t\tif(bit>=8):\n\t\t\t\t\t\tbit=0\n\t\t\t\t\t\trCol+=1\n\t\t\t\t\t\tif(rCol==3):\n\t\t\t\t\t\t\trCol=0\n\t\t\t\t\t\t\trX+=1\n\t\t\t\t\t\t\tif(rX>=self.rImg.size[0]):\n\t\t\t\t\t\t\t\trX=0\n\t\t\t\t\t\t\t\trY+=1\n\t\treturn\n\t\t\n\tdef extract(self):\n\t\t\"\"\"Extract a watermark from an image\"\"\"\n\t\tval=0\n\t\tbit=0\n\t\trCol=0\n\t\trX=0\n\t\trY=0\n\t\tpixel=self.rImg.load()\n\t\tfor x in range(0,int(self.lImg.size[0]/4)*4,2):\n\t\t\tfor y in range(0,int(self.lImg.size[1]/4)*4):\n\t\t\t\tfor lCol in range(0,3):\n\t\t\t\t\t#collect the color\n\t\t\t\t\tval+=(self.lImg.getpixel((x,y))[lCol]&1)<<bit\n\t\t\t\t\tbit+=1\n\t\t\t\t\tif(bit>=8):\n\t\t\t\t\t\tbit=0\n\t\t\t\t\t\t#write to the color\n\t\t\t\t\t\tpixel[rX,rY]=(\n\t\t\t\t\t\t\tval if rCol==0 else self.rImg.getpixel((rX,rY))[0],\n\t\t\t\t\t\t\tval if rCol==1 else self.rImg.getpixel((rX,rY))[1],\n\t\t\t\t\t\t\tval if rCol==2 else 
self.rImg.getpixel((rX,rY))[2]\n\t\t\t\t\t\t)\n\t\t\t\t\t\tval=0\n\t\t\t\t\t\trCol+=1\n\t\t\t\t\t\tif(rCol>=3):\n\t\t\t\t\t\t\trCol=0\n\t\t\t\t\t\t\trX+=1\n\t\t\t\t\t\t\tif(rX>=self.rImg.size[0]):\n\t\t\t\t\t\t\t\trX=0\n\t\t\t\t\t\t\t\trY+=1\n\t\treturn\n\t\t\n\tdef saveImg(self):\n\t\t\"\"\"Outputs the image to the hard drive TODO: \"\"\"\n\t\tself.lImg.save(\"out.png\",\"png\")\n\t\treturn\n\nroot=tk.Tk()\ngui=Gui(master=root)\ngui.mainloop()\ngui.destroy()","repo_name":"imallarino2015/COT4930-Image-Processing-Projects","sub_path":"Project 2/watermark.py","file_name":"watermark.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26956112762","text":"# https://leetcode.com/problems/happy-number/\n# 202. Happy Number\n# Easy\n# Write an algorithm to determine if a number n is happy.\n\n# A happy number is a number defined by the following process:\n\n# Starting with any positive integer, replace the number by the sum of the squares of its digits.\n# Repeat the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1.\n# Those numbers for which this process ends in 1 are happy.\n# Return true if n is a happy number, and false if not.\n\n \n\n# Example 1:\n\n# Input: n = 19\n# Output: true\n# Explanation:\n# 1^2 + 9^2 = 82\n# 8^2 + 2^2 = 68\n# 6^2 + 8^2 = 100\n# 1^2 + 0^2 + 0^2 = 1\n# Example 2:\n\n# Input: n = 2\n# Output: false\n\n\n\ndef happy_number(n: int ) -> bool:\n    happy_set=set()\n    while n!=1:\n        if n in happy_set:\n            return False\n        happy_set.add(n)\n        n=sum([int(i) ** 2 for i in str(n)])\n    else:\n        return True\n\n\nif __name__ == \"__main__\":\n    #n=19\n    n=2\n    print (\"{}\".format(happy_number(n)))","repo_name":"smohapatra1/scripting","sub_path":"python/practice/start_again/2023/10162023/happy_number.py","file_name":"happy_number.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32016258311","text":"from models import SentimentModel\nimport tensorflow as tf\nimport numpy as np\nimport math\n\nclass DPSentimentModel(SentimentModel):\n    def __init__(self, dp_epsilon, dp_delta, attack_bound, n_draws=20, batch_size = 64, vocab_size=10000, max_len=200, lstm_size=64,\n                 embeddings_dim=50, keep_probs=0.9, is_train=True):\n        \"\"\"\n        A sentiment analysis classifier with the addition of a noise layer after the embedding layer.\n        :param dp_epsilon: Epsilon parameter for DP bounds\n        :param dp_delta: Delta parameter for DP bounds\n        :param attack_bound: The fraction of words in a sentence that the attack can replace.\n        :param n_draws: At prediction time, how many predictions are used to get the mean prediction value.\n\n        *** The rest of the parameters are exactly as the ones in the classifier trained by Alzantot et al. ***\n        \"\"\"\n        self.dp_epsilon = dp_epsilon\n        self.dp_delta = dp_delta\n        self.attack_bound = attack_bound\n        self.batch_size = batch_size\n        self.vocab_size = vocab_size\n        self.max_len = max_len\n        self.lstm_size = lstm_size\n        self.keep_probs = keep_probs\n        self.embeddings_dim = embeddings_dim\n        self.is_train = is_train\n        self.n_draws = n_draws\n        self.build_private_model()\n\n\n    def _noise_layer(self, x, sensitivity_norm):\n        \"\"\"\n        Pixeldp noise layer.\n        :param x: The input for which noise is added.\n        :param sensitivity_norm: String - can be l1 or l2.\n        If l1, we use laplace noise. 
If l2, we use gaussian noise.\n        :return: x + sampled noise.\n        \"\"\"\n        input_shape = tf.shape(x)\n        # experimental value\n        word_sensitivity = tf.ones(input_shape, dtype=tf.float32) * 0.25\n        sentence_sensitivity = self.attack_bound * self.seq_len\n\n        if sensitivity_norm == 'l1':\n            # Use the Laplace mechanism\n            dp_mult = sentence_sensitivity / self.dp_epsilon\n            loc = tf.zeros(input_shape, dtype=tf.float32)\n            scale = tf.ones(input_shape, dtype=tf.float32)\n            noise = tf.distributions.Laplace(loc, scale).sample()\n\n        if sensitivity_norm == 'l2':\n            # Use the Gaussian mechanism\n            dp_mult = sentence_sensitivity * math.sqrt(2 * math.log(1.25 / self.dp_delta)) / self.dp_epsilon\n            noise = tf.random_normal(input_shape, mean=0, stddev=1)\n\n        dp_mult = tf.reshape(dp_mult, [tf.shape(dp_mult)[0], 1, 1])\n        noise_scale = dp_mult * word_sensitivity\n        noise = noise_scale * noise\n        return x + noise\n\n    def build_private_model(self):\n        \"\"\"\n        Build a sentiment analysis classifier with an added noise layer after the embeddings layer.\n        \"\"\"\n        # shape = (batch_size, sentence_length, word_id)\n        self.x_holder = tf.placeholder(tf.int32, shape=[None, self.max_len])\n        self.y_holder = tf.placeholder(tf.int64, shape=[None])\n        self.seq_len = tf.cast(tf.reduce_sum(tf.sign(self.x_holder), axis=1), tf.float32)\n        with tf.device(\"/cpu:0\"):\n            # embeddings matrix\n            self.embedding_w = tf.get_variable('embed_w', shape=[self.vocab_size, self.embeddings_dim],\n                                               initializer=tf.random_uniform_initializer(), trainable=True)\n\n\n        # embedded words\n        self.e = tf.nn.embedding_lookup(self.embedding_w, self.x_holder)\n\n        self.noised_e = self._noise_layer(self.e, \"l2\")\n\n        lstm = tf.contrib.rnn.BasicLSTMCell(self.lstm_size)\n        if self.is_train:\n            self.noised_e = tf.nn.dropout(self.noised_e, self.keep_probs)\n        self.init_state = lstm.zero_state(batch_size=self.batch_size, dtype=tf.float32)\n        rnn_outputs, final_state = tf.nn.dynamic_rnn(cell=lstm,\n                                                     inputs=self.noised_e,\n                                                     initial_state=self.init_state,\n                                                     sequence_length=self.seq_len)\n        relevant = tf.reduce_mean(rnn_outputs, axis=1)\n        last_output = relevant\n        if self.is_train:\n            last_output = tf.nn.dropout(last_output, self.keep_probs)\n        self.w = tf.get_variable(\"w\", shape=[self.lstm_size, 2],\n                                 initializer=tf.truncated_normal_initializer(stddev=0.2))\n        self.b = tf.get_variable(\"b\", shape=[2], dtype=tf.float32)\n        logits = tf.matmul(last_output, self.w) + self.b\n        self.y = tf.nn.softmax(logits)\n        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n            labels=tf.one_hot(self.y_holder, depth=2), logits=logits))\n        self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.y_holder, tf.argmax(self.y, 1)), tf.float32))\n\n        if self.is_train:\n            self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)\n            self.train_op = self.optimizer.minimize(self.cost)\n\n    def predict(self, sess, test_x):\n        predictions = []\n        for i in range(self.n_draws):\n            # Similar to PixelDP, during prediction we call the\n            # classifier several times and return the average prediction.\n            pred_y = sess.run(self.y, feed_dict={self.x_holder: test_x})\n            predictions.append(pred_y)\n        res = np.mean(predictions, axis=0)\n        return res\n\n    def train_for_epoch(self, sess, train_x, train_y):\n        assert self.is_train, 'Not training model'\n        batches_per_epoch = train_x.shape[0] // self.batch_size\n        epoch_loss = 0.0\n        epoch_accuracy = 0.0\n        for idx in range(batches_per_epoch):\n            batch_idx = np.random.choice(train_x.shape[0], size=self.batch_size, replace=False)\n            batch_xs = train_x[batch_idx,:]\n            batch_ys = 
train_y[batch_idx]\n            batch_loss, _, batch_accuracy = sess.run([self.cost, self.train_op, self.accuracy],\n                                                      feed_dict={self.x_holder: batch_xs,\n                                                                 self.y_holder: batch_ys})\n\n            epoch_loss += batch_loss\n            epoch_accuracy += batch_accuracy\n        return epoch_loss / batches_per_epoch, epoch_accuracy / batches_per_epoch","repo_name":"ellaneeman/DP-Project","sub_path":"DPSentimentModel.py","file_name":"DPSentimentModel.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18056239691","text":"from fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom enum import Enum\n\nclass ModelName(str, Enum):\n    alexnet = \"alexnet\"\n    resnet = \"resnet\"\n    lenet = \"lenet\"\n\nclass Personne(BaseModel):\n    nom: str\n    prenom: str\n    age: int\n    taille: float | None = None\n    poids: float | None = None\n    sport: str | None = None\n\n\napp = FastAPI()\n\n@app.get(\"/models/{model_name}\")\nasync def get_model(model_name: ModelName):\n    if model_name is ModelName.alexnet:\n        return {\"model_name\": model_name, \"message\": \"Deep Learning FTW!\"}\n    if model_name.value == \"lenet\":\n        return {\"model_name\": model_name, \"message\": \"LeCNN all the images\"}\n    return {\"model_name\": model_name, \"message\": \"Have some residuals\"}\n\n@app.get(\"/items/{item_id}\")\nasync def read_item(item_id: int):\n    return {\"item_id\": item_id}\n\n@app.post(\"/personne/\")\ndef create_item(personne: Personne):\n    return personne","repo_name":"Quentin1402/depot-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39911876363","text":"import asyncio\nimport os\nfrom typing import Optional\n\nfrom aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom aiogram.utils import exceptions, executor\n\nfrom models import User\nimport database\nfrom loguru import logger as log\nfrom celery import Celery\n\ncelery_app = Celery('tasks', broker=os.environ.get('AMQP_URL'), backend=os.environ.get('DATABASE_URL'))\n\n\n@celery_app.task()\ndef ping():\n    log.info('Celery task triggered')\n    return 'pong'\n\n\nasync def send_message(user_id: int,\n                       text: str,\n                       buttons: Optional[list[dict[str, str]]] = None,\n                       disable_notification: bool = False) -> bool:\n    \"\"\"\n    Safe message sender\n\n    :param user_id:\n    :param text:\n    :param buttons: List of inline buttons in format [{'text': 'text', 'callback_data': 'callback_data', **kwargs}].\n        A button can have all the same keys that InlineKeyboardButton() takes\n    :param disable_notification:\n    :return:\n    \"\"\"\n\n    from main import bot\n\n    try:\n        await bot.send_message(user_id, text, reply_markup=InlineKeyboardMarkup(\n            row_width=2,\n            resize_keyboard=True,\n            one_time_keyboard=True, ).add(\n            *[InlineKeyboardButton(**button) for button in buttons])\n        if buttons else None,\n                               disable_notification=disable_notification)\n        log.info(f\"Sent message to target [ID:{user_id}]\")\n    except exceptions.BotBlocked:\n        log.error(f\"Target [ID:{user_id}]: blocked by user\")\n    except exceptions.ChatNotFound:\n        log.error(f\"Target [ID:{user_id}]: invalid user ID\")\n    except exceptions.RetryAfter as e:\n        log.error(f\"Target [ID:{user_id}]: Flood limit is exceeded. 
Sleep {e.timeout} seconds.\")\n        await asyncio.sleep(e.timeout)\n        return await send_message(user_id, text, buttons)  # Recursive call\n    except exceptions.UserDeactivated:\n        log.error(f\"Target [ID:{user_id}]: user is deactivated\")\n    except exceptions.TelegramAPIError:\n        log.exception(f\"Target [ID:{user_id}]: failed\")\n    else:\n        log.info(f\"Target [ID:{user_id}]: success\")\n        return True\n    return False\n\n\nasync def broadcaster(text: str,\n                      buttons: Optional[list[dict[str, str]]] = None) -> int:\n    \"\"\"\n    Simple broadcaster\n\n    :return: Count of messages\n    \"\"\"\n\n    # Init Tortoise database first\n    await database.init()\n\n    count = 0\n    try:\n        async for user in User.all():\n            if await send_message(user.pk, text, buttons):\n                log.info(f'Sent a message to user [ID:{user.pk}] [USERNAME:{user.name}]')\n                count += 1\n            await asyncio.sleep(.05)  # 20 messages per second (Limit: 30 messages per second)\n    finally:\n        log.info(f\"{count} messages successfully sent.\")\n\n    return count\n\n\n@celery_app.task()\ndef broadcast_message(text: str,\n                      buttons: Optional[list[dict[str, str]]] = None, *args):\n    \"\"\"\n    Celery task used to broadcast new messages to users\n\n    :param text: Text to be sent  #TODO: [11/13/2020 by Mykola] Add formatting, such as HTML or Markdown\n    :param buttons: List of inline buttons in format [{'text': 'text', 'callback_data': 'callback_data', **kwargs}]\n    :return:\n    \"\"\"\n    from main import dp\n    executor.start(dp, broadcaster(text, buttons))\n","repo_name":"Salz0/Derzhavets_bot","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7173691292","text":"from bottle import default_app, request, get, post, redirect, response,run, view, template\nimport json\nimport jwt\nimport uuid\nfrom send_sms import send_sms\nfrom send_email import send_email\nfrom secret import secret\nfrom models import User, save, get_user, save_token\nfrom get_phone import phone\nfrom get_code import generate_code\nfrom generate_token import generate_token\n\n\n@get(\"/\")\n@view(\"index\")\ndef _():\n    return\n\n\n\n@post(\"/validate_token\")\n@view(\"code\")\ndef _():\n    jwtdata = json.load(request.body)\n    try:\n        data = jwt.decode(jwtdata, key=secret, algorithms=['HS256', ])\n        print(data)\n        code = generate_code()\n        send_sms(code)\n        send_email(code)\n        \n        user = User(mobile= str(phone), code= str(code), cpr=data['cpr'])\n        \n        # print(user.dict())\n        user_id = str(uuid.uuid4())\n        save(user_id, dict(user)) \n        response.set_cookie(name='userId', value=user_id)\n        response.status = 200\n        return redirect(\"code\") \n    except jwt.InvalidSignatureError:\n        response.status = 400\n        return redirect('/')\n\n@get(\"/code\")\n@view(\"code\")\ndef _():\n    return\n\n@post(\"/validate_code\")\ndef _():\n    user_id = request.get_cookie(\"userId\")\n    print(user_id)\n    code = request.forms.get(\"code\")\n    print(code)\n    user = get_user(user_id)\n    if user is False:\n        return redirect('/')\n    else:\n        user_code = user[b\"code\"].decode()\n        if user_code == code:\n            print(f'this is the code: {code}')\n            return redirect(\"/welcome_esb\")\n        else:\n            print(f'code not found')\n            return redirect('/')\n\n@get(\"/welcome_esb\")\n@view(\"welcome_esb\")\ndef _():\n    user_id = request.get_cookie(\"userId\")\n    if not user_id:\n        return redirect(\"/\")\n    else:\n        print(user_id)\n        token = generate_token()\n        response.delete_cookie(\"userId\")\n        saved_token = save_token(user_id,token)\n        if saved_token:\n            return dict(token=token)\n        
else:\n            return dict(token=\"Token not valid, Try again\")\n\n\ntry:\n    #Server AWS (Production)\n    import production\n    application = default_app()\nexcept:\n    #local Machine(Development)\n    run(host=\"127.0.0.1\", port=8000, debug=True, reloader=True, server=\"paste\")","repo_name":"aaronalayo/2022-Sem-1-System-Integration","sub_path":"jwt_validation/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27916585396","text":"\"\"\"\r\nGraded Activity\r\n\r\n1 Define a function in python that accepts the radius and returns the value of the\r\narea of a circle with those dimensions. (4pts)\r\n\r\n2 Define a function in python that accepts 3 values and returns only the maximum\r\nof the three. (7pts)\r\n\r\n3 Given a list of integers, define a function in python that returns the sum\r\nof only the odd values in that list. (7pts)\r\n\r\n4 Develop a function in python that accepts a string variable as the first\r\nparameter and the number of characters as the second parameter. The function\r\nmust return a new string consisting of the original string and the correct\r\nnumber of characters needed for the string to come out centered. \r\nDo not add characters at the end of the string. (10pts)\r\n\r\nDue date: Monday 16/05 \"\"\"\r\nejer1='exercise number 1'\r\nejer2='exercise number 2'\r\nejer3='exercise number 3'\r\nejer4='exercise number 4'\r\n\r\n#####################################################################################\r\nprint(format(ejer1, '#^50'))\r\nprint()\r\n# 1\r\n\r\nfrom math import pi\r\n\r\ndef area_circulo(r):\r\n\tarea = pi * r ** 2\r\n\treturn area\r\nr = area_circulo (float(input(\"enter the radius to use:\")))\r\nprint(\"the area of the circle is:\", r)\r\n\r\n#######################################################################################\r\nprint()\r\nprint(format(ejer2, '#^50'))\r\nprint()\r\n# 2\r\n\r\n\r\ndef valor_maximo(valores):\r\n    mayor = valores[0]\r\n\r\n    for i in range(1, len(valores)):\r\n        if valores[i] > mayor:\r\n            mayor = valores[i]\r\n    return mayor\r\n\r\n\r\na = float(input(\"enter a number:\"))\r\nb = float(input(\"enter a number:\"))\r\nc = float(input(\"enter a number:\"))\r\n\r\nnumeros = a, b, c\r\nprint(\"the largest number is:\", (valor_maximo(numeros)))\r\n#########################################################################################\r\nprint()\r\nprint(format(ejer3, '#^50'))\r\nprint()\r\n# 3\r\n\r\ndef\tImpar_detector(numeros):\r\n\timpares = []\r\n\r\n\tfor n in numeros:\r\n\t\tif n % 2 == 1:\r\n\t\t\timpares.append(n)\r\n\r\n\treturn impares\r\n#lista_numeros = [1,2,3,4,5]\r\nlista_numeros = list(range(1,21))\r\nprint(\"my list of numbers\",lista_numeros)\r\nfiltro = Impar_detector(lista_numeros)\r\nprint(\"Odd numbers from the previous list\",filtro)\r\n##########################################################################################\r\nprint()\r\nprint(format(ejer4, '#^50'))\r\nprint()\r\n# 4\r\n\r\n\r\n\r\nStrings = str(input(\"Enter characters: \"))\r\nLongitud = len(Strings)\r\n\r\n\r\n\r\ndef getConct(String, Long):\r\n    return String, \" \", str(Long)\r\n\r\nretFun = getConct(Strings, Longitud)\r\n\r\n#print(retFun)\r\n\r\na=5\r\nb=10\r\nc=15\r\nd=20\r\n\r\nwhile Longitud <= a:\r\n    print(format(Strings,'>16'))\r\n    a = 0\r\n    b = 0\r\n    c = 0\r\n    d = 0\r\nwhile Longitud <= b :\r\n    print(format(Strings,'>20'))\r\n    a = 0\r\n    b = 0\r\n    c = 0\r\n    d = 0\r\n\r\nwhile 
Longitud <= c:\r\n    print(format(Strings,'>24'))\r\n    a = 0\r\n    b = 0\r\n    c = 0\r\n    d = 0\r\n\r\nwhile Longitud <= d:\r\n    print(format(Strings,'>28'))\r\n    a = 0\r\n    b = 0\r\n    c = 0\r\n    d = 0\r\n#add more letter widths + while blocks if needed\r\n","repo_name":"Gamix2Tos/Programacio1-G1","sub_path":"tareas/asignacion_03.py","file_name":"asignacion_03.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27718434481","text":"import json\r\nimport requests\r\nfrom bs4 import BeautifulSoup as bs\r\nimport webbrowser\r\nimport selenium\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.common.exceptions import ElementClickInterceptedException\r\nimport time\r\nfrom multiprocessing import Process\r\nimport pandas\r\nimport csv\r\nimport sys, traceback\r\n\r\n\r\nwebbrowser = webdriver.Chrome(executable_path = 'D:\\\\Chrome Driver\\\\chromedriver.exe')#, options=options)\r\nloginurl = 'http://etaal.gov.in/etaal/ServiceDirectory.aspx'\r\n\r\n# dataframe to store rows\r\ndf = pandas.DataFrame(columns = ['SI', 'State/Ministry', 'Service', 'Desc', 'URL'])\r\n\r\n# appends rows to the csv\r\nf = open(\"results.csv\", \"a\")\r\ndf.to_csv(f)\r\n\r\ndef get_and_use_page(button_xpath):\r\n    '''\r\n    opens the link and clicks the default search button\r\n    '''\r\n    webbrowser.get(loginurl)\r\n    btn = webbrowser.find_element_by_xpath(button_xpath)\r\n    btn.click()\r\n\r\n\r\ndef get_and_use_link(index):\r\n    '''\r\n    opens the page number for fetching more results\r\n    '''\r\n    btn = webbrowser.find_element_by_link_text(index)\r\n    btn.click()\r\n\r\n\r\ndef get_table_row_path(num):\r\n    return webbrowser.find_element_by_xpath('//*[@id=\"ContentPlaceHolder1_gdvSearch\"]/tbody/tr['+str(num)+']')\r\n\r\n\r\ndef get_child_data_path(num, ind):\r\n    return webbrowser.find_element_by_xpath('//*[@id=\"ContentPlaceHolder1_gdvSearch\"]/tbody/tr['+str(num)+']/td['+str(ind)+']')\r\n\r\n\r\ndef get_header_path(ind):\r\n    return webbrowser.find_element_by_xpath('//*[@id=\"ContentPlaceHolder1_gdvSearch\"]/tbody/tr[1]/th['+str(ind)+']')\r\n\r\n\r\ndef find_subsequent_links(ind):\r\n    return webbrowser.find_element_by_xpath('//*[@id=\"ContentPlaceHolder1_gdvSearch\"]/tbody/tr[12]/td/table/tbody/tr/td['+ind+']/a')\r\n\r\n\r\ndef get_table_data_from_current_page():\r\n    '''\r\n    scrapes the data from all rows in the current page\r\n    '''\r\n    global df\r\n\r\n    for i in range(2, 12):\r\n        try:\r\n            table_row_path = get_table_row_path(i)\r\n        except:\r\n            break\r\n        li = []\r\n        for j in [1, 2, 3, 4, 5]:\r\n            try:\r\n                child_path = get_child_data_path(i, j)\r\n            except:\r\n                webbrowser.close()\r\n                return\r\n\r\n            li.append(str(child_path.text))\r\n        \r\n        df.loc[i-2] = li\r\n        df.to_csv(f, header=False)\r\n\r\n\r\nget_and_use_page('//*[@id=\"ContentPlaceHolder1_Button1\"]')\r\nget_table_data_from_current_page()\r\n\r\ngeneral_path = '//*[@id=\"ContentPlaceHolder1_gdvSearch\"]/tbody/tr[12]/td/table/tbody/tr/td['\r\nback_path = ']/a'\r\n\r\n# links = \"javascript:__doPostBack('ctl00$ContentPlaceHolder1$gdvSearch','Page$\"\r\n# links_tail = \"')\"\r\ncurr = 2\r\npage = 0\r\nwhile curr <= 12:\r\n    page += 1\r\n    try:\r\n        path = general_path+str(curr)+back_path\r\n        btn = webbrowser.find_element_by_xpath(path)\r\n        btn.click()\r\n        time.sleep(30)\r\n        # webbrowser.implicitly_wait(100)\r\n        get_table_data_from_current_page()\r\n    except ElementClickInterceptedException:\r\n        continue\r\n    if btn.text == \"...\":\r\n        print('Last page: ', page)\r\n        curr = 2\r\n    curr += 
1\r\n\r\nf.close()\r\nwebbrowser.close()\r\n\r\n# //*[@id=\"ContentPlaceHolder1_gdvSearch\"]/tbody/tr[12]/td/table/tbody/tr/td[3]/a","repo_name":"shivam18shah/eTaal-scrap","sub_path":"eTaal_parse.py","file_name":"eTaal_parse.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19574209309","text":"\nfrom invoice.models import *\n\nprint(\"Starting test\")\nc1 = Customer(name=\"Sai\", phone_number=111, address=\"U Chit MG\")\nc1.save()\n\np1 = Product(name=\"SK\", price=50, quantity=2)\np2 = Product(name=\"RM\", price=60, quantity=3)\np3 = Product(name=\"FL\", price=70, quantity=4)\n\np1.save()\np2.save()\np3.save()\n\ni1 = Invoice(customer=c1,payment=\"K\", payment_id=1)\ni1.save()\n\n\n\n","repo_name":"saiherng/cosmos-app","sub_path":"cosmos/invoice/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38108909176","text":"import hifi_utils\nimport hifi_android\nimport hashlib\nimport os\nimport platform\nimport re\nimport shutil\nimport tempfile\nimport json\nimport xml.etree.ElementTree as ET\nimport functools\n\n# The way Qt is handled is a bit complicated, so I'm documenting it here.\n#\n# 1. User runs cmake\n# 2. cmake calls prebuild.py, which is referenced in /CMakeLists.txt\n# 3. prebuild.py calls this code.\n# 4. hifi_qt.py determines how to handle cmake: do we need to download a package, and which?\n#    4.a - Using system Qt\n#          No download, most special paths are turned off.\n#          We build in the same way a normal Qt program would.\n#    4.b - Using a user-provided Qt build in a custom directory.\n#          We just need to set the cmakePath to the right dir (qt5-install/lib/cmake)\n#    4.c - Using a premade package.\n#          We check the OS and distro and set qtUrl to the URL to download.\n#          After this, it works on the same pathway as 4.b.\n# 5. We write /qt.cmake, which contains paths that are passed down to SetupQt.cmake\n#    The template for this file is in CMAKE_TEMPLATE just below this comment\n#    and it sets the QT_CMAKE_PREFIX_PATH variable used by SetupQt.cmake.\n# 6. cmake includes /qt.cmake receiving our information\n#    In the case of system Qt, this step is skipped.\n# 7. cmake runs SetupQt.cmake which takes care of the cmake parts of the Qt configuration.\n#    In the case of system Qt, SetupQt.cmake is a no-op. 
It runs but exits immediately.\n#\n# The format for a prebuilt qt is a package containing a top-level directory named\n# 'qt5-install', which contains the result of a \"make install\" from a build of the Qt source.\n\nprint = functools.partial(print, flush=True)\n\n# Encapsulates the vcpkg system\nclass QtDownloader:\n    CMAKE_TEMPLATE = \"\"\"\n# this file auto-generated by hifi_qt.py\nget_filename_component(QT_CMAKE_PREFIX_PATH \"{}\" ABSOLUTE CACHE)\nget_filename_component(QT_CMAKE_PREFIX_PATH_UNCACHED \"{}\" ABSOLUTE)\n\n# If the cached cmake toolchain path is different from the computed one, exit\nif(NOT (QT_CMAKE_PREFIX_PATH_UNCACHED STREQUAL QT_CMAKE_PREFIX_PATH))\n    message(FATAL_ERROR \"QT_CMAKE_PREFIX_PATH has changed, please wipe the build directory and rerun cmake\")\nendif()\n\"\"\"\n    def __init__(self, args):\n        self.args = args\n        self.configFilePath = os.path.join(args.build_root, 'qt.cmake')\n        self.version = os.getenv('VIRCADIA_USE_QT_VERSION', '5.15.2')\n\n        # OS dependent information\n        system = platform.system()\n\n        qt_found = False\n        system_qt = False\n\n        # Here we handle the 3 possible cases of dealing with Qt:\n        if os.getenv('VIRCADIA_USE_SYSTEM_QT', \"\") != \"\":\n            # 1. Using the system provided Qt. This is only recommended for Qt 5.15.0 and above,\n            # as it includes a required fix on Linux.\n            #\n            # This path only works on Linux as neither Windows nor OSX ship Qt.\n\n            if system != \"Linux\":\n                raise Exception(\"Using the system Qt is only supported on Linux\")\n\n            self.path = None\n            self.cmakePath = None\n\n            qt_found = True\n            system_qt = True\n            print(\"Using system Qt\")\n\n        elif os.getenv('VIRCADIA_QT_PATH', \"\") != \"\":\n            # 2. Using a user-provided directory.\n            # VIRCADIA_QT_PATH must point to a directory with a Qt install in it.\n\n            self.path = os.getenv('VIRCADIA_QT_PATH')\n            self.fullPath = self.path\n            self.cmakePath = os.path.join(self.fullPath, 'lib', 'cmake')\n\n            qt_found = True\n            print(\"Using Qt from \" + self.fullPath)\n\n        else:\n            # 3. Using a pre-built Qt.\n            #\n            # This works somewhat differently from above, notice how path and fullPath are\n            # used differently in this case.\n            #\n            # In the case of a user-provided directory, we just use the user-supplied directory.\n            #\n            # For a pre-built qt, however, we have to unpack it. The archive is required to contain\n            # a qt5-install directory in it.\n\n            self.path = os.path.expanduser(\"~/vircadia-files/qt\")\n            self.fullPath = os.path.join(self.path, 'qt5-install')\n            self.cmakePath = os.path.join(self.fullPath, 'lib', 'cmake')\n\n            if (not os.path.isdir(self.path)):\n                os.makedirs(self.path)\n\n            qt_found = os.path.isdir(self.fullPath)\n            print(\"Using a packaged Qt\")\n\n\n        if not system_qt:\n            if qt_found:\n                # Sanity check, ensure we have a good cmake directory\n                qt5_dir = os.path.join(self.cmakePath, \"Qt5\")\n                if not os.path.isdir(qt5_dir):\n                    raise Exception(\"Failed to find Qt5 directory under \" + self.cmakePath + \". There should be a \" + qt5_dir)\n                else:\n                    print(\"Qt5 check passed, found \" + qt5_dir)\n\n            # I'm not sure why this is needed. 
It's used by hifi_singleton.\n            # Perhaps it stops multiple build processes from interfering?\n            lockDir, lockName = os.path.split(self.path)\n            lockName += '.lock'\n            if not os.path.isdir(lockDir):\n                os.makedirs(lockDir)\n\n            self.lockFile = os.path.join(lockDir, lockName)\n\n            if qt_found:\n                print(\"Found pre-built Qt5\")\n                return\n\n        if 'Windows' == system:\n            self.qtUrl = hifi_utils.readEnviromentVariableFromFile(self.args.build_root, 'EXTERNAL_QT_WIN_URLS').split(\";\")\n            self.qtSha512 = hifi_utils.readEnviromentVariableFromFile(self.args.build_root, 'EXTERNAL_QT_WIN_SHA512')\n        elif 'Darwin' == system:\n            self.qtUrl = hifi_utils.readEnviromentVariableFromFile(self.args.build_root, 'EXTERNAL_QT_MAC_URLS').split(\";\")\n            self.qtSha512 = hifi_utils.readEnviromentVariableFromFile(self.args.build_root, 'EXTERNAL_QT_MAC_SHA512')\n        elif 'Linux' == system:\n            import distro\n            cpu_architecture = platform.machine()\n\n            if 'x86_64' == cpu_architecture:\n                try:\n                    u_major = int( distro.major_version() )\n                except ValueError:\n                    u_major = 0\n\n                if distro.id() == 'ubuntu' or distro.id() == 'linuxmint':\n                    if (distro.id() == 'ubuntu' and u_major == 18) or distro.id() == 'linuxmint' and u_major == 19:\n                        self.qtUrl = hifi_utils.readEnviromentVariableFromFile(self.args.build_root, 'EXTERNAL_QT_LINUX_URLS').split(\";\")\n                        self.qtSha512 = hifi_utils.readEnviromentVariableFromFile(self.args.build_root, 'EXTERNAL_QT_LINUX_SHA512')\n                    elif (distro.id() == 'ubuntu' and u_major > 18) or (distro.id() == 'linuxmint' and u_major > 19):\n                        self.__no_qt_package_error()\n                    else:\n                        self.__unsupported_error()\n                else:\n                    self.__no_qt_package_error()\n\n\n            elif 'aarch64' == cpu_architecture:\n                if distro.id() == 'ubuntu':\n                    try:\n                        u_major = int( distro.major_version() )\n                    except ValueError:\n                        u_major = 0\n\n                    if u_major == 18:\n                        self.qtUrl = hifi_utils.readEnviromentVariableFromFile(self.args.build_root, 'EXTERNAL_QT_UBUNTU_AARCH64_URLS').split(\";\")\n                        self.qtSha512 = hifi_utils.readEnviromentVariableFromFile(self.args.build_root, 'EXTERNAL_QT_UBUNTU_AARCH64_SHA512')\n                    elif u_major > 19:\n                        self.__no_qt_package_error()\n                    else:\n                        self.__unsupported_error()\n\n                elif distro.id() == 'debian':\n                    try:\n                        u_major = int( distro.major_version() )\n                    except ValueError:\n                        u_major = 0\n\n                    if u_major == 10:\n                        self.qtUrl = hifi_utils.readEnviromentVariableFromFile(self.args.build_root, 'EXTERNAL_QT_DEBIAN_AARCH64_URLS').split(\";\")\n                        self.qtSha512 = hifi_utils.readEnviromentVariableFromFile(self.args.build_root, 'EXTERNAL_QT_DEBIAN_AARCH64_SHA512')\n                    elif u_major > 10:\n                        self.__no_qt_package_error()\n                    else:\n                        self.__unsupported_error()\n\n                else:\n                    self.__no_qt_package_error()\n            else:\n                raise Exception('UNKNOWN CPU ARCHITECTURE!!!')\n\n        else:\n            print(\"System      : \" + platform.system())\n            print(\"Architecture: \" + platform.architecture())\n            print(\"Machine     : \" + platform.machine())\n            raise Exception('UNKNOWN OPERATING SYSTEM!!!')\n\n    def showQtBuildInfo(self):\n        print(\"\")\n        print(\"It's also possible to build Qt for your distribution, please see the documentation at:\")\n        print(\"https://github.com/vircadia/vircadia/tree/master/tools/qt-builder\")\n        print(\"\")\n        print(\"Alternatively, you can try building against the system Qt by setting the VIRCADIA_USE_SYSTEM_QT environment variable.\")\n        print(\"You'll need to install the development packages, and to have Qt 5.15.0 or later.\")\n\n    def writeConfig(self):\n        print(\"Writing cmake config to {}\".format(self.configFilePath))\n        # Write out the configuration for use by CMake\n        cmakeConfig = 
QtDownloader.CMAKE_TEMPLATE.format(self.cmakePath, self.cmakePath).replace('\\\\', '/')\n        with open(self.configFilePath, 'w') as f:\n            f.write(cmakeConfig)\n\n    def installQt(self):\n        if not os.path.isdir(self.fullPath):\n            print(\"Fetching Qt from {} to {}\".format(self.qtUrl, self.path))\n            hifi_utils.downloadAndExtract(self.qtUrl, self.path, self.qtSha512)\n        else:\n            print ('Qt has already been downloaded')\n\n\n    def __unsupported_error(self):\n        import distro\n        cpu_architecture = platform.machine()\n\n        print('')\n        hifi_utils.color('red')\n        print(\"Sorry, \" + distro.name(pretty=True) + \" on \" + cpu_architecture + \" is too old and won't be officially supported.\")\n        hifi_utils.color('white')\n        print(\"Please upgrade to a more recent Linux distribution.\")\n        hifi_utils.color('clear')\n        print('')\n        raise hifi_utils.SilentFatalError(3)\n\n    def __no_qt_package_error(self):\n        import distro\n        cpu_architecture = platform.machine()\n\n        print('')\n        hifi_utils.color('red')\n        print(\"Sorry, we don't have a prebuilt Qt package for \" + distro.name(pretty=True) + \" on \" + cpu_architecture + \".\")\n        hifi_utils.color('white')\n        print('')\n        print(\"If this is a recent distribution, dating from 2021 or so, you can try building\")\n        print(\"against the system Qt by running this command, and trying again:\")\n        print(\"    export VIRCADIA_USE_SYSTEM_QT=1\")\n        print(\"\")\n        hifi_utils.color('clear')\n        print(\"If you'd like to try to build Qt from source either for building Vircadia, or\")\n        print(\"to contribute a prebuilt package for your distribution, please see the\")\n        print(\"documentation at: \", end='')\n        hifi_utils.color('blue')\n        print(\"https://github.com/vircadia/vircadia/tree/master/tools/qt-builder\")\n        hifi_utils.color('clear')\n        print('')\n        raise hifi_utils.SilentFatalError(2)\n","repo_name":"vircadia/vircadia-native-core","sub_path":"hifi_qt.py","file_name":"hifi_qt.py","file_ext":"py","file_size_in_byte":11538,"program_lang":"python","lang":"en","doc_type":"code","stars":523,"dataset":"github-code","pt":"78"} +{"seq_id":"73930328573","text":"#words_alt.py\n#code from CSEV (original author) in Python3 \n#uses slightly different syntax line 15 list function\n#uses '==' as opposed to 'is' line 16\n\nname = input('Enter file:')\nhandle = open(name, 'r')\ntext = handle.read()\nwords = text.split()\ncounts = dict()\nfor word in words: \n    counts[word] = counts.get(word,0) + 1\n\nbigcount = None\nbigword = None\nfor word,count in list(counts.items()):\n    if bigcount == None or count > bigcount:\n        bigword = word \n        bigcount = count \n\nprint(\"The word that shows up the most is '{}', which shows up {} \\\ntimes\".format(bigword, bigcount))\n","repo_name":"Bayaz/GWPython","sub_path":"words_alt.py","file_name":"words_alt.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74618669373","text":"#!/usr/bin/python3\nimport random\nnumber = random.randint(-10000, 10000)\n# *************************\nmessage = \"null\"\nlast_digit = number % 10 if number > 0 else number % -10\nif last_digit != 0:\n    message = \"and is greater than 5\" if last_digit > 5 \\\n        else \"and is less than 6 and not 0\"\nelse:\n    message = \"and is 0\"\n\nprint(f\"Last digit of {number:d} is {last_digit:d} 
{message}\")\n","repo_name":"husamrio/alx-higher_level_programming","sub_path":"0x01-python-if_else_loops_functions/1-last_digit.py","file_name":"1-last_digit.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"5760123191","text":"# -*- coding: utf-8 -*-\n\nfrom typing import List, Optional\n\nfrom sqlalchemy.schema import Column\n\nfrom .entity import *\nfrom ..meta import MetaSpec, meta_spec\n\n\nclass BillSpec:\n def __init__(self, meta_spec_: MetaSpec):\n self.__meta_spec = meta_spec_\n\n @property\n def businesses(self) -> List[str]:\n return self.__meta_spec.businesses\n\n def is_satisfied_by(self, bill: Bill) -> (bool, Optional[str]):\n ok1, err1 = self._is_business_valid(bill.business_modelx_code)\n ok2, err2 = self._is_bill_subject_valid(bill.bill_subject_name)\n ok3, err3 = self._is_provider_valid(bill.provider_name)\n\n if ok1 and ok2 and ok3:\n return True, None\n else:\n return False, \"\\n\".join([str(e) for e in [err1, err2, err3] if e is not None])\n\n def _is_business_valid(self, business: str) -> (bool, Optional[str]):\n return self.__meta_spec.is_business_valid(business)\n\n def _is_bill_subject_valid(self, bill_subject: str) -> (bool, Optional[str]):\n return self.__meta_spec.is_bill_subject_valid(bill_subject)\n\n def _is_provider_valid(self, provider: str) -> (bool, Optional[str]):\n return self.__meta_spec.is_provider_valid(provider)\n\n\nclass OriginalBillAutoFixer:\n\n @classmethod\n def auto_fix_and_mark_exception(cls, original_bill: OriginalBill):\n \"\"\" 检测并修复已知的异常数据,并标记异常\n\n 例如,/ 表示无,可以自动替换为空值或对应类型的默认值\n\n \"\"\"\n # for attr in cls.__get_attrs_that_can_be_auto_fixed():\n # auto_fix_func = getattr(cls.__dict__[f'_{cls.__name__}__auto_fix_{attr}'], '__func__')\n # attr_value = getattr(original_bill, attr)\n # setattr(original_bill, attr, auto_fix_func(attr_value))\n cls.__auto_fix_type_mistakes(original_bill)\n\n @classmethod\n def __auto_fix_type_mistakes(cls, original_bill: OriginalBill):\n \"\"\" 自动修复类型不匹配的属性,修复方法是将值置空 \"\"\"\n for col_ in OriginalBill.iter_columns():\n col: Column = col_\n\n if col.name in [\"create_time\", \"update_time\", \"deleted_at\"]:\n continue\n\n if col.name == 'actually_paid':\n cls.__auto_fix_actually_paid(original_bill)\n continue\n\n col_value = getattr(original_bill, col.name)\n\n if col_value is not None:\n col_type = col.type.python_type\n try:\n col_type(col_value)\n except (ValueError, TypeError):\n original_bill.append_exception(\n f'{col.comment}=\"{col_value}\"类型({col_type.__name__})校验不通过,已置空,请检查并更新该属性;')\n setattr(original_bill, col.name, None)\n\n @classmethod\n def __auto_fix_actually_paid(cls, original_bill: OriginalBill):\n attr_name = 'actually_paid'\n\n actually_paid = getattr(original_bill, attr_name)\n if actually_paid is not None:\n try:\n round(float(actually_paid), 2)\n except (ValueError, TypeError):\n original_bill.append_exception(\n f'实付金额=\"{actually_paid}\"类型(decimal(11, 2))校验不通过,已置空,请检查并更新该属性;')\n setattr(original_bill, attr_name, None)\n\n # @classmethod\n # def __get_attrs_that_can_be_auto_fixed(cls) -> List[str]:\n # attrs = []\n #\n # for attr in cls.__dict__.keys():\n # if attr.startswith(f'_{cls.__name__}__auto_fix_'):\n # attrs.append(attr[attr.find('auto_fix_') + 9:])\n #\n # return attrs\n #\n # @staticmethod\n # def __auto_fix_unit_price(val):\n # if val == '/':\n # return None\n #\n # if val is not None:\n # try:\n # float(val)\n # except ValueError:\n # return None\n #\n # return val\n 
#\n    # @staticmethod\n    # def __auto_fix_statistic_cnt(val):\n    #     if val == '/':\n    #         return None\n    #\n    #     if val is not None:\n    #         try:\n    #             float(val)\n    #         except ValueError:\n    #             return None\n    #\n    #     return val\n\n\nclass OriginalBillSpec(BillSpec):\n    def __init__(self, meta_spec_: MetaSpec):\n        super().__init__(meta_spec_)\n        self.auto_fixer = OriginalBillAutoFixer\n\n    def fix_known_exception_cases(self, original_bill: OriginalBill):\n        self.auto_fixer.auto_fix_and_mark_exception(original_bill)\n\n\nclass LedgerBillSpec(BillSpec):\n    def __init__(self, meta_spec_: MetaSpec):\n        super().__init__(meta_spec_)\n\n    def _is_business_valid(self, business: str) -> (bool, Optional[str]):\n        if business in self.businesses:\n            return True, None\n        else:\n            return False, f'Business name \"{business}\" is invalid; reference values: {\"、\".join(self.businesses[:3])}...'\n\n\ndef original_bill_spec() -> OriginalBillSpec:\n    return OriginalBillSpec(meta_spec())\n\n\ndef ledger_bill_spec() -> LedgerBillSpec:\n    return LedgerBillSpec(meta_spec())\n","repo_name":"Ethan-zhengyw/mvc_ddd","sub_path":"service/controls/domain/bill/spec.py","file_name":"spec.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} diff --git a/4301.jsonl b/4301.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..42223dbf64d4e7de8c25b338e382cf9209ecdaf4 --- /dev/null +++ b/4301.jsonl @@ -0,0 +1,736 @@ +{"seq_id":"21061841","text":"from bs4 import BeautifulSoup\nfrom string import Template\nimport sys\nimport requests\nimport aiohttp\nimport asyncio\nimport random\nimport webbrowser\nimport os,time\nfrom useragents import USER_AGENT_LIST\n\n\nclass DramaHighScoreAlerter:\n    '''it finds all dramas currently airing, then finds their scores on douban and \n    shows the results that have a score higher than or equal to the 'highscore' argument. \n    \n    it has 2 modes: sync(using requests) and async(using aiohttp and asyncio).\n    async is much faster but likely to trigger douban's anti-spider system, \n    so for personal usage, sync is preferred. 
if async is used, set a proper concurrent \n    connection number\n    \n    command line usage:\n    python highscorealerts.py `highscore` `sync` `connections`\n    \n    command line examples:\n    \n    python highscorealerts.py \n    python highscorealerts.py 8.5 \n    python highscorealerts.py 8.5 1\n    # async, concurrent connection number 5\n    python highscorealerts.py 8.5 0 5 \n    '''\n    \n    def __init__(self,highscore=8.5,sync=True,connections=5):\n        '''only if sync is False, connections is used'''\n        \n        self.highscore = highscore\n        self.sync = sync\n        self.dramas = []\n        self.scores = []\n        self.alerts = []\n        self.semaphore = asyncio.Semaphore(connections) \n        \n    def get_headers(self):\n        # use a random user agent from a preset list\n        USER_AGENT = random.choice(USER_AGENT_LIST)\n        headers = {'user-agent': USER_AGENT}\n        return headers\n    \n    def parse_dramas(self):\n        '''parse all dramas that are active this week'''\n        \n        drama_site = r'https://www.meijutt.com/'\n        html = requests.get(drama_site,headers=self.get_headers()).content\n        soup = BeautifulSoup(html, 'html.parser')\n        week = soup.select_one(\"div.r.week-day\")\n        tabs = week.select(\".tabs-list\")\n        dramas = []\n        for tab in tabs:\n            drama = tab.find_all('a')\n            for d in drama:\n                dramas.append(d)\n        \n        self.dramas = dramas\n        \n    async def _parse_score(self,drama):\n        result = await self.parse_score_async(drama)\n        if result is not None:\n            self.scores.append(result)\n        \n    def parse_scores_async(self):\n        loop = asyncio.get_event_loop()\n        tasks = [self._parse_score(drama) for drama in self.dramas]\n        loop.run_until_complete(asyncio.gather(*tasks))\n        loop.close()\n        \n    def parse_scores_sync(self):\n        for drama in self.dramas:\n            result = self.parse_score_sync(drama)\n            if result is not None:\n                self.scores.append(result)\n        \n    def parse_scores(self):\n        if self.sync:\n            html = self.parse_scores_sync()\n        else:\n            html = self.parse_scores_async()\n\n    def output_html(self):\n        '''show alerts in browser'''\n        \n        tpl ='''\n        <tr>\n            <td><a href=\"%s\">%s</a></td>\n            <td>%s</td>\n        </tr>\n        '''\n        rows = ''\n        html = ''\n        for score in self.alerts:\n            rows += tpl % (score[2],score[0],score[1])\n        with open('alerts_tpl.html','r') as f:\n            html_tpl = f.read()\n            html = html_tpl.replace('${rows}', rows)\n            f.close()\n        with open('alerts.html','w') as f:\n            f.write(html)\n            f.close()\n        \n        url = 'file://' + os.path.dirname(os.path.abspath(__file__)) + \"/alerts.html\"\n        webbrowser.open(url)\n        \n    async def parse_score_async(self,drama):\n        '''get the drama score from douban async. 
it is much faster but likely to\n        trigger douban anti-spider system'''\n\n        url,drama_name = self.parse_score_setup(drama)\n        \n        #limit asyncio concurrent number so it is less likely to get banned\n        async with self.semaphore:\n            async with aiohttp.ClientSession() as session:\n                async with session.get(url, headers=self.get_headers(), timeout=10) as response:\n                    html = await response.read()\n                    return self.parse_result(html,drama_name)\n        \n    def parse_score_setup(self,drama):\n\n        drama_site_search = r'https://www.douban.com/search?q=%s'\n\n        splits = drama.get('title').split('第')\n        \n        if len(splits) > 1:\n            splits[1] = '第' + splits[1]\n        drama_name = (' ').join(splits).strip()\n        \n        url = drama_site_search % drama_name\n        return url,drama_name\n        \n    def parse_score_sync(self,drama):\n        '''get the drama score from douban the sync way, slower but less likely to get banned'''\n\n        url,drama_name = self.parse_score_setup(drama)\n        \n        html = requests.get(url,headers=self.get_headers()).content\n        return self.parse_result(html,drama_name)\n        \n    def handle_error(self,drama_name):\n        #TODO unify different translation names of two sites\n        #print(\"can't find %s\" % drama_name)\n        pass\n        \n    def parse_result(self,html,drama_name):\n\n        soup = BeautifulSoup(html, 'html.parser')\n        result = soup.select_one('.result-list > .result')\n        if result is None:\n            self.handle_error(drama_name)\n            return\n        a = result.select_one('.title a')\n        title = a.string.strip()\n        drama_url = a.get('href')\n        if title != drama_name:\n            self.handle_error(drama_name)\n            return \n\n        rating =result.select_one('.rating-info span:nth-of-type(2)')\n        if rating is None:\n            self.handle_error(drama_name)\n            return\n        score = 0\n        if rating.has_attr('class'):\n            score = float(rating.string)\n\n        return (drama_name,score,drama_url) \n        \n    def alert(self):\n        self.parse_dramas()\n        \n        #test code\n        start = time.time()\n        #self.dramas = self.dramas[:18]\n\n        self.parse_scores()\n        \n        end = time.time()\n        print('it took %f seconds' % (end - start))\n        \n        self.alerts = [s for s in self.scores if s[1] >= self.highscore]\n        self.output_html()\n    \n\nif __name__== \"__main__\":\n\n    highscore = 8.5\n    sync = True\n    semaphore = 5\n    \n    if (len(sys.argv) >= 2):\n        try:\n            highscore = float(sys.argv[1])\n            if (len(sys.argv) >= 3):\n                try:\n                    sync = bool(int(sys.argv[2]))\n                except Exception as e:\n                    pass\n                if (len(sys.argv) >= 4):\n                    try:\n                        semaphore = int(sys.argv[3])\n                    except Exception as e:\n                        pass\n            \n        except Exception as e:\n            pass\n        \n    alerter = DramaHighScoreAlerter(highscore=highscore,sync=sync,connections=semaphore)\n    alerter.alert()\n    \n\n    \n\n    ","sub_path":"highscorealerts.py","file_name":"highscorealerts.py","file_ext":"py","file_size_in_byte":6864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"375660016","text":"import sys\nimport time\nimport requests\n\n\ndef pb_report(i, n):\n    j = (i + 1) / n\n    sys.stdout.write(\"\\r\")\n    sys.stdout.write(\"[%-20s] %d%%\" % (\"=\" * int(20 * j), 100 * j))\n    sys.stdout.flush()\n\n\ndef get_request(url):\n    import random\n\n    sleep_time = 0\n    while True:\n        try:\n            response = requests.get(url)\n            break\n        except Exception as e:\n            print(e)\n            rand = random.randint(0, 10)\n            time.sleep(rand)\n            sleep_time += rand\n            if sleep_time > 60:\n                return None\n    if response.status_code == 200:\n        try:\n            return response.json()\n        except Exception as e:\n            print(e)\n    return None\n\n\ndef flatten_list(list_of_lists):\n    if len(list_of_lists) == 0:\n        return list_of_lists\n    if isinstance(list_of_lists[0], list):\n        return 
flatten_list(list_of_lists[0]) + flatten_list(list_of_lists[1:])\n return list_of_lists[:1] + flatten_list(list_of_lists[1:])\n","sub_path":"baseball-data-etl/etl/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"531250982","text":"# --------------------------------------------------------\n# Motion R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Simon Meister\n# --------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\nimport os\nimport glob\nimport shutil\n\nimport numpy as np\nimport PIL.Image as Image\nfrom PIL import ImageDraw\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom cityscapesscripts.helpers.labels import trainId2label\n\nfrom object_detection.data_decoders.tf_example_decoder import TfExampleDecoder\nfrom object_detection.utils.flow_util import flow_to_color, flow_error_image, flow_error_avg\nfrom object_detection.utils.np_motion_util import dense_flow_from_motion, q_rotation_angle\nfrom object_detection.utils.visualization_utils import visualize_flow\n\n\nwith tf.Graph().as_default():\n file_pattern = 'object_detection/data/records/mytest/vkitti_val/00000-of-00000.record'\n #file_pattern = 'object_detection/data/records/vkitti_train/00000-of-00020.record'\n tfrecords = glob.glob(file_pattern)\n\n with tf.device('/cpu:0'):\n filename_queue = tf.train.string_input_producer(\n tfrecords, capacity=len(tfrecords))\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n example = TfExampleDecoder().decode(serialized_example)\n flow_color = flow_to_color(tf.expand_dims(example['groundtruth_flow'], 0))[0, :, :, :]\n print(example.keys())\n\n sess = tf.Session()\n init_op = tf.group(\n tf.global_variables_initializer(),\n tf.local_variables_initializer())\n\n sess.run(init_op)\n\n tf.train.start_queue_runners(sess=sess)\n out_dir = 'object_detection/output/tests/vkitti/'\n if os.path.isdir(out_dir):\n shutil.rmtree(out_dir)\n os.makedirs(out_dir)\n with sess.as_default():\n for i in range(100):\n example_np, flow_color_np = sess.run([example, flow_color])\n img_id_np = i #example_np['filename']\n image_np = example_np['image']\n gt_boxes_np = example_np['groundtruth_boxes']\n gt_classes_np = example_np['groundtruth_classes']\n gt_masks_np = example_np['groundtruth_instance_masks']\n height, width = image_np.shape[:2]\n num_instances_np = gt_masks_np.shape[0]\n image_np = np.squeeze(image_np)\n depth_np = example_np['groundtruth_depth']\n gt_motions_np = example_np['groundtruth_instance_motions']\n\n composed_flow_color_np, flow_error_np = visualize_flow(\n depth_np,\n gt_motions_np,\n np.ones([gt_motions_np.shape[0]]),\n example_np['groundtruth_camera_motion'],\n example_np['camera_intrinsics'],\n masks=gt_masks_np,\n groundtruth_flow=example_np['groundtruth_flow'])\n\n # motion gt summary\n gt_q = gt_motions_np[:, :4]\n gt_trans = gt_motions_np[:, 4:7]\n mean_rot_angle = np.mean(np.degrees(q_rotation_angle(gt_q)))\n mean_trans = np.mean(np.linalg.norm(gt_trans))\n cam_moving = example_np['groundtruth_camera_motion'][-1]\n\n print('image_id: {}, instances: {}, shape: {}, rot(deg): {}, trans: {}, moving: {}'\n .format(img_id_np, num_instances_np, image_np.shape, mean_rot_angle, mean_trans,\n cam_moving))\n\n # overlay masks\n for i in range(gt_boxes_np.shape[0]):\n label = 
trainId2label[gt_classes_np[i]]\n mask = np.expand_dims(gt_masks_np[i, :, :], 2)\n image_np += (0.5 * mask * np.array(label.color)).astype(np.uint8)\n # draw boxes\n im = Image.fromarray(image_np)\n imd = ImageDraw.Draw(im)\n for i in range(gt_boxes_np.shape[0]):\n label = trainId2label[gt_classes_np[i]]\n name = 'car' if gt_classes_np[i] == 1 else 'van'\n if gt_motions_np[i, 10] < 0.5:\n name = name + '_static'\n color = 'rgb({},{},{})'.format(*label.color)\n pos = gt_boxes_np[i, :]\n y0 = pos[0] * height\n x0 = pos[1] * width\n y1 = pos[2] * height\n x1 = pos[3] * width\n imd.rectangle([x0, y0, x1, y1], outline=color)\n imd.text(((x0 + x1) / 2, y1), name, fill=color)\n\n depth_im = Image.fromarray(np.squeeze(\n depth_np * 255 / 655.3).astype(np.uint8))\n flow_im = Image.fromarray(np.squeeze(\n flow_color_np * 255).astype(np.uint8))\n composed_flow_im = Image.fromarray(np.squeeze(\n composed_flow_color_np * 255).astype(np.uint8))\n flow_error_im = Image.fromarray(np.squeeze(\n flow_error_np * 255).astype(np.uint8))\n next_im = Image.fromarray(np.squeeze(example_np['next_image']))\n\n im.save(os.path.join(out_dir, str(img_id_np) + '_image1.png'))\n next_im.save(os.path.join(out_dir, str(img_id_np) + '_image2.png'))\n depth_im.save(os.path.join(out_dir, str(img_id_np) + '_depth.png'))\n flow_im.save(os.path.join(out_dir, str(img_id_np) + '_flow.png'))\n composed_flow_im.save(os.path.join(out_dir, str(img_id_np) + '_flow_from_motion.png'))\n flow_error_im.save(os.path.join(out_dir, str(img_id_np) + '_error.png'))\n sess.close()\n","sub_path":"object_detection/create_vkitti_tf_record_test.py","file_name":"create_vkitti_tf_record_test.py","file_ext":"py","file_size_in_byte":5545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"151709617","text":"# -*- coding: utf-8 -*-\nimport pygame\nfrom pygame.locals import *\nimport Fighter\nfrom shot import *\nfrom Leaser import *\n\n\n\nSCR_RECT = Rect(0,0,1200,600)\n\nclass Beast(Fighter.Fighter):\n def __init__(self,hp=1000,attack=200,speed=40):\n Fighter.Fighter.__init__(self,hp,attack,speed)\n self.rect.left = SCR_RECT.left\n self.rect.bottom = SCR_RECT.height/2\n self.reload_timer = 0\n self.level = \"Error\"\n self.prelevel = 1\n self.killpoint =0\n self.sysfont = pygame.font.SysFont(None,50)\n\n def update(self):\n\n pressed_keys = pygame.key.get_pressed()\n\n if pressed_keys[K_LEFT]:\n self.rect.move_ip(-self.speed,0)\n elif pressed_keys[K_RIGHT]:\n self.rect.move_ip(self.speed,0)\n elif pressed_keys[K_UP]:\n self.rect.move_ip(0,-self.speed)\n elif pressed_keys[K_DOWN]:\n self.rect.move_ip(0,self.speed)\n self.rect.clamp_ip(SCR_RECT)\n \n \n if pressed_keys[K_SPACE]:\n\n if self.reload_timer >0:\n self.reload_timer -= 1\n \n else:\n Leaser(self.rect.center)\n self.reload_timer = Leaser.reload_time\n\n def draw(self,screen,killpoint):\n level = self.sysfont.render(\"Kill:\"+str(killpoint)+\" Level: Error HP:\"+str(self.hp)+\" Attack:\"+str(self.attack)+\" Speed:\"+str(self.speed),True,(0,0,0))\n screen.blit(level,(280,610))\n \n","sub_path":"Beast.py","file_name":"Beast.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"55609372","text":"#!/usr/bin/env python\nimport math\nimport numpy as np\n\nfrom FireROOT.Analysis.Events import *\nfrom FireROOT.Analysis.Utils import *\n\nclass MyEvents(ProxyEvents):\n def __init__(self, files=None, type='MC', maxevents=-1, channel=['2mu2e', '4mu'], **kwargs):\n 
super(MyEvents, self).__init__(files=files, type=type, maxevents=maxevents, channel=channel, **kwargs)\n        self.KeepCutFlow=True\n\n    def processEvent(self, event, aux):\n        if aux['channel'] not in self.Channel: return\n        chan = aux['channel']\n        cutflowbin = 5\n\n        self.Histos['{}/cutflow'.format(chan)].Fill(cutflowbin, aux['wgt']); cutflowbin+=1\n\n        lj, proxy = aux['lj'], aux['proxy']\n        if not lj.passCosmicVeto(event): return\n        if abs(proxy.dz(event))>40: return\n        self.Histos['{}/cutflow'.format(chan)].Fill(cutflowbin, aux['wgt']); cutflowbin+=1\n\n        mind0s = []\n        if lj.isMuonType() and not math.isnan(lj.pfcand_tkD0Min):\n            mind0s.append( lj.pfcand_tkD0Min*1e4 )\n        mind0s.append( abs(proxy.d0(event))*1e4 )\n\n        self.Histos['{}/proxyd0inc'.format(chan)].Fill( abs(proxy.d0(event))*1e4, aux['wgt'])\n        self.Histos['{}/muljd0inc'.format(chan)].Fill( lj.pfcand_tkD0Min*1e4, aux['wgt'])\n        self.Histos['{}/maxd0inc'.format(chan)].Fill( max(mind0s), aux['wgt'])\n\n        nbtight, nbmedium = 0, 0\n        for s, j in zip(event.hftagscores, event.ak4jets):\n            if not (j.jetid and j.p4.pt()>30 and abs(j.p4.eta())<2.5): continue\n            if (s.DeepCSV_b&(1<<2))==(1<<2): nbtight += 1\n            if (s.DeepCSV_b&(1<<1))==(1<<1): nbmedium += 1\n\n\n        dphi = abs(DeltaPhi(lj.p4, proxy.p4))\n\n\n        self.Histos['{}/nbtight'.format(chan)].Fill(nbtight, aux['wgt'])\n        self.Histos['{}/nbmedium'.format(chan)].Fill(nbmedium, aux['wgt'])\n        if nbtight==0: return\n        self.Histos['{}/cutflow'.format(chan)].Fill(cutflowbin, aux['wgt']); cutflowbin+=1\n\n\n        self.Histos['{}/proxyd0'.format(chan)].Fill( abs(proxy.d0(event))*1e4, aux['wgt'])\n        self.Histos['{}/muljd0'.format(chan)].Fill( lj.pfcand_tkD0Min*1e4, aux['wgt'])\n        self.Histos['{}/maxd0'.format(chan)].Fill( max(mind0s), aux['wgt'])\n\n        if lj.pfcand_tkD0Min*1e4 > 100:\n            self.Histos['{}/dphi_100'.format(chan)].Fill(dphi, aux['wgt'])\n            self.Histos['{}/dphiIso2Dpre'.format(chan)].Fill(dphi, lj.pfiso(), aux['wgt'])\n        if lj.pfcand_tkD0Min*1e4 > 200:\n            self.Histos['{}/dphi_200'.format(chan)].Fill(dphi, aux['wgt'])\n        if lj.pfcand_tkD0Min*1e4 > 300:\n            self.Histos['{}/dphi_300'.format(chan)].Fill(dphi, aux['wgt'])\n        if lj.pfcand_tkD0Min*1e4 > 400:\n            self.Histos['{}/dphi_400'.format(chan)].Fill(dphi, aux['wgt'])\n        if lj.pfcand_tkD0Min*1e4 > 500:\n            self.Histos['{}/dphi_500'.format(chan)].Fill(dphi, aux['wgt'])\n            self.Histos['{}/dphiIso2Dinit'.format(chan)].Fill(dphi, lj.pfiso(), aux['wgt'])\n\n        if lj.pfcand_tkD0Min*1e4 < 1000: return\n        self.Histos['{}/cutflow'.format(chan)].Fill(cutflowbin, aux['wgt']); cutflowbin+=1\n\n        self.Histos['{}/proxyiso'.format(chan)].Fill(proxy.pfiso(), aux['wgt'])\n        self.Histos['{}/muljiso'.format(chan)].Fill(lj.pfiso(), aux['wgt'])\n        self.Histos['{}/maxiso'.format(chan)].Fill(max(lj.pfiso(), proxy.pfiso()), aux['wgt'])\n\n        self.Histos['{}/dphi'.format(chan)].Fill(dphi, aux['wgt'])\n\n        self.Histos['{}/dphiIso2D'.format(chan)].Fill(dphi, lj.pfiso(), aux['wgt'])\n\n\n    def postProcess(self):\n        super(MyEvents, self).postProcess()\n\n        for ch in self.Channel:\n            xaxis = self.Histos['{}/cutflow'.format(ch)].axis(0)\n\n            labels = [ch, 'ljcosmicveto_pass', 'bjet_ge1', 'mind0_pass',]\n\n            for i, s in enumerate(labels, start=6):\n                xaxis.SetBinLabel(i, s)\n                # binNum., labAngle, labSize, labAlign, labColor, labFont, labText\n                xaxis.ChangeLabel(i, 315, -1, 11, -1, -1, s)\n\n        for k in self.Histos:\n            if 'phi' not in k: continue\n            xax = self.Histos[k].axis(0)\n            decorate_axis_pi(xax)\n            if '2D' in k:\n                self.Histos[k].yaxis.SetNdivisions(-210)\n\n\n\nhistCollection = [\n    {\n        'name': 'nbtight',\n        'binning': (5, 0, 5),\n        
'title': 'Num. tight bjets;Num.bjets;counts'\n },\n {\n 'name': 'nbmedium',\n 'binning': (5, 0, 5),\n 'title': 'Num. medium bjets;Num.bjets;counts'\n },\n {\n 'name': 'proxyd0inc',\n 'binning': (50, 0, 2500),\n 'title': 'proxy muon |d_{0}|;|d_{0}| [#mum];Events'\n },\n {\n 'name': 'muljd0inc',\n 'binning': (50, 0, 2500),\n 'title': 'muon type lepton-jet minimum |d_{0}|;|d_{0}| [#mum];Events'\n },\n {\n 'name': 'maxd0inc',\n 'binning': (50, 0, 2500),\n 'title': 'max(proxy,lj) |d_{0}|;|d_{0}| [#mum];Events'\n },\n {\n 'name': 'proxyd0',\n 'binning': (50, 0, 2500),\n 'title': 'proxy muon |d_{0}|(N_{bjet}#geq1);|d_{0}| [#mum];Events'\n },\n {\n 'name': 'muljd0',\n 'binning': (50, 0, 2500),\n 'title': 'muon type lepton-jet minimum |d_{0}|(N_{bjet}#geq1);|d_{0}| [#mum];Events'\n },\n {\n 'name': 'maxd0',\n 'binning': (50, 0, 2500),\n 'title': 'max(proxy,lj) |d_{0}|(N_{bjet}#geq1);|d_{0}| [#mum];Events'\n },\n {\n 'name': 'proxyiso',\n 'binning': (20, 0, 0.5),\n 'title': 'proxy isolation;iso;counts'\n },\n {\n 'name': 'muljiso',\n 'binning': (20, 0, 0.5),\n 'title': 'lepton-jet isolation;iso;counts'\n },\n {\n 'name': 'maxiso',\n 'binning': (20, 0, 0.5),\n 'title': 'max iso(lepton-jet, proxy muon);iso;counts'\n },\n {\n 'name': 'dphi_100',\n 'binning': (20, 0, M_PI),\n 'title': '|#Delta#phi|(lepton-jet, proxy muon) (LJ |d_{0}|>100#mum);|#Delta#phi|;counts/#pi/20'\n },\n {\n 'name': 'dphi_200',\n 'binning': (20, 0, M_PI),\n 'title': '|#Delta#phi|(lepton-jet, proxy muon) (LJ |d_{0}|>200#mum);|#Delta#phi|;counts/#pi/20'\n },\n {\n 'name': 'dphi_300',\n 'binning': (20, 0, M_PI),\n 'title': '|#Delta#phi|(lepton-jet, proxy muon) (LJ |d_{0}|>300#mum);|#Delta#phi|;counts/#pi/20'\n },\n {\n 'name': 'dphi_400',\n 'binning': (20, 0, M_PI),\n 'title': '|#Delta#phi|(lepton-jet, proxy muon) (LJ |d_{0}|>400#mum);|#Delta#phi|;counts/#pi/20'\n },\n {\n 'name': 'dphi_500',\n 'binning': (20, 0, M_PI),\n 'title': '|#Delta#phi|(lepton-jet, proxy muon) (LJ |d_{0}|>500#mum);|#Delta#phi|;counts/#pi/20'\n },\n {\n 'name': 'dphi',\n 'binning': (20, 0, M_PI),\n 'title': '|#Delta#phi|(lepton-jet, proxy muon);|#Delta#phi|;counts/#pi/20'\n },\n {\n 'name': 'dphiIso2Dpre',\n 'binning': (20, 0, M_PI, 20, 0, 0.5),\n 'title': '|#Delta#phi| vs muon-type lepton-jet isolation;|#Delta#phi|;iso',\n },\n {\n 'name': 'dphiIso2Dinit',\n 'binning': (20, 0, M_PI, 20, 0, 0.5),\n 'title': '|#Delta#phi| vs muon-type lepton-jet isolation;|#Delta#phi|;iso',\n },\n {\n 'name': 'dphiIso2D',\n 'binning': (20, 0, M_PI, 20, 0, 0.5),\n 'title': '|#Delta#phi| vs muon-type lepton-jet isolation;|#Delta#phi|;iso',\n },\n]","sub_path":"Analysis/python/processing/proxy/proxy_4mu.py","file_name":"proxy_4mu.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"259797079","text":"#!python3\n\n# https://projecteuler.net/problem=20\n# \"Factorial digit sum\"\n# Find the sum of the digits in the number 100!.\n\nproduct = 1\nfor n in range(1,101):\n\tproduct *= n\nprint(sum([int(digit) for digit in list(str(product))]))","sub_path":"ProjectEuler/p020.py","file_name":"p020.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"88275161","text":"import json\nimport psycopg2\nimport os\nconn = psycopg2.connect(database = \"ethereum\",user=\"gpadmin\",password=\"123456\",host=\"192.168.1.2\",port=\"5432\")\n\npath = '/root/2017Dataset/'\n\n\n\nif not 
os.path.exists(path):\n\tos.mkdir(path)\nwith open('/root/2017Dataset.json','r') as f:\n\tcontracts = json.load(f)\ncnt = 0\nfor contract in contracts:\n\taddr = contract[0]\n\tcursor = conn.cursor()\n\tsql = \"select code from code where address =\\'%s\\'\"%addr\n\tcursor.execute(sql)\n\ttry:\n\t\tcode = cursor.fetchall()[0][0]\n\t\tcontract_path = '%s%02d-%s/'%(path,cnt,addr)\n\t\tif not os.path.exists(contract_path):\n\t\t\tos.mkdir(contract_path)\n\texcept Exception as e:\n\t\tprint(e)\n\t\tcontinue\n\tif code.startswith('0x'):\n\t\tcode = code[2:]\n\twith open(contract_path+'%02d-%s.bytecode'%(cnt,addr),'w') as w:\n\t\tw.write(code)\n\tcursor.close()\n\tcnt+=1\n\t\t\n\t\n","sub_path":"script/select2017Code.py","file_name":"select2017Code.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"151449347","text":"#@+leo-ver=5-thin\n#@+node:martin.20160905121823.2: * @file ./common/software/PC/src/event_detection_NN/processing_NN_detection.py\n#@@language python\n\n#@+others\n#@+node:martin.20160505171035.1: ** docs\n#Fireball detection using Neural Network script\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Version 1.0\n# Purpose: This script is an implementation of a neural network\n# to detect meteors in sky images once the filter has\n# been applied to the raw images.\n# Jean Deshayes\n# Curtin University\n# modified somewhat by Martin Towner\n# Curtin University\n\n# usage for detection:\n# python NN_fireball_detection.py /dir/pics (Nb hidden units=10,\n# lambda=0.00256, detection=1, Dataset Nb=11)\n\n\n\n## /dir/pics : Path to pictures directory. PICTURES MUST BE LABELLED as \"NB_DATE_TIME_DSC_ID.[...].jpg\"\n## detection :If detection is enabled (1), the code will run the detection process hence\n## feedforward propagation through the neural network. Otherwise, if 0 is entered,\n## the network will be created from the given hyperparameters and will start\n## the learning process.\n## lambda :This is the value of the regularisation parameter. \n## Nb of hidden units: The number of units desired in the hidden layer\n## Dataset Nb: The dataset number to use to train network. 
11 is the default because it\n## was the last dataset created\n## iterations: Maximum number of iterations used for learning\n## ROC: Plot roc curves\n## CONFMATRIX: Display confusion matrices\n\n### TODO\n# remove requirement for file name of specific format, as they might be thumb.masked.jpg\n# replace saving procedures with library functions\n#### import event detection code list generation and use that for files to do\n### stoptime not implemented\n# slow moving object removal is very simple\n\n#@+node:martin.20160505170929.2: ** imports\nimport datetime\nimport logging\nimport sys\nimport os\nimport cv2\n\nimport fcntl # for flock\n\nimport scipy.ndimage\nimport scipy.misc # scipy.misc.toimage is used below when saving tiles\nimport numpy as np\nfrom PIL import Image\n\nimport dfn_functions as dfn\nfrom NN import * #Import Neural Network functions\nfrom preFilter import * #Import preprocessing functions\nfrom performance import * #Import performance functions\n#@+node:martin.20160505170929.4: ** main_function\ndef main_function( args, save_settings, config_dict, stop_time):\n    \"\"\" args explanation:\n        could be a '/path/to/dir', or can be a list of settings\n        see docs\n    \"\"\"\n    #@+others\n    #@+node:martin.20160505171405.1: *3* settings\n    #Settings constant variables\n    settings = {}\n    #a tracking number to decide whether to reprocess older directories with new settings\n    settings['algorithm_version'] = 1.06\n    #tile size\n    settings['boxsize'] = 25\n    ###?\n    settings['tiles_for_pools'] = (4,8)\n    #@+node:martin.20160505172140.1: *3* init\n    #Retrieve data passed by user\n    #args could be just a '/path/to/dir', or could be a list of arguments\n    if not isinstance(args, basestring):\n        dirName = args[0]\n    else:\n        dirName = args\n        args = [args]\n    H = int(args[1]) if len(args) >= 2 else 10 \n    LAMBDA = float(args[2]) if len(args) >= 3 else 0.00256 \n    #obsolete arg, keep for backward compatibility\n    DETECTION = int(args[3]) if len(args) >= 4 else 1 \n    DATASET = int(args[4]) if len(args) >= 5 else 11\n\n    local_path = dirName\n    PATH = os.path.dirname( os.path.realpath(__file__))\n    #Folder with the datasets to train the network\n    DATAFOLDER = os.path.join( PATH, \"..\", \"Dataset\" )\n    #Initialise Neural Network\n    archi = (settings['boxsize'] * settings['boxsize'], H, 1)\n    #print( \"Architecture : {0}\".format(archi) )\n    #Set default weights to use\n    local_fname = \"Filter11_\" + str(settings['boxsize']) + \"x\"\n    local_fname += str(settings['boxsize']) + \"_\" + str(H) + \"_\" + str(LAMBDA)\n    fileName = os.path.join( PATH, local_fname )\n    fileExist = os.path.isfile( fileName + \".npy\")\n    NN = Neural_Network( Lambda = LAMBDA, architecture = archi,\n                         fileExist = fileExist, fileName = fileName)\n\n\n    #@+node:martin.20160505172204.1: *3* logfile and file lock\n    #@+others\n    #@+node:martin.20160609123245.1: *4* logfile\n    #logfile\n    log_file = os.path.join( dirName, dfn.log_name() + 'processing.txt' )\n    logging.basicConfig( filename=log_file, level=logging.INFO,\n                         format='%(asctime)s, %(levelname)s, %(module)s, %(message)s')\n    logger = logging.getLogger()\n    ###logger.setLevel( logging.DEBUG) ###\n    logger.info(\"begin_event_detection_NN, \" + dirName)\n    logger.info(\"algorithm version, %f\" % settings['algorithm_version'])\n    logger.info(\"tile size, %d\" % settings['boxsize']) \n    logger.info(\"architecture of neural network, %d, %d, %d\" % (archi[0],archi[1],archi[2]))\n    logger.info(\"regularisation parameter, %f\" % LAMBDA)\n    #@+node:martin.20160609121524.1: *4* file lock\n    #check if dir is already processing/lockfile\n    trans_status_file = os.path.join( local_path, 
'transfer_status.txt')\n if not os.path.isfile( trans_status_file):\n #no status.txt, so this is current dir for interval control, or\n # not a relevant dir\n return True\n # lock file using at mode so not to overwrite existing\n # https://stackoverflow.com/questions/220525/ensuring-a-single-instance-\n # of-an-application-in-linux#221159\n fp = open(trans_status_file, 'at')\n try:\n fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError as e: # another event_detect is running on this dir\n print( 'trans_file_already_locked, ' + str(e) )\n # print( 'trans_file_already_locked, ' + str(e) )\n return True\n #@-others\n\n #use config name not name() so that we can run on other machines\n namestr = '#' + ','.join( [config_dict['station']['hostname'],\n str(config_dict['station']['lon']),\n str(config_dict['station']['lat']),\n str(config_dict['station']['altitude'])] ) + '\\n'\n #@+node:martin.20160505172402.1: *3* detection\n #Reading all the images in the folder\n fileName = [ f for f in os.listdir(dirName) \n if (f.lower().endswith(\".jpg\")\n and not 'tile' in f.lower()\n and not 'daily' in f.lower()) ]\n fileName.sort()\n posDir = dirName\n #@+<>\n #@+node:martin.20160508124533.1: *4* <>\n #initialise structure to check for slow moving objects\n #98 = 4912/25/2, 147 = 7360/25/2\n #list of coords of 1 events [[x,y],[x,y], [x,y]....]\n grid_coord_curr = [] #np.zeros( (98,147) ) #Coordinates tiles in current image\n grid_coord_prev1 = [] #Coordinates tiles in previous image\n grid_coord_prev2 = []\n # prev1_recon_img = np.zeros((2,2)) #Initialise array for checked image\n first_image_sequence = 0\n #@-<>\n for j in range(0,len(fileName)-1):\n #@+<>\n #@+node:martin.20160505174337.1: *4* <>\n #Open images\n logger.debug( 'images, %s, %s' % (fileName[j],fileName[j+1]))\n varName1 = fileName[j].split('_')\n varName2 = fileName[j+1].split('_')\n time1 = datetime.datetime.strptime( varName1[1] + ' ' + varName1[2], '%Y-%m-%d %H%M%S')\n time2 = datetime.datetime.strptime( varName2[1] + ' ' + varName2[2], '%Y-%m-%d %H%M%S')\n #@-<>\n #Check if time between previous and current picture less near 30 seconds\n if (time2-time1).seconds < 40:\n #@+<>\n #@+node:martin.20160505174428.1: *4* <> load and filter\n logger.debug( \"processing, %s\" % (fileName[j+1]) )\n img1=cv2.imread( os.path.join( dirName, fileName[j]) )\n img2=cv2.imread( os.path.join( dirName, fileName[j+1]) )\n #Use green channel only\n #Convert image from uint8 to int16 (Enable substraction of matrices) \n img1 = img1[:,:,1].astype( np.int16)\n img2 = img2[:,:,1].astype( np.int16)\n #Apply filter\n f = metFilter( img1, img2,\n #dirName, \n fileName[j+1],\n radius=10, tilessettings = settings['tiles_for_pools'] ) \n #Split image into tiles\n X = np.asarray( f.splitToTiles( (settings['boxsize'],settings['boxsize']) ))\n logger.debug('len x, ' + str(len(X)) )\n logger.debug('len f, ' + str(f.nameSplit) )\n #X is array of tiles. 
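# --- Editorial aside (hypothetical shapes, not from the original pipeline):
# what splitToTiles plus the /255 normalisation amount to, in plain numpy:
import numpy as np

frame = np.random.randint(0, 256, size=(100, 150), dtype=np.uint8)
box = 25
tiles = np.asarray([frame[r:r + box, c:c + box]
                    for r in range(0, frame.shape[0], box)
                    for c in range(0, frame.shape[1], box)])
X = tiles / 255.0  # scale pixel values into [0, 1] before the forward pass
assert X.shape == (24, box, box) and X.max() <= 1.0
# --- end aside; the record continues below.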
Normalise array\n X = X / 255.0\n #@-<>\n #Save coordinates of tiles in 2 previous images\n grid_coord_prev2 = grid_coord_prev1\n grid_coord_prev1 = grid_coord_curr\n grid_coord_curr = [] #np.zeros( (98,147) )\n grid_coord_curr_idx = []\n if len(X) != 0: \n #Execute feed forward propagation on all potential tiles\n allOutputs = NN.forward(X)\n logger.debug( 'len all outputs, ' + str(len( allOutputs)) )\n for i in range(len(allOutputs)):\n logger.debug( 'output, ' + str(allOutputs[i]) )\n #### some sort of threshold to remove weaker positive tiles?\n coord = f.nameSplit[i].rsplit('_',2)[1:]\n logger.debug( 'output coords, ' + str( coord) )\n if allOutputs[i] + 0.5 > 1.00:\n logger.debug( 'add tiles to list, ' + str( coord) )\n grid_coord_curr.append( (int(coord[0]), int(coord[1])) )\n grid_coord_curr_idx.append( i)\n else : #negative tiles\n logger.debug('negative tile, ' + str(coord) )\n #done all tiles, remove duplicates (which shouldn't occur anyway)\n #grid_coord_curr = list(set(grid_coord_curr))\n #@+<>\n #@+node:martin.20160508222352.1: *4* <>\n #remove tile from list if it in 3 consecutive images\n grid_coord_curr_cln = []\n for crd in grid_coord_curr:\n if not crd in grid_coord_prev1 and not crd in grid_coord_prev2:\n grid_coord_curr_cln.append( crd)\n else:\n logger.debug('slow object removed, %i, %i' % (crd[0], crd[1]) )\n grid_coord_curr = grid_coord_curr_cln\n #@-<>\n if len(grid_coord_curr) > 0:\n #@+<>\n #@+node:martin.20160505174741.1: *4* <>\n #start event file\n evtfname = os.path.join( dirName, \n fileName[j+1][:-4] + '.raw_pixels.txt' )\n headstr = '#datetime, x, y, -dx, +dx, -dy, +dy, brightness, -b, +b\\n'\n with open( os.path.join(dirName, evtfname), 'at') as myFile:\n myFile.write( namestr)\n myFile.write( headstr)\n \n for i in range(len(grid_coord_curr)):\n crd_pair = grid_coord_curr[i]\n img_idx = grid_coord_curr_idx[i]\n y = crd_pair[0]\n x = crd_pair[1]\n logger.debug('curr_coord_event_seen,' + str( crd_pair) )\n #save event file\n xcoord = x*settings['boxsize']*2\n ycoord = y*settings['boxsize']*2\n with open( os.path.join(dirName, evtfname), 'a') as myFile:\n now = dfn.exposure_time( os.path.join(dirName, fileName[j+1]) )\n now_iso = datetime.datetime.utcfromtimestamp( now).isoformat()\n myFile.write( now_iso + ', ' + \n str(ycoord) + ', ' + str(xcoord) + ', ' +\n '1.0, 1.0, 1.0, 1.0, 100, 1.0, 1.0\\n' )\n #write event to log file\n logger.info(\"event, %s, [%d %d]\" % (fileName[j+1], ycoord, xcoord) )\n #save individual tile\n if save_settings['save_tiles'] == '1':\n fname_w_coords = '_'.join( f.nameSplit[i].split('_')[:-2] )\n fname_w_coords = '.'.join( [fname_w_coords, str(xcoord), \n str(ycoord), 'tile.jpg' ] )\n\n tmp_img = scipy.misc.toimage(X[img_idx].reshape((settings['boxsize'],\n settings['boxsize'])),\n cmin = 0,\n cmax = 1)\n #mirror diagnonally = rot 90 clock + horiz flip\n # tmp_img = tmp_img.transpose( Image.ROTATE_180)\n # tmp_img = tmp_img.transpose( Image.FLIP_LEFT_RIGHT)\n #print('rescaling, ', crd_pair, fname_w_coords)\n width, height = tmp_img.size\n tmp_img = tmp_img.resize((2*width,2*height), Image.BILINEAR )\n tmp_img.save( os.path.join( posDir,fname_w_coords) )\n logger.debug('saved_tile, ' + os.path.join( posDir,fname_w_coords) )\n else:\n logger.debug('not_save_tile, ' + os.path.join( posDir,fname_w_coords) )\n #@-<>\n else:\n logger.debug( 'all removed')\n else: #time diff too big\n logger.debug(\"ignored, %s, time difference with previous image, %d\\n\"\n % (fileName[j+1], (time2-time1).seconds) )\n #@+<>\n 
#@+node:martin.20160508124700.1: *4* <>\n #set new beginning for slow objects due to large time difference\n first_image_sequence = j\n grid_coord_prev1 = []\n grid_coord_prev2 = []\n #Set the values of checked array to 0\n #prev1_recon_img = np.zeros( (curr_recon_img.shape[0],\n # curr_recon_img.shape[1]) )\n #@-<>\n logger.info(\"processed, %s, version, %f\" % \n (fileName[j+1], settings['algorithm_version']) ) \n\n #@+node:martin.20160609121416.1: *3* final\n #flock remove not needed, done automatically as fp closed\n fp.close()\n #now = datetime.datetime.utcfromtimestamp( time.time()).isoformat()\n now = str( datetime.datetime.utcnow().isoformat())\n dfn.write_string_to_file( 'done, ' + now + '\\n', trans_status_file, 'wt')\n # make sure combo log file is now blank so that dir \n # is considered new for triangulation\n dfn.write_string_to_file( '',\n os.path.join( local_path, 'double_combo_log.txt'), \n 'wt')\n logger.info( 'event_detection_done, '+ str(settings['algorithm_version']) )\n #@-others\n \n#@-others\n\nif __name__ == '__main__':\n #logging.basicConfig()\n np.set_printoptions(linewidth=999999) #Increase limit of line width of the shell\n np.set_printoptions(threshold='nan') #Remove threshold to display unlimitted array sizes\n\n #Set parameters given through command line\n args = sys.argv[1:]\n # if len(args)>=1:\n # dirName=args[0]\n # else: #Open window to allow user to select a directory to browse\n # print( \"Detection mode enabled by default. Set detection variable to 0 for training\")\n # root = Tkinter.Tk()\n # dirName = tkFileDialog.askdirectory(parent=root,initialdir=\"/\",title='Please select the folder of images desired to be processed')\n # if len(dirName ) > 0:\n # print \"Browsing files in %s\" % dirName\n # root.destroy()\n # args=[dirname]\n\n config_dict = dfn.load_config( os.path.join( args[0], 'dfnstation.cfg') )\n \n #settings\n save_settings = {\n #Save positive tiles\n 'save_tiles': '1',\n #save blue/red thumbmail sketch of event\n 'save_thumb': '0' } \n main_function( args, save_settings, config_dict, 0)\n \n#@-leo\n","sub_path":"processing_NN_detection.py","file_name":"processing_NN_detection.py","file_ext":"py","file_size_in_byte":16491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"583475058","text":"##### QUESTION - AMICABLE NUMBERS ############################################\n# Let d(n) be defined as the sum of proper divisors of n (numbers less than n\n# which divide evenly into n).\n# If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and\n# each of a and b are called amicable numbers.\n#\n# For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55\n# and 110; therefore d(220) = 284. 
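# --- Editorial aside (illustrative check, not part of the original script):
# the worked d(220)/d(284) pair above, computed directly from the definition.
def proper_divisor_sum(n):
    # sum of proper divisors of n (divisors strictly less than n)
    return sum(i for i in range(1, n) if n % i == 0)

assert proper_divisor_sum(220) == 284
assert proper_divisor_sum(284) == 220
# --- end aside; the problem statement continues below.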
The proper divisors of 284 are 1, 2, 4, 71\n# and 142; so d(284) = 220.\n#\n# Evaluate the sum of all the amicable numbers under 10000.\n#\n################################################################################\n\n##### NOTES ##################################################################\n#\n################################################################################\n\n##### SOLUTION ###############################################################\n# Answer to problem = 31,626\n#\n################################################################################\n\nimport math\n\n# - Helper function to return the sum of all factors of a number\ndef get_factors_sum(num):\n factors_sum = 1\n\n for i in range(2, math.ceil(num / 2 + 1)):\n if num % i == 0:\n factors_sum += i\n\n return factors_sum\n\n# - Main function to sum up all amicable numbers\ndef amicable_number_sum(max):\n numbers = {}\n amicable_number_total = 0\n\n for i in range(2, max):\n numbers[i] = get_factors_sum(i)\n\n for number, num_factors_total in numbers.items():\n if (num_factors_total in numbers and\n numbers[num_factors_total] == number and\n num_factors_total != number):\n\n amicable_number_total += number\n\n return amicable_number_total\n\n\n##### TEST CASE ##############################################################\nprint (\"--- TEST CASE SHOULD BE TRUE ---\")\nprint (get_factors_sum(220) == 284)\nprint (get_factors_sum(284) == 220)\n\n##### SOLUTION ###############################################################\nprint (\"--- SOLUTION FOR AMICABLE NUMBERS PROBLEM IS ---\")\nprint (amicable_number_sum(10000))\n","sub_path":"001-025/021_amicablenumbers.py","file_name":"021_amicablenumbers.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"652453498","text":"def mergeSort(l):\n if len(l) <= 1:\n return l\n mid = len(l) // 2\n left = mergeSort(l[:mid])\n right = mergeSort(l[mid:])\n return merge(left, right)\n\n\ndef merge(left, right):\n if not left:\n return right\n if not right:\n return left\n\n result = [0] * (len(left) + len(right))\n\n p1 = p2 = 0\n\n while p1 < len(left) and p2 < len(right):\n if left[p1] < right[p2]:\n result[p1 + p2] = left[p1]\n p1 += 1\n else:\n result[p1 + p2] = right[p2]\n p2 += 1\n\n while p1 < len(left):\n result[p1 + p2] = left[p1]\n p1 += 1\n\n while p2 < len(right):\n result[p1 + p2] = right[p2]\n p2 += 1\n return result\n\nl = [1, 2, 2, 3, 4, 2, 12, 3, 12, 1, 23, 123, 2, 321, 21, 23, 2,\n 31, 2, 3, 12, 31, 23, 1, 32443, 65, 7, 56, 22, 34, 2, 221]\n\n\nprint(mergeSort(l))\n","sub_path":"CCI/10-sorting/mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"447864623","text":"from pyramid.response import Response\nfrom pyramid.view import view_config\nfrom sqlalchemy.exc import DBAPIError\nfrom ..models import MyModel\nimport os\nfrom pyramid.httpexceptions import HTTPFound\nfrom pyramid.security import remember, forget\nfrom ..security import verify_user\n\n\nHERE = os.path.dirname(__file__)\n\nENTRIES_DATA = [\n {\n 'title': 'Day12',\n 'creation_date': 'August 23, 2016',\n 'body': 'Today, we learned about templating with Jinja, and about the binary tree data type. I spent most of the time revising old data structures, since it is not a good idea to coninue building upon something that is not perfect. 
I also got my journal site deployed with the templates working. Lastly, we formed project groups, and I will be working on my idea for a market analysis web application.'\n },\n]\n\n\n@view_config(route_name='login', renderer='../templates/login.jinja2')\ndef login(request):\n if request.method == 'GET':\n return {'bogus_attempt': False}\n if request.method == 'POST':\n username = str(request.params.get('user', ''))\n password = str(request.params.get('pass', ''))\n print('user/pass:', username, password)\n\n if verify_user(username, password):\n print('User verfied.')\n headers = remember(request, username)\n return HTTPFound(location=request.route_url('home'), headers=headers)\n return {'bogus_attempt': True}\n\n\n@view_config(route_name='logout', renderer='../templates/logout.jinja2')\ndef logout(request):\n headers = forget(request)\n return HTTPFound(request.route_url('home'), headers=headers)\n\n\n@view_config(route_name='detail', renderer='../templates/detail.jinja2')\ndef detail(request):\n \"\"\"Send individual entry for detail view.\"\"\"\n query = request.dbsession.query(MyModel)\n data = query.filter_by(id=request.matchdict['id']).first()\n return {\"entry\": data}\n\n\n@view_config(route_name='edit', renderer='../templates/edit.jinja2', permission='root')\ndef edit(request):\n \"\"\"Send individual entry to be edited.\"\"\"\n query = request.dbsession.query(MyModel)\n data = query.filter_by(id=request.matchdict['id']).one()\n data2 = {'id': data.id, 'body': data.body, 'creation_date': data.creation_date, 'title': data.title}\n updated = False\n # using data2 prevents data from being written to the db. Use data\n # to like data.body = req.... to change database (autocommit is on)\n if request.method == 'POST':\n updated = True\n data2['creation_date'] = request.POST['creation_date']\n data2['body'] = request.POST['body']\n data2['title'] = request.POST['title']\n\n #updating # comment out for testing\n data.body = data2['body']\n data.title = data2['title']\n data.creation_date = data2['creation_date']\n\n return {'entry': data2, 'updated': updated}\n\n\n@view_config(route_name='new', renderer='../templates/new.jinja2', permission='root')\ndef new(request):\n \"\"\"Return empty dict for new entry.\"\"\"\n goofed = {'goofed': 0}\n if request.method == 'GET':\n return {'entry': goofed}\n if request.method == 'POST':\n new_model = MyModel(title=request.POST['title'], body=request.POST['body'], creation_date=request.POST['creation_date'])\n if new_model.title == '' or new_model.body == '':\n goofed['goofed'] = 1\n return {'entry': goofed} # http exception here\n request.dbsession.add(new_model)\n return HTTPFound(request.route_url('home'))\n\n\n@view_config(route_name='home', renderer='../templates/index.jinja2')\ndef my_view(request):\n try:\n query = request.dbsession.query(MyModel)\n data_from_DB = query.order_by(MyModel.title).all()\n except DBAPIError:\n return Response(db_err_msg, content_type='text/plain', status=500)\n return {'entries': data_from_DB}\n\n\ndb_err_msg = \"\"\"\\\nPyramid is having a problem using your SQL database. The problem\nmight be caused by one of the following things:\n\n1. You may need to run the \"initialize_website_db\" script\n to initialize your database tables. Check your virtual\n environment's \"bin\" directory for this script and try to run it.\n\n2. Your database server may not be running. 
Check that the\n database server referred to by the \"sqlalchemy.url\" setting in\n your \"development.ini\" file is running.\n\nAfter you fix the problem, please restart the Pyramid application to\ntry it again.\n\"\"\"\n","sub_path":"website/views/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"399076178","text":"def bf(candidate, record, all_record, depth):\n if depth == 3:\n all_record.append(''.join(record + [record[0]]))\n else:\n for i in range(len(candidate)):\n if candidate[i]:\n record.append(chr(ord('a') + i))\n candidate[i] = False\n bf(candidate, record, all_record, depth+1)\n candidate[i] = True\n record.pop()\nall_record = []\nbf([True, True, True], [], all_record, 0)\nn = int(input())\nfirst = input().strip()\nsecond = input().strip()\nprint('YES')\n\"\"\"\nif first[0] == first[1] and second[0] == second[1]:\n print('abc'*n)\nelif first[0] != first[1] and second[0] != second[1]:\n union = set(first + second)\n if len(union) == 2:\n for i in 'abc':\n if i not in union:\n break\n print(first[0] * n + i * n + first[1] * n)\n else:\n\"\"\"\nhas_ans = False\nfor cand in all_record:\n if first not in cand and second not in cand:\n print(cand[:3]*n)\n has_ans = True\n break\n\nif not has_ans:\n if first[0] == second[0]:\n print(first[1]*n + second[1]*n + first[0]*n)\n elif first[1] == second[1]:\n print(first[1]*n + second[0]*n + first[0]*n)\n else:\n for c in 'abc':\n if c not in first:\n break\n print(first[0]*n + c*n + first[1]*n)\n","sub_path":"codeforce/582_div3/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"366375491","text":"import re, os\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nfrom Bio import Phylo\nfrom ete3 import Tree\nfrom subprocess import Popen, PIPE, STDOUT\nfrom pathlib import Path\nfrom reinforcement_model import INPUT_SIZE\n\n\nNUM_OF_NODES = np.sqrt(INPUT_SIZE)\nparent_path = Path().resolve().parent\nparent_folder = parent_path / \"reinforcement_data\"\n\nRAXML_NG_SCRIPT = \"raxml-ng\" # after you install raxml-ng on your machine\n# conda install -c bioconda raxml-ng\nMSA_PHYLIP_FILENAME = \"masked_species_real_msa.phy\"\n\n\ndef return_likelihood(tree, msa_file, rates, pinv, alpha, freq):\n\t\"\"\"\n\t:param tree: ETEtree OR a newick string\n\t:param msa_file:\n\t:param rates: as extracted from parse_raxmlNG_content() returned dict\n\t:param pinv: as extracted from parse_raxmlNG_content() returned dict\n\t:param alpha: as extracted from parse_raxmlNG_content() returned dict\n\t:param freq: as extracted from parse_raxmlNG_content() returned dict\n\t:return: float. 
the score is the minus log-likelihood value of the tree\n\t\"\"\"\n\tmodel_line_params = 'GTR{rates}+I{pinv}+G{alpha}+F{freq}'.format(rates=\"{{{0}}}\".format(\"/\".join(rates)),\n\t pinv=\"{{{0}}}\".format(pinv),\n\t alpha=\"{{{0}}}\".format(alpha),\n\t freq=\"{{{0}}}\".format(\"/\".join(freq)))\n\n\t# create tree file in memory and not in the storage:\n\ttree_rampath = \"/dev/shm/\" + msa_file.split(\"/\")[-1] + \"tree\" # the var is the str: tmp{dir_suffix}\n\ttry:\n\t\twith open(tree_rampath, \"w\") as fpw:\n\t\t\tfpw.write(tree)\n\n\t\tp = Popen(\n\t\t\t[RAXML_NG_SCRIPT, '--evaluate', '--msa', msa_file, '--threads', '1', '--opt-branches', 'on', '--opt-model',\n\t\t\t 'off', '--model', model_line_params, '--nofiles', '--tree', tree_rampath],\n\t\t\tstdout=PIPE, stdin=PIPE, stderr=STDOUT)\n\n\t\traxml_stdout = p.communicate()[0]\n\t\traxml_output = raxml_stdout.decode()\n\n\t\tres_dict = parse_raxmlNG_content(raxml_output)\n\t\tll = res_dict['ll']\n\n\t# check for 'rare' alpha value error, run again with different alpha\n\texcept AttributeError:\n\t\t# float 0.5-8\n\t\tnew_alpha = np.random.uniform(0.5, 8)\n\t\tmodel_line_params = 'GTR{rates}+I{pinv}+G{alpha}+F{freq}'.format(rates=\"{{{0}}}\".format(\"/\".join(rates)),\n\t\t pinv=\"{{{0}}}\".format(pinv),\n\t\t alpha=\"{{{0}}}\".format(new_alpha),\n\t\t freq=\"{{{0}}}\".format(\"/\".join(freq)))\n\t\tp = Popen(\n\t\t\t[RAXML_NG_SCRIPT, '--evaluate', '--msa', msa_file, '--threads', '1', '--opt-branches', 'on', '--opt-model',\n\t\t\t 'off', '--model', model_line_params, '--nofiles', '--tree', tree_rampath],\n\t\t\tstdout=PIPE, stdin=PIPE, stderr=STDOUT)\n\n\t\traxml_stdout = p.communicate()[0]\n\t\traxml_output = raxml_stdout.decode()\n\n\t\tres_dict = parse_raxmlNG_content(raxml_output)\n\t\tll = res_dict['ll']\n\texcept Exception as e:\n\t\tprint(msa_file)\n\t\tprint(e)\n\t\tprint(\"return_likelihood() in bio_methods.py\")\n\t\texit()\n\tfinally:\n\t\tos.remove(tree_rampath)\n\n\treturn float(ll) # changed to return a num not a str\n\n\ndef parse_raxmlNG_content(content):\n\t\"\"\"\n\t:return: dictionary with the attributes - string typed. 
if parameter was not estimated, empty string\n\t\"\"\"\n\tres_dict = dict.fromkeys([\"ll\", \"pInv\", \"gamma\",\n\t \"fA\", \"fC\", \"fG\", \"fT\",\n\t \"subAC\", \"subAG\", \"subAT\", \"subCG\", \"subCT\", \"subGT\",\n\t \"time\"], \"\")\n\n\t# likelihood\n\tll_re = re.search(\"Final LogLikelihood:\\s+(.*)\", content)\n\tif not ll_re and (\n\t\t\tre.search(\"BL opt converged to a worse likelihood score by\", content) or re.search(\"failed\", content)):\n\t\tres_dict[\"ll\"] = re.search(\"initial LogLikelihood:\\s+(.*)\", content).group(1).strip()\n\telse:\n\t\tres_dict[\"ll\"] = ll_re.group(1).strip()\n\n\t\t# gamma (alpha parameter) and proportion of invariant sites\n\t\tgamma_regex = re.search(\"alpha:\\s+(\\d+\\.?\\d*)\\s+\", content)\n\t\tpinv_regex = re.search(\"P-inv.*:\\s+(\\d+\\.?\\d*)\", content)\n\t\tif gamma_regex:\n\t\t\tres_dict['gamma'] = gamma_regex.group(1).strip()\n\t\tif pinv_regex:\n\t\t\tres_dict['pInv'] = pinv_regex.group(1).strip()\n\n\t\t# Nucleotides frequencies\n\t\tnucs_freq = re.search(\"Base frequencies.*?:\\s+(\\d+\\.?\\d*)\\s+(\\d+\\.?\\d*)\\s+(\\d+\\.?\\d*)\\s+(\\d+\\.?\\d*)\", content)\n\t\tfor i, nuc in enumerate(\"ACGT\"):\n\t\t\tres_dict[\"f\" + nuc] = nucs_freq.group(i + 1).strip()\n\n\t\t# substitution frequencies\n\t\tsubs_freq = re.search(\n\t\t\t\"Substitution rates.*:\\s+(\\d+\\.?\\d*)\\s+(\\d+\\.?\\d*)\\s+(\\d+\\.?\\d*)\\s+(\\d+\\.?\\d*)\\s+(\\d+\\.?\\d*)\\s+(\\d+\\.?\\d*)\",\n\t\t\tcontent)\n\t\tfor i, nuc_pair in enumerate([\"AC\", \"AG\", \"AT\", \"CG\", \"CT\", \"GT\"]): # todo: make sure order\n\t\t\tres_dict[\"sub\" + nuc_pair] = subs_freq.group(i + 1).strip()\n\n\t\t# Elapsed time of raxml-ng optimization\n\t\trtime = re.search(\"Elapsed time:\\s+(\\d+\\.?\\d*)\\s+seconds\", content)\n\t\tif rtime:\n\t\t\tres_dict[\"time\"] = rtime.group(1).strip()\n\t\telse:\n\t\t\tres_dict[\"time\"] = 'no ll opt_no time'\n\treturn res_dict\n\n\ndef parse_phyml_stats_output(stats_filepath):\n\t\"\"\"\n :return: dictionary with the attributes - string typed. 
if parameter was not estimated, empty string\n \"\"\"\n\tres_dict = dict.fromkeys([\"ntaxa\", \"nchars\", \"ll\",\n\t \"fA\", \"fC\", \"fG\", \"fT\",\n\t \"subAC\", \"subAG\", \"subAT\", \"subCG\", \"subCT\", \"subGT\",\n\t \"pInv\", \"gamma\",\n\t \"path\"], \"\")\n\n\tres_dict[\"path\"] = stats_filepath\n\ttry:\n\t\twith open(stats_filepath) as fpr:\n\t\t\tcontent = fpr.read()\n\n\t\t# likelihood\n\t\tres_dict[\"ll\"] = re.search(\"Log-likelihood:\\s+(.*)\", content).group(1).strip()\n\n\t\t# gamma (alpha parameter) and proportion of invariant sites\n\t\tgamma_regex = re.search(\"Gamma shape parameter:\\s+(.*)\", content)\n\t\tpinv_regex = re.search(\"Proportion of invariant:\\s+(.*)\", content)\n\t\tif gamma_regex:\n\t\t\tres_dict['gamma'] = gamma_regex.group(1).strip()\n\t\tif pinv_regex:\n\t\t\tres_dict['pInv'] = pinv_regex.group(1).strip()\n\n\t\t# Nucleotides frequencies\n\t\tfor nuc in \"ACGT\":\n\t\t\tnuc_freq = re.search(\" - f\\(\" + nuc + \"\\)\\= (.*)\", content).group(1).strip()\n\t\t\tres_dict[\"f\" + nuc] = nuc_freq\n\n\t\t# substitution frequencies\n\t\tfor nuc1 in \"ACGT\":\n\t\t\tfor nuc2 in \"ACGT\":\n\t\t\t\tif nuc1 < nuc2:\n\t\t\t\t\tnuc_freq = re.search(nuc1 + \" <-> \" + nuc2 + \"(.*)\", content).group(1).strip()\n\t\t\t\t\tres_dict[\"sub\" + nuc1 + nuc2] = nuc_freq\n\texcept:\n\t\tprint(\"Error with:\", res_dict[\"path\"], res_dict[\"ntaxa\"], res_dict[\"nchars\"])\n\t\treturn\n\treturn res_dict\n\n\ndef prune_branch(t_orig, prune_name):\n\t'''\n\treturns (a copy of) both ETE subtrees after pruning\n\t'''\n\tt_cp_p = t_orig.copy() # the original tree is needed for each iteration\n\tassert t_cp_p & prune_name # todo Oz: add indicative error\n\tprune_node_cp = t_cp_p & prune_name # locate the node in the copied subtree\n\tassert prune_node_cp.up\n\n\tnname = prune_node_cp.up.name\n\tprune_loc = prune_node_cp\n\tprune_loc.detach() # pruning: prune_node_cp is now the subtree we detached. 
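# --- Editorial aside (toy example with assumed leaf names, not from the
# module): the detach/add_child pattern behind prune_branch/regraft_branch,
# shown on a four-leaf ete3 tree.
from ete3 import Tree

t = Tree("(A:1,(B:1,(C:1,D:1):0.5):0.5);")
sub = t & "D"                            # locate the node to prune by name
sub.detach()                             # prune: 'sub' is now its own subtree
(t & "B").up.add_child(sub, dist=0.25)   # regraft it next to B
print(t.get_ascii())
# --- end aside; the record continues below.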
t_cp_p is the one that was left behind\n\t###########\n\tif nname == t_cp_p.get_tree_root().name:\n\t\tfor n in t_cp_p.children:\n\t\t\ttmp_tree = n\n\t\t\tif n.name != prune_name:\n\t\t\t\toutgroup = n.children[0]\n\t\t\t\tn.set_outgroup(outgroup)\n\t\t\t\tbreak\n\n\t\tt_cp_p = tmp_tree\n\telse:\n\t\tt_cp_p.search_nodes(name=nname)[0].delete(\n\t\t\tpreserve_branch_length=True) # delete the specific node (without its childs) since after pruning this branch should not be divided\n\t###########\n\treturn nname, prune_node_cp, t_cp_p\n\n\ndef regraft_branch(t_cp_p, prune_node_cp, rgft_name, nname):\n\t'''\n\trecieves: 2 ETE subtrees and 2 node names\n\treturns: an ETEtree with the 2 concatenated ETE subtrees\n\t'''\n\n\tt_temp = Tree() # for concatenation of both subtrees ahead, to avoid polytomy\n\tt_temp.add_child(prune_node_cp)\n\tt_curr = t_cp_p.copy()\n\tassert t_curr & rgft_name # todo Oz: add indicative error\n\trgft_node_cp = t_curr & rgft_name # locate the node in the copied subtree\n\tnew_branch_length = rgft_node_cp.dist / 2\n\n\trgft_loc = rgft_node_cp.up\n\trgft_node_cp.detach()\n\tt_temp.add_child(rgft_node_cp, dist=new_branch_length)\n\tt_temp.name = nname\n\trgft_loc.add_child(t_temp, dist=new_branch_length) # regrafting\n\n\t# check for case when tree becomes rooted\n\t# num of nodes (hopefully) = len(t_curr.get_descendants()) + 1\n\t# if NUM_OF_NODES != len(t_curr.get_descendants()) + 1:\n\t# \tprint(\"UNROOTING: before unroot t_curr has \" + str(len(t_curr.get_descendants()) + 1) + \" nodes\")\n\t# \tt_curr.write(outfile=\"log_run/tree_before_unroot\", format=1)\n\t# \tt_curr.unroot()\n\t# \t# next line checks for new created node we wnt to remove\n\t# \tnodes_to_preserve_lst = [\"Sp\" + (str(n)).zfill(3) for n in range(20)]\n\t# \tnodes_to_preserve_lst.extend([\"N\" + str(i) for i in range(1, 19)])\n\t# \tt_curr.prune(nodes_to_preserve_lst, preserve_branch_length=True)\n\t# \tprint(\"I DID UNROOT!\")\n\t# TODO: remove\n\tt_curr.write(outfile=\"log_run/tree_after_regraft\", format=1, format_root_node=True)\n\n\treturn t_curr\n\n\ndef SPR_by_edge_names(ETEtree, cut_name, paste_name):\n\tnname, subtree1, subtree2 = prune_branch(ETEtree,\n\t cut_name) # subtree1 is the pruned subtree. subtree2 is the remaining subtree\n\trearr_tree_str = regraft_branch(subtree2, subtree1, paste_name, nname).write(\n\t\tformat=1, format_root_node=True) # .write() is how you convert an ETEtree to newick string. now you can convert it back (if needed) using Tree(), or convert it to BIOtree\n\n\treturn rearr_tree_str\n\n\ndef add_internal_names(tree_file, t_orig, newfile_suffix=\"_with_internal.txt\"):\n\t# todo oz: I know you defined 'newfile_suffix' diferently (just None to runover?)\n\t# for tree with ntaxa=20 there are 2n-3 nodes --> n-3=17 internal nodes. 
plus one ROOT_LIKE node ==> always 18 internal nodes.\n\t#N_lst = [\"N{}\".format(i) for i in range(1,19)]\n\tN_lst = [\"N{}\".format(i) for i in range(1,20)]\n\ti = 0\n\tfor node in t_orig.traverse():\n\t\tif not node.is_leaf():\n\t\t\tnode.name = N_lst[i]\n\t\t\ti += 1\n\t# assuming tree file is a pathlib path\n\tnew_tree_file = tree_file.parent / (tree_file.name + newfile_suffix)\n\tt_orig.write(format=1, outfile=new_tree_file, format_root_node=True)\n\n\treturn t_orig, new_tree_file\n\n\n# convert tree to weighted_adjacency_matrix\ndef tree_to_matrix(bio_tree):\n\tgraph = Phylo.to_networkx(bio_tree)\n\n\tif graph.number_of_nodes() != NUM_OF_NODES:\n\t\tprint(\"tree_to_matrix() in bio_methods.py\")\n\t\tprint(\"graph has \" + str(graph.number_of_nodes()) + \" nodes\")\n\t\texit()\n\n\t# matrix = networkx.adjacency_matrix(net)\n\tmatrix = nx.to_numpy_matrix(graph)\n\t# makes a numpy array from 2-dim matrix\n\treturn np.asarray(matrix).reshape(-1)\n\n\n# returns the tree from the text file in the msa_num's folder\ndef get_tree_from_msa(msa_path):\n\ttree_path = parent_folder / (msa_path + \"masked_species_real_msa.phy_phyml_tree_bionj.txt\")\n\n\twith open(tree_path, \"r\") as f:\n\t\ttree_str = f.read()\n\tete_tree = Tree(newick=tree_str, format=1)\n\tete_tree.resolve_polytomy(recursive=False)\n\n\t# add_internal_names does not run over the file it is given\n\tete_tree, tree_copy = add_internal_names(tree_path, ete_tree)\n\t# TODO: remove\n\tete_tree.write(outfile=\"log_run/tree_right_after_add_internal_names()\", format=1, format_root_node=True)\n\t##########\n\twith open(tree_copy, \"r\") as f:\n\t\ttree_str = f.read()\n\n\tbio_tree = Phylo.read(tree_copy, \"newick\")\n\tos.remove(tree_copy)\n\treturn ete_tree, bio_tree, tree_str\n\n\n# converts string to other formats\ndef get_ete_and_bio_from_str(tree_str, msa_path):\n\ttree_copy = parent_folder / (msa_path + \"for_now.txt\")\n\n\tete_tree = Tree(newick=tree_str, format=1)\n\twith open(tree_copy, \"w\") as f:\n\t\tf.write(tree_str)\n\n\tbio_tree = Phylo.read(tree_copy, \"newick\")\n\tos.remove(tree_copy)\n\treturn ete_tree, bio_tree\n\n\n# calculating likelihood of tree, msa_num should be the folder number of its corresponding msa\ndef get_likelihood_simple(tree_str, msa_path, params=None):\n\tif params is None:\n\t\t# taking the params required for likelihood calculation from the stats file in the msa_num's folder\n\t\tfreq, rates, pinv, alpha = calc_likelihood_params(msa_path)\n\telse:\n\t\tfreq, rates, pinv, alpha = params\n\n\tmsa_path = parent_folder / (msa_path + \"masked_species_real_msa.phy\")\n\treturn return_likelihood(tree_str, str(msa_path), rates, pinv, alpha, freq)\n\n\ndef calc_likelihood_params(msa_path):\n\tstats_path = parent_folder / (msa_path + \"masked_species_real_msa.phy_phyml_stats_bionj.txt\")\n\tparams_dict = parse_phyml_stats_output(stats_path)\n\tfreq, rates, pinv, alpha = [params_dict[\"fA\"], params_dict[\"fC\"], params_dict[\"fG\"], params_dict[\"fT\"]], [\n\t\tparams_dict[\"subAC\"], params_dict[\"subAG\"], params_dict[\"subAT\"], params_dict[\"subCG\"], params_dict[\"subCT\"],\n\t\tparams_dict[\"subGT\"]], params_dict[\"pInv\"], params_dict[\"gamma\"]\n\n\treturn freq, rates, pinv, alpha\n\n\nif __name__ == '__main__':\n\twith open(\"log_run/tree_after_regraft\", \"r\") as f:\n\t\ttree_str = f.read()\n\tmsa_path = \"/groups/itay_mayrose/danaazouri/PhyAI/ML_workshop/reinforcement_data/data/training_datasets/19078/\"\n\tlikelihood_params = 
calc_likelihood_params(msa_path)\n\n\tprint(get_likelihood_simple(tree_str, msa_path, likelihood_params))\n\n\t# directoryname = \"log_run/to_print\"\n\t# directory = os.fsencode(directoryname)\n\t# for file in os.listdir(directory):\n\t# \tfilename = os.fsdecode(file)\n\t#\n\t# \twith open(os.path.join(directoryname, filename), \"r\") as f:\n\t# \t\tcurrent_tree_str = f.read()\n\t# \t\tete_tree = Tree(newick=current_tree_str, format=1)\n\t# \t\tprint(\"\\n\"+filename+\":\")\n\t# \t\tprint(\"root_name = \" + ete_tree.get_tree_root().name + \":\\n\\n\")\n\t# \t\tprint(ete_tree.get_ascii(show_internal=True))\n\t#\n\n\n\n","sub_path":"Reinforcement_model_pytorch/bio_methods.py","file_name":"bio_methods.py","file_ext":"py","file_size_in_byte":13509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"583902628","text":"\"\"\"\nINSERT DOCSTRING\n\"\"\"\n\nimport urllib\nimport re\n\n\ndef url_parse(entry):\n \"\"\" outputs urls contained in words of textEdit text \"\"\"\n\n entry_words = entry.split()\n\n # entry as list from textEdit\n top_level_domains = ['.com', '.org', '.net', '.int', '.edu',\n '.co', '.gov']\n\n urls = [word for domain in top_level_domains\n for word in entry_words\n if domain in word]\n\n urls = list(set(urls))\n\n html_urls = []\n\n for url in urls:\n\n if \"http\" not in url:\n html_url = '''{0}'''.format(url)\n\n else:\n domain = url.strip('http://')\n html_url = '''{1}'''.format(url, domain)\n\n html_urls.append(html_url)\n\n return [urls, html_urls] if urls or html_urls else None\n\n\n# test locations detection in entries with no time indications\ndef loc_re_parse(entry):\n\n result = ''\n googlemapsurl = 'http://maps.google.com/maps?q='\n at_to = re.search(r'\\s(at|to)\\s', entry)\n if at_to:\n # logging.debug('at_to found')\n words = entry.split()\n if 'at' in entry:\n at_to_pos = words.index('at')\n elif 'to' in entry:\n at_to_pos = words.index('to')\n location = words[at_to_pos + 1:]\n entry_body = words[:at_to_pos + 1]\n location = ' '.join(location)\n # logging.debug('location: {}'.format(location))\n googlemapsurl += urllib.quote(location)\n quickday_loc_url = ''' {1}'''.format(\n googlemapsurl, location)\n result += ' '.join(entry_body)\n result += quickday_loc_url\n else:\n result = entry\n\n return result\n\n\ndef test_url_parse_short():\n assert url_parse('amazon.com') == [\n ['amazon.com'],\n [\"amazon.com\"]\n ]\n\n\ndef test_url_parse_long():\n assert url_parse('http://www.amazon.com') == [\n ['http://www.amazon.com'],\n [\"www.amazon.com\"]\n ]\n","sub_path":"tests/test_widgets.py","file_name":"test_widgets.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"481508559","text":"from __future__ import division\n\nimport time\nimport os\nfrom collections import namedtuple\n\nimport numpy as np\nfrom scipy import io\n\nfrom pacu.core.io.view.zero_dimension_array import ZeroDimensionArrayView\nfrom pacu.util.path import Path\nfrom pacu.profile import manager\n\nopt = manager.instance('opt')\n\nDimension = namedtuple('Dimension', 'height, width')\n\nclass ScanboxMatView(ZeroDimensionArrayView):\n def __init__(self, path):\n self.path = Path(path).ensure_suffix('.mat')\n self.bound_sbx_path = self.path.with_suffix('.sbx')\n array = io.loadmat(self.path.str, squeeze_me=True).get('info')\n super(ScanboxMatView, self).__init__(array)\n @property\n def is_aligned(self):\n return self.path.name.startswith('Aligned')\n 
@property\n def sbxsize(self):\n #return self.path.with_suffix('.sbx').size\n return Path(self.stempath).with_suffix('.sbx').size\n @property\n def sbxtime(self):\n #return self.path.with_suffix('.sbx').created_at\n return Path(self.stempath).with_suffix('.sbx').created_at\n @property\n def sbxpath(self):\n #return self.path.with_suffix('.sbx').relative_to(opt.scanbox_root)\n #return Path(os.path.split(self.stempath)[1]).with_suffix('.sbx')\n return Path(self.stempath).with_suffix('.sbx').relative_to(opt.scanbox_root)\n @property\n def iopath(self):\n return self.sbxpath.with_suffix('.io')\n @property\n def shape(self):\n return tuple(reversed((self.nframes, self.channels) + self.dimension))\n @property\n def dimension(self):\n return Dimension(*self.sz)\n @property\n def nframes(self):\n nframes = int(self.sbxsize/self.recordsPerBuffer/\n self.dimension.width/2/self.channels)\n return nframes * (1 if self.scanmode else 2)\n @property\n def framerate(self):\n # recordsPerBuffer = self.originalRecordsPerBuffer \\\n # if self.is_aligned else self.recordsPerBuffer\n if self.is_aligned:\n try:\n recordsPerBuffer = self.originalRecordsPerBuffer\n except:\n recordsPerBuffer = self.recordsPerBuffer\n else:\n recordsPerBuffer = self.recordsPerBuffer\n rate = self.resfreq / recordsPerBuffer\n return rate if self.scanmode else rate * 2\n @property\n def nchannels(self):\n if self.channels == -1:\n if self.chan.nchan == 1:\n return 1\n else:\n return 2\n elif self.channels == 1:\n return 2\n else:\n return 1\n #return 2 if self.channels == 1 else 1 (JZ) For compatibility with mesoscope\n @property\n def factor(self):\n if self.channels == -1:\n if self.chan.nchan == 1:\n return 2\n else:\n return 1\n elif self.channels == 1:\n return 1\n else:\n return 2\n #return 1 if self.channels == 1 else 2 (JZ) For compatibility with mesoscope\n @property\n def scanmodestr(self):\n return 'uni' if self.scanmode == 1 else 'bi'\n def get_max_idx(self, size):\n return int(size/self.recordsPerBuffer/self.sz[1]*self.factor/4 - 1)\n def get_shape(self, size):\n return self.get_max_idx(size) + 1, self.sz[0], self.sz[1]\n @property\n def recordsPerBuffer(self):\n rpb = self._dict.get('recordsPerBuffer')\n return rpb * 2 if self.scanmode is 0 else rpb\n# def __dir__(self): # quick and dirty: need to use descriptor set\n# return super(ScanboxInfoView, self).__dir__() + \\\n# 'path nchan factor framerate recordsPerBuffer sz'.split()\n @property\n def activeChannels(self):\n channels = self.channels\n if channels == -1:\n activeChannels = self.chan.sample\n if activeChannels[0] and activeChannels[1]:\n return ['Green', 'Red', 'Both']\n elif activeChannels[0] and not activeChannels[1]:\n return ['Green']\n elif activeChannels[1] and not activeChannels[0]:\n return ['Red']\n elif channels == 1:\n return ['Green', 'Red', 'Both']\n elif channels == 2:\n return ['Green']\n elif channels == 3:\n return ['Red']\n\n def toDict(self):\n data = self.items()\n data['iopath'] = str(self.iopath)\n data['framerate'] = self.framerate\n data['frameratestr'] = str(self.framerate) + ' fps'\n data['sbxsize'] = self.sbxsize.str\n data['sbxtime'] = time.mktime(self.sbxtime.timetuple())\n data['sbxpath'] = self.sbxpath.str\n data['nchannels'] = self.nchannels\n data['nframes'] = self.nframes\n data['nframesstr'] = str(self.nframes) + ' frames'\n data['scanmodestr'] = self.scanmodestr\n data['focal_pane_args'] = self.focal_pane_args\n return data\n @property\n def duration(self):\n return self.nframes / self.framerate\n # return '{s.nframes} 
frames at {s.framerate} fps is 00:01:14:01'.format(s=self)\n # duration = frame_count / frame_rate\n# @property\n# def originalRecordsPerBuffer(self):\n# obuf = self._dict.get('originalRecordsPerBuffer')\n# buf = self.recordsPerBuffer\n# print 'original: {}, normal: buf'.format(obuf, buf)\n# return obuf or buf\n @property\n def focal_pane_args(self):\n try:\n if self.volscan:\n _, _, n = map(int, self.otparam)\n else:\n n = 1\n except:\n n = 1\n try:\n if self.volscan:\n waves = list(map(int, self.otwave))\n else:\n waves = [0]\n except:\n waves = [0]\n return dict(waves=waves, n=n)\n @property\n def memmap(self): # first channel\n shape = self.get_shape(self.bound_sbx_path.size)\n chans = np.memmap(self.bound_sbx_path.str, dtype='uint16', mode='r', shape=shape)\n return chans[0::mat.nchannels]\n @property\n def opened(self):\n return self.bound_sbx_path.open('rb')\n\n# import psutil\n# import functools\n# p = psutil.Process()\n# mat = ScanboxMatView('/Volumes/Users/ht/dev/current/pacu/tmp/sbxroot/Dario/P22_000_004.mat')\n# h, w = map(int, mat.sz)\n# mm = mat.memmap\n# frame_size = h * w * 2\n# raw = mat.opened\n# for index, chunk in enumerate(iter(functools.partial(raw.read, frame_size), '')):\n# print index, p.memory_percent()\n# print chunk == mm[index].tostring()\n# break\n","sub_path":"pacu/pacu/core/io/scanbox/view/mat.py","file_name":"mat.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"427588626","text":"import requests as req\nimport json\nperm_token = \"EAAd0ep1GT0wBAEbm1UTKaZAZBaxZAc6Iyg9gnlJWYxuHY0Wd2BklpJSvS6XpfsKIvsD6RdYrqZC0hUMyIKwsqZAAPZB40SxiQt7GmMSAko1Ln0GzPXG68eBQNcmmtifahM8v4ZAtemmnryaOMn20QoAke7ORZBPShv8bAwZBm9woNP1EkNvi0axSK\"\ntimeStamp_list = [] \nmessage_list = []\ndef jsonToLists(some_json) :\n count = len(some_json)\n for i in range(count) : \n print(some_json[i])\n print(\"----------\")\n try :\n tmp = some_json[i][\"live_broadcast_timestamp\"]\n except : \n tmp = -111\n timeStamp_list.append(tmp)\n message_list.append(some_json[i][\"message\"])\n return \n\ndef getVideoId(video_url) : \n tmp = video_url.split(\"videos/\")[1]\n video_id = tmp.split(\"/\")[0]\n return video_id\n\ndef makeRequest(video_id) :\n try :\n host_url = \"https://graph.facebook.com/v2.12/\" + video_id + \"?fields=created_time,live_status,updated_time,backdated_time,backdated_time_granularity,comments.limit(1000){live_broadcast_timestamp,message},length&access_token=\" + perm_token \n except : \n host_url = \"https://graph.facebook.com/v2.12/\" + video_id + \"?fields=created_time,live_status,updated_time,backdated_time,backdated_time_granularity,comments.limit(100){live_broadcast_timestamp,message},length&access_token=\" + perm_token \n else :\n host_url = \"https://graph.facebook.com/v2.12/\" + video_id + \"?fields=created_time,live_status,updated_time,backdated_time,backdated_time_granularity,comments{live_broadcast_timestamp,message},length&access_token=\" + perm_token \n\n response = req.get(host_url)\n response_json = json.loads(response.text)\n response_code = response.status_code\n return response_json,response_code\n\ndef init(url) :\n video_id = getVideoId(url)\n response_json , response_code = makeRequest(video_id)\n print(response_json)\n print(response_json[\"comments\"][\"data\"])\n jsonToLists(response_json[\"comments\"][\"data\"])\n try :\n live_status = response_json[\"live_status\"]\n except:\n live_status = None \n video_duration = response_json[\"length\"]\n output = {\n 
\"live_status\" : live_status ,\n \"video_duration\" : video_duration , \n \"timeStamp_list\" : timeStamp_list , \n \"message_list\" : message_list , \n \"video_id\" : video_id,\n }\n return output\n\n# if __name__ == \"_main_\" :\n# video_url = raw_input()\n# init(video_url)\n# url = \"https://www.facebook.com/election.commission.iitk/videos/810829939120636\"\n# video_id = getVideoId(url)\n# response_json , response_code = makeRequest(video_id)\n# print(video_id , response_json , response_code)\n","sub_path":"Project2/myapp/lib/fb_comments.py","file_name":"fb_comments.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"413193273","text":"#! /usr/bin/python\n# -*- coding: iso-8859-15 -*-\nfrom pylab import *\nimport matplotlib.pyplot as plt\nfrom matplotlib import *\nfrom mpl_toolkits.mplot3d import Axes3D # Cargo Axes3D de mpl_toolkits.mplot3d\nfrom scipy.misc import imread # Cargo imread de scipy.misc\nimport numpy as np # Cargo numpy como el aliaas np\n\n# Leo una imagen y la almaceno en imagen_superficial\nimagen_superficial = imread('python.png')\n\n# Creo una figura\nplt.figure()\n\n# Muestro la imagen en pantalla\nplt.imshow(imagen_superficial)\n\n# Añado etiquetas\nplt.title('Imagen que usaremos de superficie')\nplt.xlabel(u'# de píxeles')\nplt.ylabel(u'# de píxeles')\n\n# Creo otra figura y la almaceno en figura_3d\nfigura_3d = plt.figure()\n\n# Indicamos que vamos a representar en 3D\nax = figura_3d.gca(projection = '3d')\n\n# Creamos los arrays dimensionales de la misma dimensión que imagen_superficial\nX = np.linspace(-5, 5, imagen_superficial.shape[0])\nY = np.linspace(-5, 5, imagen_superficial.shape[1])\n\n# Obtenemos las coordenadas a partir de los arrays creados\nX, Y = np.meshgrid(X, Y)\n\n# Defino la función que deseo representar\nR = np.sqrt(X ** 2 + Y ** 2)\nZ = np.sin(R)\n\n# Reescalamos de RGB a [0-1]\nimagen_superficial = imagen_superficial.swapaxes(0, 1) / 255.\n\n# meshgrid orienta los ejes al revés luego hay que voltear\nax.plot_surface(X, Y, Z, facecolors = np.flipud(imagen_superficial))\n\n# Fijamos la posición inicial de la grafica\nax.view_init(45, -35)\n\n# Añadimos etiquetas\nplt.title(u'Imagen sobre una grafica 3D')\nplt.xlabel('Eje x')\nplt.ylabel('Eje y')\n# Mostramos en pantalla\nplt.show()","sub_path":"Graficos5.py","file_name":"Graficos5.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"165488021","text":"#!/usr/bin/python3\nfrom pwn import *\n\nrand = 0x6b8b4567\n\ns = ssh('random', 'pwnable.kr', 2222, 'guest')\np = s.process('./random')\n\nkey = rand ^ 0xdeadbeef\n\np.sendline(str(key))\nprint(p.recvall().decode('ascii'))","sub_path":"Toddler's Bottle/random/_random.py","file_name":"_random.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"249551386","text":"from sys import stdin\nlista=[\"-\",\"+\",\"/\",\"*\"]\ndef f(exp):\n global lista\n x=[]\n n=\"\"\n v=0\n for i in range(len(exp)):\n u=exp[i]\n if i==0 and u==\"-\":\n n+=\"-\"\n v+=1\n #print(\"primer if i==0 and u== -:\",n,v)\n if i==0 and u==\"+\":\n #print(\"segundo if i==0 and u== +:\",n,v)\n continue\n elif u==\"-\" and exp[i-1] in lista and i!=0:\n n+=\"-\"\n v+=1\n #print(\"tercer elif u== - and exp[i-1] in lista:\",n,v)\n elif u==\"+\" and exp[i-1] in lista and i!=0:\n #print(\"cuarto elif u== + 
and exp[i-1] in lista:\",n,v)\n continue\n elif u in lista and exp[i-1] not in lista and i!=0:\n g=float(n[v:])*((-1)**v)\n x.append(g)\n x.append(exp[i])\n #print(\"quinto elif u in lista and exp[i-1] not in lista:\",n,v)\n n=\"\"\n v=0\n elif u != \"+\" and u!=\"-\" and u!=\"*\" and u!=\"/\":\n n+=exp[i]\n #print(\"else\",n,v)\n g=float(n[v:])*((-1)**v)\n x.append(g)\n return x\ndef evalue(j):\n h=[]\n for i in range(len(j)):\n h.append(j[i])\n if len(h)>2 and h[-2]==\"*\":\n temp=h.pop()\n h.pop()\n h.append(h.pop()*temp)\n if len(h)>2 and h[-2]==\"/\":\n temp=h.pop()\n h.pop()\n h.append(h.pop()/temp)\n y=[]\n for i in range(len(h)):\n y.append(h[i])\n if len(y)>2 and y[-2]==\"+\":\n temp=y.pop()\n y.pop()\n y.append(y.pop()+temp)\n if len(y)>2 and y[-2]==\"-\":\n temp=y.pop()\n y.pop()\n y.append(y.pop()-temp)\n print(\"%.3f\"%y[0])\ndef main():\n exp=stdin.readline().strip()\n while exp:\n g=f(exp)\n evalue(g)\n exp=stdin.readline().strip()\nmain()\n","sub_path":"ejercicios/Data structures simples/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"345977071","text":"# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import create_engine\nimport tushare as ts\nimport pandas as pd\nimport datetime\nfrom datetime import timedelta\nfrom progressbar import ProgressBar,SimpleProgress,Bar,ETA,ReverseBar\nimport settings\nimport sqlite3\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.engine.url import URL\n\n\n# connect to the database\n#conn = create_engine(URL(**settings.DATABASE))\nconn = sqlite3.connect('cn_stocks.db')\n\nts.set_token('3c9fcd3daa9244ca0c45a7e47d5ba14004c9aff7208506910b991f30')\npro = ts.pro_api()\n#conn = sqlite3.connect('cn_stocks.db')\n#engine = create_engine('mysql+pymysql://stock:494904@120.79.35.86:3306/stocks?charset=utf8')\n\ntoday = (datetime.datetime.today()).strftime(\"%Y-%m-%d\")\ntoday_all_real = ts.get_today_all()\n\ntry:\n\ttoday_all = pd.read_sql('SELECT * from today_all',conn).drop(['date'],axis=1)\nexcept:\n\ttoday_all = today_all_real\n\ttoday_all_real.reset_index().to_sql('today_all',conn,if_exists='replace',index=False)\n\nif not today_all_real.equals(today_all):\n\tprint('NOT EQUALS')\n\ttoday_all = today_all_real\n\ttoday_all['date'] = today\n\ttoday_all.reset_index().to_sql('today_all',conn,if_exists='replace',index=False)\n\ntoday_all = today_all.set_index('code')\n\n\nall_stocks = ts.get_stock_basics()\nall_stocks.reset_index().to_sql('all_stocks',conn,if_exists='replace',index=False)\nall_stocks_list = all_stocks.index.tolist()\nall_stocks_dict = {code:all_stocks.loc[code]['name'] for code in all_stocks.index.tolist()}\n\nwidgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]\npbar = ProgressBar(widgets=widgets,maxval=len(all_stocks_dict.keys())).start()\n\nfor i,code in enumerate(all_stocks_dict.keys()):\n\ttry:\n\t\tcurrent_stock = pd.read_sql('SELECT * from \\'{}\\''.format(code),conn)\n\t\tstock = today_all.loc[code][['open','high','trade','low','volume','changepercent']]\n\n\t\tdf_stock = pd.DataFrame([stock])\n\t\tdf_stock['date'] = today\n\t\tdf_stock['volume'] = df_stock['volume']/100.0\n\t\tdf_stock.columns = ['open','high','close','low','volume','p_change','date']\n\n\t\tdf_combine = pd.concat([current_stock.sort_values('date'),df_stock])\n\t\tdf_combine['ma5'] = df_combine['ma5'].fillna(df_combine['close'].rolling(5).mean())\n\t\tdf_combine['max5'] = df_combine['close'].rolling(5).max()\n\t\tdf_combine['min5'] = 
df_combine['close'].rolling(5).min()\n\t\tdf_combine['ma10'] = df_combine['ma10'].fillna(df_combine['close'].rolling(10).mean())\n\t\tdf_combine['max10'] = df_combine['close'].rolling(10).max()\n\t\tdf_combine['min10'] = df_combine['close'].rolling(10).min()\n\t\tdf_combine['ma20'] = df_combine['ma20'].fillna(df_combine['close'].rolling(20).mean())\n\t\tdf_combine['max20'] = df_combine['close'].rolling(20).max()\n\t\tdf_combine['min20'] = df_combine['close'].rolling(20).min()\n\t\tdf_combine['ma30'] = df_combine['close'].rolling(30).mean()\n\t\tdf_combine['max30'] = df_combine['close'].rolling(30).max()\n\t\tdf_combine['min30'] = df_combine['close'].rolling(30).min()\n\t\tdf_combine['ma60'] = df_combine['close'].rolling(60).mean()\n\t\tdf_combine['max60'] = df_combine['close'].rolling(60).max()\n\t\tdf_combine['min60'] = df_combine['close'].rolling(60).min()\n\t\tdf_combine['ma120'] = df_combine['close'].rolling(120).mean()\n\t\tdf_combine['max120'] = df_combine['close'].rolling(120).max()\n\t\tdf_combine['min120'] = df_combine['close'].rolling(120).min()\n\n\t\tdf_combine['v_ma5'] = df_combine['v_ma5'].fillna(df_combine['volume'].rolling(5).mean())\n\t\tdf_combine['v_ma10'] = df_combine['v_ma10'].fillna(df_combine['volume'].rolling(10).mean())\n\t\tdf_combine['v_ma20'] = df_combine['v_ma20'].fillna(df_combine['volume'].rolling(20).mean())\n\t\tdf_combine['v_ma30'] = df_combine['volume'].rolling(30).mean()\n\t\tdf_combine['v_ma60'] = df_combine['volume'].rolling(60).mean()\n\t\tdf_combine['v_ma120'] = df_combine['volume'].rolling(120).mean()\n\n\t\tdf_combine.to_sql(code,conn,if_exists='replace',index=False)\n\texcept:\n\t\tcontinue\n\tpbar.update(i + 1)\n\nstocks_5 \t= {}#pd.DataFrame()\nstocks_10 \t= {}#pd.DataFrame()\nstocks_20 \t= {}#pd.DataFrame()\nstocks_30 \t= {}#pd.DataFrame()\nstocks_60 \t= {}#pd.DataFrame()\nstocks_90 \t= {}#pd.DataFrame()\nstocks_125 \t= {}#pd.DataFrame()\n\n\none_year_before = (datetime.datetime.today()-timedelta(days=365)).strftime(\"%Y%m%d\")\ncal = pro.trade_cal(start_date=one_year_before, end_date=datetime.datetime.today().strftime(\"%Y%m%d\")).sort_values('cal_date',ascending=False)\ncal['cal_date'] = pd.to_datetime(cal['cal_date'])\ntrade_date = cal[cal.is_open==1]['cal_date']\n\nfor i,code in enumerate(all_stocks_dict.keys()):\n\ttry:\n\t\tcurrent_stock = pd.read_sql('SELECT * from \\'{}\\''.format(code),conn)\n\t\tcurrent_stock['date'] = pd.to_datetime(current_stock['date'])\n\texcept:\n\t\tcontinue\n\n\tcurrent_stock['code'] = code\n\tcurrent_stock = current_stock.drop_duplicates()\n\n\tmask_60 = (current_stock['date']>trade_date.iloc[60]) & (current_stock['date']<=trade_date.iloc[0])\n\t#stocks_60 = pd.concat([stocks_60,current_stock.loc[mask_60]],sort=True)\n\tstocks_60[code]= current_stock.loc[mask_60]\n\n\t#mask_90 = (current_stock['date']>trade_date.iloc[90]) & (current_stock['date']<=trade_date.iloc[0])\n\t#stocks_90 = pd.concat([stocks_90,current_stock.loc[mask_90]],sort=True)\n\n\tmask_125 = (current_stock['date']>trade_date.iloc[125]) & (current_stock['date']<=trade_date.iloc[0])\n\t#stocks_120 = pd.concat([stocks_120,current_stock.loc[mask_120]],sort=True)\n\tstocks_125[code]= current_stock.loc[mask_125]\n\n\tpbar.update(i + 1)\n\nst60_df = pd.concat(stocks_60)\nst60_df.to_sql('stocks_60_days',conn,if_exists='replace',index=False)\n\nst125_df = 
pd.concat(stocks_125)\nst125_df.to_sql('stocks_125_days',conn,if_exists='replace',index=False)\n\npbar.finish()\n\n","sub_path":"stocks_update_shortcut.py","file_name":"stocks_update_shortcut.py","file_ext":"py","file_size_in_byte":5562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"229115301","text":"import keras\nfrom keras.layers import Activation\nfrom keras.layers import Conv2D, BatchNormalization, Dense, Flatten, Reshape\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\n\ndef train_model(x_train, y_train, x_test, y_test, batch_size=64, ep=2):\n\n model = keras.models.Sequential()\n\n model.add(Conv2D(64, kernel_size=(3,3), activation='relu', padding='same', input_shape=(9,9,1)))\n model.add(BatchNormalization())\n model.add(Conv2D(64, kernel_size=(3,3), activation='relu', padding='same'))\n model.add(BatchNormalization())\n model.add(Conv2D(128, kernel_size=(1,1), activation='relu', padding='same'))\n\n model.add(Flatten())\n model.add(Dense(81*9))\n model.add(Reshape((-1, 9)))\n model.add(Activation('softmax'))\n \n adam = keras.optimizers.adam(lr=.001)\n model.compile(loss='sparse_categorical_crossentropy', optimizer=adam)\n\n print(model.fit(x_train, y_train, batch_size=batch_size, epochs=ep))\n\n model.save('sudoku.model')\n \n \n# score = model.evaluate(x_test, y_test, verbose=0)\n# print('Test loss:', score[0])\n# print('Test accuracy:', score[1])\n \n \n\ndef get_data(file): \n\n data = pd.read_csv(file)\n\n feat_raw = data['quizzes']\n label_raw = data['solutions']\n\n feat = []\n label = []\n\n for i in feat_raw:\n \n x = np.array([int(j) for j in i]).reshape((9,9,1))\n feat.append(x)\n \n feat = np.array(feat)\n feat = feat/9\n feat -= .5 \n \n for i in label_raw:\n \n x = np.array([int(j) for j in i]).reshape((81,1)) - 1\n label.append(x) \n \n label = np.array(label)\n \n del(feat_raw)\n del(label_raw) \n\n x_train, x_test, y_train, y_test = train_test_split(feat, label, test_size=0.2, random_state=42)\n \n train_model(x_train, y_train, x_test, y_test)\n \n return x_train, x_test, y_train, y_test\n\n\n\n\n \n#data = get_data(\"sudoku.csv\")\n","sub_path":"VersionPython/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"415429960","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport math\ndef quadratic(a,b,c):\n\tif not isinstance(a,(int, float)) & isinstance(b,(int, float))& isinstance(c,(int, float)):\n\t\traise TypeError('Unexception input!')\n\tdelta = b*b - 4*a*c\n\tif delta < 0:\n\t\treturn 'This equation has no root'\n\telif delta == 0:\n\t\treturn -b/(2*a)\n\telse:\n\t\troot1 = (-b + math.sqrt(delta)) / (2*a)\n\t\troot2 = (-b - math.sqrt(delta)) / (2*a)\n\t\treturn root1, root2\n","sub_path":"SolveEquation.py","file_name":"SolveEquation.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"654331555","text":"class Solution(object):\r\n def firstMissingPositive(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: int\r\n \"\"\"\r\n for i in range(len(nums)):\r\n while nums[i] > 0 and nums[i] <= len(nums) and nums[i] != i + 1 and nums[nums[i] - 1] != nums[i]:\r\n tmp = nums[nums[i] -1]\r\n nums[nums[i] - 1] = nums[i]\r\n nums[i] = tmp\r\n for i in range(len(nums)):\r\n if nums[i] != i + 1:\r\n return i + 1\r\n return 
len(nums) + 1\r\n\r\n# x = [-10,-3,-100,-1000,-239,1]\r\nx = [3,4,-1,1]\r\ns = Solution()\r\nans = s.firstMissingPositive(x)","sub_path":"First Missing Positive.py","file_name":"First Missing Positive.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"210879086","text":"from __future__ import print_function\nimport numpy as np\nimport datetime\nimport extrapolate as ex\n\ntry:\n import ESMF\nexcept ImportError:\n print(\"Could not find module ESMF\")\n pass\n\n__author__ = 'Trond Kristiansen'\n__email__ = 'me@trondkristiansen.com'\n__created__ = datetime.datetime(2008, 12, 4)\n__modified__ = datetime.datetime(2018, 4, 25)\n__version__ = \"1.5\"\n__status__ = \"Development\"\n\n\ndef laplacefilter(field, threshold, toxi, toeta):\n undef = 2.0e+35\n tx = 0.9 * undef\n critx = 0.01\n cor = 1.6\n mxs = 10\n\n field = np.where(abs(field) > threshold, undef, field)\n\n field = ex.extrapolate.fill(int(1), int(toxi),\n int(1), int(toeta),\n float(tx), float(critx), float(cor), float(mxs),\n np.asarray(field, order='Fortran'),\n int(toxi),\n int(toeta))\n return field\n\n\ndef dohorinterpolationregulargrid(confM2R, mydata):\n if confM2R.showprogress is True:\n import progressbar\n # http://progressbar-2.readthedocs.org/en/latest/examples.html\n progress = progressbar.ProgressBar(widgets=[progressbar.Percentage(), progressbar.Bar()],\n maxval=confM2R.grdMODEL.nlevels).start()\n # progress = progressbar.ProgressBar(widgets=[progressbar.BouncingBar(marker=progressbar.RotatingMarker(), fill_left=True)], maxval=grdMODEL.Nlevels).start()\n\n indexROMS_Z_ST = (confM2R.grdMODEL.nlevels, confM2R.grdROMS.eta_rho, confM2R.grdROMS.xi_rho)\n array1 = np.zeros((indexROMS_Z_ST), dtype=np.float64)\n\n for k in range(confM2R.grdMODEL.nlevels):\n\n if confM2R.useesmf:\n print(np.shape(mydata),k,confM2R.grdMODEL.nlevels)\n confM2R.grdMODEL.fieldSrc.data[:, :] = np.flipud(np.rot90(np.squeeze(mydata[k, :, :])))\n # Get the actual regridded array\n field = confM2R.grdMODEL.regridSrc2Dst_rho(confM2R.grdMODEL.fieldSrc, confM2R.grdMODEL.fieldDst_rho)\n\n # Since ESMF uses coordinates (x,y) we need to rotate and flip to get back to (y,x) order.\n field = np.fliplr(np.rot90(field.data, 3))\n\n if confM2R.usefilter:\n field = laplacefilter(field, 1000, confM2R.grdROMS.xi_rho, confM2R.grdROMS.eta_rho)\n # field=field*grdROMS.mask_rho\n\n array1[k, :, :] = field\n\n # if k in [34,17,2]:\n # import plotData\n # plotData.contourMap(grdROMS, grdROMS.lon_rho, grdROMS.lat_rho, field, str(k)+'_withfilter', myvar)\n # if __debug__:\n # print \"Data range after horisontal interpolation: \", field.min(), field.max()\n\n if confM2R.showprogress is True:\n progress.update(k)\n\n return array1\n\n\ndef dohorinterpolationsshregulargrid(confM2R, myvar, mydata):\n if myvar in [\"uice\"]:\n indexROMS_Z_ST = (confM2R.grdMODEL.nlevels, confM2R.grdROMS.eta_u, confM2R.grdROMS.xi_u)\n toxi = confM2R.grdROMS.xi_u\n toeta = confM2R.grdROMS.eta_u\n mymask = confM2R.grdROMS.mask_u\n elif myvar in [\"vice\"]:\n indexROMS_Z_ST = (confM2R.grdMODEL.nlevels, confM2R.grdROMS.eta_v, confM2R.grdROMS.xi_v)\n toxi = confM2R.grdROMS.xi_v\n toeta = confM2R.grdROMS.eta_v\n mymask = confM2R.grdROMS.mask_v\n else:\n indexROMS_Z_ST = (confM2R.grdMODEL.nlevels, confM2R.grdROMS.eta_rho, confM2R.grdROMS.xi_rho)\n toxi = confM2R.grdROMS.xi_rho\n toeta = confM2R.grdROMS.eta_rho\n mymask = confM2R.grdROMS.mask_rho\n\n array1 = np.zeros((indexROMS_Z_ST), dtype=np.float64)\n\n if 
confM2R.useesmf:\n\n confM2R.grdMODEL.fieldSrc.data[:, :] = np.flipud(np.rot90(np.squeeze(mydata[:, :])))\n\n if myvar in [\"uice\"]:\n field = confM2R.grdMODEL.regridSrc2Dst_u(confM2R.grdMODEL.fieldSrc, confM2R.grdMODEL.fieldDst_u)\n elif myvar in [\"vice\"]:\n field = confM2R.grdMODEL.regridSrc2Dst_v(confM2R.grdMODEL.fieldSrc, confM2R.grdMODEL.fieldDst_v)\n else:\n field = confM2R.grdMODEL.regridSrc2Dst_rho(confM2R.grdMODEL.fieldSrc, confM2R.grdMODEL.fieldDst_rho)\n\n field = np.fliplr(np.rot90(field.data, 3))\n # if myvar in [\"hice\",\"aice\"]:\n # import plotData\n # plotData.contourMap(grdROMS,grdROMS.lon_rho,grdROMS.lat_rho, field, \"surface\", myvar)\n\n # Smooth the output\n if confM2R.usefilter:\n field = laplacefilter(field, 1000, toxi, toeta)\n field = field * mymask\n array1[0, :, :] = field\n\n # import plotData\n # plotData.contourMap(grdROMS, tolon, tolat, field, \"34\", myvar)\n\n return array1\n","sub_path":"interp2D.py","file_name":"interp2D.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"392360662","text":"import Tkinter as tk\nimport threading\nimport logging\nimport time\nimport sys\n\nimport thread1\n\nclass InRedirector():\n def __init__(self, outRedirector):\n sys.stdin = self\n self.outRedirector = outRedirector\n \n def readline(self):\n line = self.outRedirector.getInputLine()\n while not line:\n line = self.outRedirector.getInputLine()\n time.sleep(0.1)\n return line\n\nclass OutRedirector(tk.Text):\n def __init__(self, *args, **kwargs):\n tk.Text.__init__(self, *args, **kwargs)\n sys.stdout = self\n self.input_buffer = \"\"\n self.input_lines = []\n self.bind(\"<Key>\", self.key)\n sys.stdin = InRedirector(self)\n\n def key(self, key):\n if key.char:\n self.input_buffer += key.char\n if key.char == \"\\r\" or key.char == \"\\n\":\n self.input_lines.append(self.input_buffer[:-1] + \"\\n\")\n self.input_buffer = \"\"\n\n def getInputLine(self):\n if self.input_lines == []:\n return None\n else:\n line = self.input_lines[0]\n self.input_lines = self.input_lines[1:]\n return line\n\n def write(self, s):\n self.insert(\"end\", s)\n self.update()\n\nclass ErrorHandler(tk.Text):\n def __init__(self, *args, **kwargs):\n tk.Text.__init__(self, *args, **kwargs)\n self.config(state=\"disabled\")\n \n def write(self, s):\n self.config(state=\"normal\")\n self.insert(\"end\", s)\n self.config(state=\"disabled\")\n self.update()\n\n def flush(self):\n pass\n\ndef main():\n root = tk.Tk()\n\n text = OutRedirector(root)\n text.grid()\n\n err = ErrorHandler(root)\n err.grid()\n\n logger = logging.getLogger(\"error_logger\")\n logger.setLevel(logging.DEBUG)\n\n handler = logging.StreamHandler(err)\n handler.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n info = {'stop' : False}\n thread = threading.Thread(target=thread1.fun, args=(info,))\n thread.daemon = True\n thread.start()\n\n root.mainloop()\n\nif __name__ == '__main__':\n main()","sub_path":"pmagnus-programs/graphics/threading/graphical_thread_logger.py","file_name":"graphical_thread_logger.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"628677190","text":"#!/usr/bin/env python\n# *-* coding: UTF-8 *-*\n\n# Copyright 2012-2022 Ronald Römer\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this 
file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.extend(['/home/zippy/vtkbool/build/lib/python3.10/site-packages/vtkbool'])\n\nfrom vtkBool import vtkPolyDataBooleanFilter\n\nfrom vtkmodules.vtkFiltersSources import vtkSphereSource, vtkLineSource\nfrom vtkmodules.vtkFiltersCore import vtkTubeFilter\nfrom vtkmodules.vtkIOLegacy import vtkPolyDataWriter\nfrom vtkmodules.vtkFiltersGeneral import vtkTransformPolyDataFilter\nfrom vtkmodules.vtkCommonTransforms import vtkTransform\n\nfrom collections import defaultdict\n\nsphere = vtkSphereSource()\nsphere.SetRadius(5)\nsphere.SetCenter(.5, .5, .5)\nsphere.SetPhiResolution(100)\nsphere.SetThetaResolution(100)\nsphere.Update()\n\npd = sphere.GetOutput()\n\ncenter = pd.GetCenter()\n\nline = vtkLineSource()\nline.SetPoint1(0, 0, 0)\nline.SetPoint2(0, 0, 4)\n\ntube = vtkTubeFilter()\ntube.SetInputConnection(line.GetOutputPort())\ntube.SetRadius(1)\ntube.SetNumberOfSides(50)\ntube.CappingOn()\n\ntransform = vtkTransform()\ntransform.PostMultiply()\ntransform.Translate(center[0], center[1], center[2]+3)\n\ntf = vtkTransformPolyDataFilter()\ntf.SetInputConnection(tube.GetOutputPort())\ntf.SetTransform(transform)\n\nmoves = [(-.5, 0), (-.5, 0), (-.5, 0), (-.5, 0), (-.5, 0), (-.5, 0), (-.5, 0), (-.5, 0), (-.5, 0)]\n\n# moves = [(.5, 0), (.5, 0), (.5, 0), (.5, 0),\n# (0, -.5), (0, -.5), (0, -.5), (0, -.5),\n# (-.5, 0), (-.5, 0), (-.5, 0), (-.5, 0),\n# (0, .5)]\n\n# moves = [(0, -.5), (0, -.5), (0, -.5), (0, -.5), (0, -.5), (0, -.5), (0, -.5)]\n\nfor i, xy in enumerate(moves):\n transform.Translate(*xy, 0)\n\n bf = vtkPolyDataBooleanFilter()\n bf.SetInputData(0, pd)\n bf.SetInputConnection(1, tf.GetOutputPort())\n bf.SetOperModeToDifference()\n\n writer = vtkPolyDataWriter()\n writer.SetFileName(f'sphere{i}.vtk')\n writer.SetInputData(pd)\n writer.Update()\n\n writer2 = vtkPolyDataWriter()\n writer2.SetFileName(f'tube{i}.vtk')\n writer2.SetInputConnection(tf.GetOutputPort())\n writer2.Update()\n\n writer3 = vtkPolyDataWriter()\n writer3.SetFileName(f'bool{i}.vtk')\n writer3.SetInputConnection(bf.GetOutputPort())\n writer3.Update()\n\n pd.Initialize()\n pd.DeepCopy(bf.GetOutput())\n","sub_path":"testing/milling/milling.py","file_name":"milling.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"562434533","text":"from PIL import Image\nfrom PIL import ImageDraw\nfrom Graph import Node\nimport shutil\nimport os\nfrom typing import Tuple, Dict, List\n\n\ndef draw_picture(size: int,\n paths: Dict[Tuple[Node, Node], List[Node]]) -> None:\n \"\"\"This function draws all the paths according to the given dictionary\"\"\"\n if os.path.exists('tmp'):\n shutil.rmtree(\"tmp\")\n os.mkdir(\"./tmp\")\n image = Image.new('RGB', (50 * size, 50 * size), \"black\")\n draw = ImageDraw.ImageDraw(image)\n count = 0\n for pair in paths:\n for point in pair:\n draw.rectangle(\n [(point.number[0] * 50, point.number[1] * 50),\n (point.number[0] * 50 + 50, point.number[1] * 50 + 50)],\n fill=point.color)\n cell_map(size, image, (192, 192, 192))\n 
image.save(f'tmp/{count}.jpg')\n count += 1\n for key in paths:\n for node in paths[key][1:-1]:\n draw.rectangle([(node.number[0]*50, node.number[1]*50),\n (node.number[0]*50+50, node.number[1]*50+50)],\n fill=node.color)\n\n cell_map(size, image, (192, 192, 192))\n image.save(f'tmp/{count}.jpg')\n count += 1\n image.save('grid_img.png', 'PNG')\n\n\ndef cell_map(size: int, img: Image, color: Tuple[int, int, int]) -> Image:\n \"\"\"Draws the grid lines over the field\"\"\"\n draw = ImageDraw.Draw(img)\n for i in range(50, 50 * size, 50):\n draw.line([(i, 0), (i, 50 * size - 1)], fill=color, width=2)\n for j in range(50, 50 * size, 50):\n draw.line([(0, j), (50 * size - 1, j)], fill=color, width=2)\n\n return img\n","sub_path":"numberlink/Drawing.py","file_name":"Drawing.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"422947687","text":"import pygame, sys\n\n\ndef physics(screen, items_on_screen, screen_dimensions, program_clock):\n \n running = True\n horizontal_velocity = 0\n vertical_velocity = 0\n horizontal_acceleration = 0\n vertical_acceleration = 0\n #item_on_screen\n #[SQLID, Name, Initial Location X, Initial Location y, width, height, button alternate 1, button alternate 2]\n #velocity = vel init + accel * time\n #screen_dimensions = {\n # \"screen_width\" : width,\n # \"screen_height\" : height,\n # \"width_unit\" : width_unit,\n # \"height_unit\" : height_unit,\n # \"width_in_units\": width_in_units,\n # \"height_in_units\" : height_in_units}\n\n #keys\n #a = 97 w = 119 s = 115 d = 100\n\n while running == True:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n\n if event.type == pygame.KEYDOWN:\n pressed_keys = pygame.key.get_pressed()\n if pressed_keys[97] == 1:\n print(\"a\")\n horizontal_acceleration = horizontal_acceleration - 3\n if pressed_keys[119] == 1:\n print(\"w\")\n vertical_acceleration = vertical_acceleration - 3\n if pressed_keys[115] == 1:\n print(\"s\")\n vertical_acceleration = vertical_acceleration + 3\n if pressed_keys[100] == 1:\n print(\"d\")\n horizontal_acceleration = horizontal_acceleration + 3\n \n #updates all items on the screen each loop\n for item in items_on_screen:\n if item[1] == 'start':\n item_surface = pygame.transform.scale(pygame.image.load(item[8]),(item[4]*screen_dimensions[\"width_unit\"],item[5]*screen_dimensions[\"height_unit\"]))\n screen.blit(item_surface, (item[2]*screen_dimensions[\"width_unit\"]+horizontal_velocity,item[3]*screen_dimensions[\"height_unit\"]+vertical_velocity))\n else:\n item_surface = pygame.transform.scale(pygame.image.load(item[8]),(item[4]*screen_dimensions[\"width_unit\"],item[5]*screen_dimensions[\"height_unit\"]))\n screen.blit(item_surface, (item[2]*screen_dimensions[\"width_unit\"],item[3]*screen_dimensions[\"height_unit\"]))\n\n pygame.display.flip()\n pygame.display.quit()\n pygame.quit()","sub_path":"functions/physics_functions.py","file_name":"physics_functions.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"161450853","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 16 16:39:52 2019\n\n@author: xcxg109\n\"\"\"\n\nimport pandas as pd\nimport query_code as q\nimport data_pull as pull\nimport data_process as process\nimport file_data_att as fd\nimport settings\nfrom queries_PIM import grainger_attr_query\nimport time\n\n\npd.options.mode.chained_assignment = None\n\n\ndef 
match_category(df):\n \"\"\"compare data colected from matching file (match_df) with grainger and gamut data pulls and create a column to tell analysts\n whether attributes from the two systems have been matched\"\"\"\n\n for row in df.itertuples():\n if (row.Index, row.Grainger_Attribute_Name) == (row.Index, row.Gamut_Attribute_Name):\n df.at[row.Index,'Matching'] = 'Match'\n elif process.isBlank(row.Grainger_Attribute_Name) == False:\n if process.isBlank(row.Gamut_Attribute_Name) == True:\n df.at[row.Index,'Matching'] = 'Grainger only'\n elif process.isBlank(row.Grainger_Attribute_Name) == True:\n if process.isBlank(row.Gamut_Attribute_Name) == False:\n df.at[row.Index,'Matching'] = 'Gamut only'\n \n return df\n\n\ndef grainger_process(grainger_df, grainger_sample, grainger_all, k):\n \"\"\"create a list of grainger skus, run through through the gamut_skus query and pull gamut attribute data if skus are present\n concat both dataframs and join them on matching attribute names\"\"\"\n \n df = pd.DataFrame()\n gamut_sample_vals = pd.DataFrame()\n gamut_att_vals = pd.DataFrame()\n # gamut_l3 = dict()\n \n grainger_skus = grainger_df.drop_duplicates(subset='Grainger_SKU') #create list of unique grainger skus that feed into gamut query\n grainger_sku_count = len(grainger_skus)\n print('Grainger SKU count = ', grainger_sku_count)\n\n grainger_df = grainger_df.drop_duplicates(subset=['Category_ID', 'Grainger_Attr_ID']) #group by Category_ID and attribute name and keep unique\n grainger_df['Grainger Blue Path'] = grainger_df['Segment_Name'] + ' > ' + grainger_df['Family_Name'] + \\\n ' > ' + grainger_df['Category_Name']\n grainger_df = grainger_df.drop(['Grainger_SKU', 'Grainger_Attribute_Value'], axis=1) #remove unneeded columns\n grainger_df = pd.merge(grainger_df, grainger_sample, on=['Grainger_Attribute_Name'])\n grainger_df = pd.merge(grainger_df, grainger_all, on=['Grainger_Attribute_Name'])\n \n grainger_df['Grainger_Attribute_Name'] = process.process_att(grainger_df['Grainger_Attribute_Name']) #prep att name for merge\n grainger_df.to_csv (\"F:/CGabriel/Grainger_Shorties/OUTPUT/grainger_test.csv\")\n \n gamut_skus = q.gamut_skus(grainger_skus) #get gamut sku list to determine pim nodes to pull\n gamut_skus = gamut_skus.drop_duplicates(subset='Gamut_SKU')\n\n # gamut_sku_counts = gamut_sku_list.groupby('Gamut_SKU')['Gamut_SKU']).count())\n if gamut_skus.empty == False:\n #create a dictionary of the unique gamut nodes that corresponde to the grainger node\n gamut_l3 = gamut_skus['Gamut_Node_ID'].unique() #create list of pim nodes to pull\n for node in gamut_l3:\n gamut_df = q.gamut_atts(node, 'tax.id') #tprod.\"categoryId\"') #get gamut attribute values for each gamut_l3 node\n gamut_att_vals, gamut_sample_vals = q.gamut_values(gamut_df) #gamut_values exports a list of --all-- normalized values (temp_df) and sample_values\n gamut_sample_vals = gamut_sample_vals.rename(columns={'Normalized Value': 'Gamut Attribute Sample Values'})\n gamut_att_vals = gamut_att_vals.rename(columns={'Normalized Value': 'Gamut ALL Values'})\n \n gamut_df = gamut_df.drop_duplicates(subset='Gamut_Attr_ID') #gamut attribute IDs are unique, so no need to group by pim node before getting unique\n gamut_df = gamut_df.drop(['Gamut_SKU', 'Grainger_SKU', 'Original Value', 'Normalized Value'], axis=1) #normalized values are collected as sample_value\n \n grainger_df['Gamut_Node_ID'] = int(node) #add correlating gamut node to grainger_df\n\n gamut_df = pd.merge(gamut_df, gamut_sample_vals, on=['Gamut_Attribute_Name']) 
#add t0p 5 normalized values to report\n gamut_df = pd.merge(gamut_df, gamut_att_vals, on=['Gamut_Attribute_Name']) #add t0p 5 normalized values to report\n gamut_df['Category_ID'] = int(k) #add grainger Category_ID column for gamut attributes\n gamut_df['Gamut_Attribute_Name'] = process.process_att(gamut_df['Gamut_Attribute_Name']) #prep att name for merge\n #create df based on names that match exactly\n gamut_df.to_csv (\"F:/CGabriel/Grainger_Shorties/OUTPUT/gamut_test.csv\")\n \n temp_df = pd.merge(grainger_df, gamut_df, left_on=['Grainger_Attribute_Name', 'Category_ID', 'Gamut_Node_ID'], \n right_on=['Gamut_Attribute_Name', 'Category_ID', 'Gamut_Node_ID'], how='outer')\n temp_df = match_category(temp_df) #compare grainger and gamut atts and create column to say whether they match\n temp_df['Grainger-Gamut Terminal Node Mapping'] = temp_df['Category_Name']+' -- '+ temp_df['Gamut_Node_Name']\n\n df = pd.concat([df, temp_df], axis=0) #add prepped df for this gamut node to the final df\n\n\n return df #where gamut_att_temp is the list of all normalized values for gamut attributes\n \n\n#determine SKU or node search\nsearch_level = 'cat.CATEGORY_ID'\n\ngamut_df = pd.DataFrame()\ngrainger_df = pd.DataFrame()\ngrainger_skus = pd.DataFrame()\n\nattribute_df = pd.DataFrame()\ngrainger_att_vals = pd.DataFrame()\ngrainger_sample_vals = pd.DataFrame()\ngamut_att_vals = pd.DataFrame\n\ndata_type = fd.search_type()\n\nif data_type == 'grainger_query':\n search_level = fd.blue_search_level()\n \nsearch_data = fd.data_in(data_type, settings.directory_name)\n\nstart_time = time.time()\nprint('working...')\n\nif data_type == 'grainger_query':\n if search_level == 'cat.CATEGORY_ID':\n for k in search_data:\n grainger_df = q.gcom.grainger_q(grainger_attr_query, search_level, k)\n if grainger_df.empty == False:\n grainger_att_vals, grainger_sample_vals = q.grainger_values(grainger_df)\n grainger_sample_vals = grainger_sample_vals.rename(columns={'Grainger_Attribute_Value': 'Grainger Attribute Sample Values'})\n grainger_att_vals = grainger_att_vals.rename(columns={'Grainger_Attribute_Value': 'Grainger ALL Values'})\n temp_df = grainger_process(grainger_df, grainger_sample_vals, grainger_att_vals, k)\n attribute_df = pd.concat([attribute_df, temp_df], axis=0, sort=False)\n print ('Grainger ', k)\n else:\n print('No attribute data')\n else:\n for k in search_data:\n temp_df = q.grainger_nodes(k, search_level)\n grainger_skus = pd.concat([grainger_skus, temp_df], axis=0, sort=False)\n grainger_l3 = grainger_skus['Category_ID'].unique() #create list of pim nodes to pull\n print('graigner L3s = ', grainger_l3)\n for k in grainger_l3:\n grainger_df = q.gcom.grainger_q(grainger_attr_query, 'cat.CATEGORY_ID', k)\n if grainger_df.empty == False:\n grainger_att_vals, grainger_sample_vals = q.grainger_values(grainger_df)\n grainger_sample_vals = grainger_sample_vals.rename(columns={'Grainger_Attribute_Value': 'Grainger Attribute Sample Values'})\n grainger_att_vals = grainger_att_vals.rename(columns={'Grainger_Attribute_Value': 'Grainger ALL Values'})\n temp_df = grainger_process(grainger_df, grainger_sample_vals, grainger_att_vals, k)\n attribute_df = pd.concat([attribute_df, temp_df], axis=0, sort=False)\n print ('Grainger ', k)\n else:\n print('No attribute data') \n\n# attribute_df['Grainger-Gamut Terminal Node Mapping'] = attribute_df['Category_Name']+' -- '+attribute_df['Gamut_Node_Name']\nattribute_df = attribute_df.drop(['Count_x', 'Count_y'], axis=1)\n\n#attribute_df['Identified Matching Gamut Attribute Name 
(use semi-colon to separate names)'] = \"\"\n#attribute_df['Identified Matching Grainger Attribute Name (use semi-colon to separate names)'] = \"\"\n#attribute_df['Analyst Notes'] = \"\"\n#attribute_df['Taxonomist Approved (yes/no)'] = \"\"\n#attribute_df['Taxonomist Notes'] = \"\"\n\n#pull.previous_match(attribute_df)\n\n#data = process.attribute_name_match(attribute_df)\n\nfd.attribute_match_data_out(settings.directory_name, attribute_df, search_level)\n\nprocess.attribute_name_match(attribute_df)\n \n \nprint(\"--- {} seconds ---\".format(round(time.time() - start_time, 2)))","sub_path":"z. old/ATTRIBUTE_MATCH old.py","file_name":"ATTRIBUTE_MATCH old.py","file_ext":"py","file_size_in_byte":8774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"228499114","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport time\nimport math\n\nRED = 0\nWHITE = 1\nBLUE = 2\nYELLOW = 3\nGREEN = 4\n\nred = [0.93365103, 0.35130355, 0.28260624] #Light\n#red = [0.83925676, 0.19983858, 0.15198655] #Dark\nwhite = [1, 1, 1]\n#orange = [0.99148726, 0.5729994, 0.25187913]\n#yellow = [0.9834839 , 1, 0.76263994]\n#yellow = [0.99455225, 0.9999614 , 0.5426788 ] #brighter\nyellow = [0.9834429 , 0.9999806 , 0.62282646]\ngreen = [0.61612934, 0.90352577, 0.5615079 ]\n#blue = [0.501899 , 0.89933634, 0.99078834]\nblue = [0.38518488, 0.8185951 , 0.96491045]\n\ndef displayImage(images, nrows = 1, ncols=1, title=[],image_max=0,sizex=15,sizey=8):\n #Handle the case of 1 image\n if nrows == 1 and ncols == 1:\n images = [images]\n #Mismatch\n if len(images) != nrows*ncols:\n print(\"Number of images != number of subplots\")\n return\n #Title mismathc\n if len(images) != len(title) and len(title)!=0:\n print(\"Number of images != number of titles\")\n return\n fig = plt.figure(figsize=(sizex,sizey))\n ax = []\n for i in range(1, ncols*nrows +1):\n image = images[i-1]\n\n #Deal for various types\n type = image.dtype\n if np.issubdtype(type, np.integer):\n if image_max==0:\n im_max = np.iinfo(type).max\n else:\n im_max=copy.deepcopy(image_max)\n else:\n im_max = 1\n\n plt.gray()\n ax.append( fig.add_subplot(nrows, ncols,i))\n if len(title)!=0:\n ax[-1].set_title(title[i-1])\n plt.axis(\"off\")\n plt.imshow(image,vmin=0,vmax=im_max)\n plt.show()\n return ax\n\ndef getColours():\n red = plt.imread(\"red_light.png\")\n purple = plt.imread(\"purple.png\")\n blue = plt.imread(\"blue.png\")\n yellow = plt.imread(\"yellow.png\")\n green = plt.imread(\"green.png\")\n #blue = plt.imread(\"blue.png\")\n return [red,purple,blue,yellow,green]\n\ndef getAverages(colours):\n avs = []\n for c in colours:\n av = np.mean(c,axis=(0,1))\n avs.append(av[0:3])\n return avs\n\ndef distIm(c1,c2):\n return c1-c2\n\ndef drawOnFeed(frame,cs):\n avs = [red,white,blue,yellow,green]\n for i in range(len(avs)):\n if not(np.isnan(cs[i][0]) or np.isnan(cs[i][1])):\n #newCol = (int(avs[i][2]*255),int(avs[i][1]*255),int(avs[i][0]*255)) #Reversed because BGR\n newCol = (0,0,0)\n newC = (int(round(cs[i][1])),int(round(cs[i][0]))) #Reversed because image\n cv2.circle(frame,newC,5,newCol,2)\n\ndef findCenters(image,tol=0.05,draw=False,frame=None):\n avs = [red,white,blue,yellow,green]\n cs = []\n uR = red+np.ones(3)*tol\n lR = red-np.ones(3)*tol\n uW = white+np.ones(3)*tol\n lW = white-np.ones(3)*tol\n uB = blue+np.ones(3)*tol\n lB = blue-np.ones(3)*tol\n uY = yellow+np.ones(3)*tol\n lY = yellow-np.ones(3)*tol\n uG = green+np.ones(3)*tol\n lG = green-np.ones(3)*tol\n u = [uR,uW,uB,uY,uG]\n 
l = [lR,lW,lB,lY,lG]\n for i in range(5):\n mask = cv2.inRange(image, l[i], u[i])\n res = cv2.bitwise_and(image,image, mask= mask)\n pix = np.where(mask==255)\n pixels = np.array([pix[0],pix[1]])\n centre = np.mean(pixels,axis=1)\n cs.append(centre)\n if draw and frame is not None:\n drawOnFeed(frame,cs)\n return cs\n\ndef calibrate():\n test = plt.imread(\"test.png\")[:,:,0:3]\n colours = getColours()\n testCols = []\n avs = getAverages(colours)\n for av in avs:\n testIm = np.ones((200,200,3))\n for x in range(200):\n for y in range(200):\n testIm[x,y,:] = av\n testCols.append(testIm)\n cs = findCenters(test)\n dispCols = colours+testCols\n displayImage(dispCols,2,5)\n fig, ax = plt.subplots()\n ax.imshow(test)\n for c in cs:\n ax.scatter(c[1], c[0], s=50, color='cyan')\n plt.show()\n print(avs)\n\nif __name__ == \"__main__\":\n calibrate()\n","sub_path":"colour.py","file_name":"colour.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"358697493","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 26 09:12:44 2020\r\n\r\n@author: Anup0\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.metrics import roc_auc_score, accuracy_score,classification_report,confusion_matrix\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\npath=\"D:/Nikhil Analytics/Python/Project/New project/\"\r\nCcard_data=pd.read_csv(path + \"creditcard.csv\",header=0)\r\nCcard_data.head()\r\nCcard_data.shape\r\nCcard_data.describe()\r\nCcard_data.isnull().sum()\r\n\r\n\r\nsns.countplot(Ccard_data.Class,label=\"class\",color=\"red\")\r\n\r\n# Target variable has value 0 very large compared to value 1.\r\n\r\ntarget0=Ccard_data[Ccard_data.Class==0]\r\nprint(len(target0))\r\ntarget1=Ccard_data[Ccard_data.Class==1]\r\nprint(len(target1))\r\n\r\nbalanced_data=pd.concat([target1,target0.sample(n=len(target1),random_state=10)])\r\n#Doubt1\r\n\r\n\r\nsns.countplot(balanced_data.Class,label=\"class\",color=\"green\")\r\n\r\nbalanced_data.describe()\r\n#Doubt2\r\n\r\n# Now target variable is balanced\r\n\r\nX=balanced_data.iloc[:,:-1]\r\nY=balanced_data.iloc[:,-1]\r\n\r\ntrain_x,test_x,train_y,test_y=train_test_split(X,Y,test_size=0.3,random_state=10)\r\n\r\n\r\n#USING RANDOM FOREST:-\r\n\r\nrandom_model=RandomForestClassifier(n_estimators=20,criterion=\"entropy\")\r\nrandom_model.fit(train_x,train_y)\r\npred_y=random_model.predict(test_x)\r\n\r\nprint(roc_auc_score(test_y, pred_y)) #0.9388668218965438\r\nprint(accuracy_score(test_y,pred_y)) #0.9391891891891891\r\nprint(confusion_matrix(test_y,pred_y))\r\n#[[147 2]\r\n#[ 16 131]]\r\nprint(classification_report(test_y,pred_y))\r\n\r\n# 0 0.90 0.99 0.94 149\r\n# 1 0.98 0.89 0.94 147\r\n\r\n# Using logistic regression:-\r\n\r\nlogistic=LogisticRegression(random_state=10).fit(train_x,train_y)\r\npred_y=logistic.predict(test_x)\r\n\r\nprint(accuracy_score(test_y,pred_y)) #0.902027027027027\r\nprint(confusion_matrix(test_y,pred_y))\r\n# [[138 11]\r\n# [ 18 129]]\r\nprint(classification_report(test_y,pred_y))\r\n# 0 0.88 0.93 0.90 149\r\n# 1 0.92 0.88 0.90 147\r\n\r\n\r\n# Using Gradient Boosting clasifier:- \r\n\r\ngbm_model=GradientBoostingClassifier(random_state=10).fit(train_x,train_y)\r\npred_y=gbm_model.predict(test_x) \r\n\r\nprint(accuracy_score(test_y,pred_y)) 
#0.9256756756756757\r\nprint(confusion_matrix(test_y,pred_y))\r\n#[[144 5]\r\n #[ 17 130]]\r\nprint(classification_report(test_y,pred_y))\r\n# 0 0.89 0.97 0.93 149\r\n# 1 0.96 0.88 0.92 147\r\n\r\n\"\"\"\r\nProject is about CREDIT CARD fraud transactions which I did using Python, \r\npackages used are pandas, seaborn and sklearn.\r\nStarted by extracting data in a pandas dataframe and checking for null values, which were absent.\r\nChecked for unbalanced data by plotting a count graph of the target variable.\r\nAs the data was unbalanced, balanced the data by concatenating.\r\nProceeded and declared feature and target variables, using the new balanced dataset. (Didn't drop any columns.) \r\nI put 'class' as Y, then split the data set into train and test with test size 30%.\r\nSince it is an unbalanced dataset, I used random forest first to create a model.\r\nI used a classifier instead of a regressor because the data is discrete.\r\nCriterion as entropy instead of gini for more accuracy.\r\nFinally checked the accuracy of the model by comparing the test sample of y with predicted y using the model.\r\nSince the data is unbalanced, used roc_auc_score to measure the accuracy of the model. \r\nUsing the above parameters, I got an accuracy of 0.9388668218965438 (best fit).\r\nI also created models using gradient boosting and logistic regression.\r\nThe best accuracy was achieved by random forest.\r\n\"\"\"\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"CreditCard_Anup.py","file_name":"CreditCard_Anup.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"361085245","text":"import sys\n\nplayer_names = []\nplayer_symbols = []\nlines_to_check = []\n\ndef populate_lines_to_check():\n global lines_to_check\n lines_to_check = []\n for r in range(n):\n lines_to_check.append((r, 0, 0, 1)) # horizontal row\n\n for c in range(n):\n lines_to_check.append((0, c, 1, 0)) # vertical column\n\n lines_to_check.append((0, 0, 1, 1)) # left diag\n lines_to_check.append((0, n - 1, 1, -1)) # right diag\n\ndef declare_board():\n global board\n global n\n print(\"Enter size of board (n)\")\n n=int(input())\n board = [[' ']*n for _ in range(n)]\n populate_lines_to_check()\n\ndef print_board():\n for r in range(n):\n s = \" | \".join(board[r])\n print(s)\n print('-'*len(s))\n\ndef get_player_info():\n global player_symbols\n global player_names\n\n player1_name = input(\"Enter player1 name:\")\n player2_name = input(\"Enter player2 name:\")\n player_names=[player1_name, player2_name]\n while True:\n print(player_names[0], \"Would you like to be X or 0?\")\n symbol = input().upper()\n if symbol =='X':\n other_symbol ='0'\n elif symbol == '0':\n other_symbol='X'\n else:\n print(\"Please enter a valid symbol, {} is not a symbol!\".format(symbol))\n continue\n player_symbols = [symbol, other_symbol]\n break\n\nclass GameResult:\n def __init__(self, terminated, who_won):\n self.terminated = terminated\n assert who_won >= -1 and who_won <= 1\n self.who_won = who_won\n\n def is_game_on(self):\n return not self.terminated\n\n def is_won(self):\n return self.terminated and self.who_won >= 0\n\n def __repr__(self):\n return \"({}, {})\".format(self.terminated, self.who_won)\n\ndef main_game():\n game_on = True\n player_one_playing = True\n\n while(game_on):\n print_board()\n get_correct_user_input(0 if player_one_playing else 1)\n game_result = get_game_result()\n game_on = game_result.is_game_on()\n if not game_on:\n if game_result.who_won == -1:\n print(\"Its a Tie!!\")\n 
else:\n print(player_names[game_result.who_won], \"Won\")\n else:\n player_one_playing = not player_one_playing\n\ndef get_correct_user_input(turn):\n print(player_names[turn]+\"'s turn\")\n print(\"Enter row and col number\")\n while True:\n try:\n p_in_r,p_in_c=input().split( )\n p_in_r=int(p_in_r)\n p_in_c=int(p_in_c)\n except ValueError:\n sys.stderr.write(\"Sorry, I didn't understand that.Please enter a valid index\")\n continue\n if p_in_r>n-1 or p_in_r<0 or p_in_c>n-1 or p_in_c<0:\n sys.stderr.write(\"Wrong index, please choose within 0 and n\\n\")\n continue\n elif board[p_in_r][p_in_c]!=' ':\n sys.stderr.write(\"Cell already filled, please choose an empty cell\")\n continue\n else:\n break\n board[p_in_r][p_in_c]=player_symbols[turn]\n\ndef check_line(start_r, start_c, del_r, del_c):\n r = start_r\n c = start_c\n symbols_in_row = set()\n for _ in range(n):\n symbols_in_row.add(board[r][c])\n r += del_r\n c += del_c\n\n if ' ' in symbols_in_row:\n return GameResult(False, -1)\n elif len(symbols_in_row) == 1:\n # Someone won a row, lets check who\n return GameResult(True, player_symbols.index(list(symbols_in_row)[0]))\n else:\n assert len(symbols_in_row) == 2\n return GameResult(True, -1)\n\n# This can be tersified up even further by definining lines to iterate upon\n# we can decouple the code that checks the lines vs the code that generates the lines to iterate upon\ndef get_game_result():\n unfilled_lines = 0\n for line in lines_to_check:\n game_result = check_line(*line)\n sys.stderr.write(\"For line {}, the result is {}\\n\".format(line, game_result))\n if game_result.is_won():\n return game_result\n elif not game_result.terminated:\n unfilled_lines += 1\n \n # There are no winners\n if unfilled_lines > 0:\n # There were unfilled lines, so it can't be a tie\n return GameResult(False, -1)\n else:\n # All lines are tied, and hence the game is tied\n return GameResult(True, -1)\n\ndef play_again():\n print(\"Would you like to play again?\")\n user = input()\n return user.upper() == 'Y'\n\ndef play_one_time():\n declare_board()\n get_player_info()\n main_game()\n\ndef seq():\n while True:\n play_one_time()\n if not play_again():\n break\n\nseq()\n","sub_path":"tictactoe/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"489265048","text":"from email.mime.text import MIMEText\nfrom email.header import Header\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email import encoders\nfrom openpyxl.chart import BarChart, Reference\nfrom datetime import datetime\nfrom flask_init import *\nimport smtplib, ssl\nimport codecs\nimport pandas as pd\nimport re\nimport openpyxl\nimport shutil\nimport os\nimport constants\n\n\ndef send_reports_email(port, smtp_server, sender_email, receiver_email, password, email_content):\n \"\"\"\n \"\"\"\n context = ssl.create_default_context()\n with smtplib.SMTP(smtp_server, port) as server:\n server.ehlo() # Can be omitted\n server.starttls(context=context)\n server.ehlo() # Can be omitted\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver_email, email_content)\n server.quit()\n\n\ndef archive_reports():\n \"\"\"\n \"\"\"\n report_paths = get_report_paths()\n for path in report_paths:\n basedir, filename = os.path.split(path)\n new_path = os.path.join(basedir, 'Archive/', filename)\n shutil.move(path, new_path)\n\n\ndef get_report_paths():\n \"\"\"\n 
\"\"\"\n report_paths = list(filter(lambda file_path: os.path.isfile(file_path), map(lambda filename: os.path.join(constants.REPORTS_PATH, filename), os.listdir(constants.REPORTS_PATH))))\n return report_paths\n\n\ndef generate_basic_report(table_name, form_html_filename):\n \"\"\"\n \"\"\"\n # eg. [{hebrew_description: hebrewtext, english_name: englishtext}, {}, ...]\n medical_full_tbl_df = pd.read_sql_query(\"SELECT * FROM {0}\".format(table_name), db.engine)\n input_hebrew_descs_to_names = english_input_name_to_hebrew_desc(os.path.join(constants.WORKING_DIR, 'templates/{0}'.format(form_html_filename)))\n # will be used for replacement of df column names from english to hebrew\n df_col_name_replacement_dict = {}\n for hebrew_english_link in input_hebrew_descs_to_names:\n df_col_name_replacement_dict[hebrew_english_link[\"english_name\"]] = hebrew_english_link[\"hebrew_description\"]\n\n medical_full_tbl_df = medical_full_tbl_df.rename(columns=df_col_name_replacement_dict)\n medical_full_tbl_df = medical_full_tbl_df.replace([\"yes\", \"no\"], [\"כן\", \"לא\"])\n medical_full_tbl_df = medical_full_tbl_df.drop(\"_id\", axis=1)\n return medical_full_tbl_df\n \n \ndef english_input_name_to_hebrew_desc(html_path):\n \"\"\"\n link between /